zxcvdavid/php-yaf
yaf_router.c
/*
  +----------------------------------------------------------------------+
  | Yet Another Framework                                                |
  +----------------------------------------------------------------------+
  | This source file is subject to version 3.01 of the PHP license,     |
  | that is bundled with this package in the file LICENSE, and is       |
  | available through the world-wide-web at the following url:          |
  | http://www.php.net/license/3_01.txt                                 |
  | If you did not receive a copy of the PHP license and are unable to  |
  | obtain it through the world-wide-web, please send a note to         |
  | license@php.net so we can mail you a copy immediately.              |
  +----------------------------------------------------------------------+
  | Author: <NAME> <<EMAIL>>                                            |
  +----------------------------------------------------------------------+
*/

/* $Id: yaf_router.c 329200 2013-01-18 06:26:40Z laruence $ */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "php.h"
#include "Zend/zend_interfaces.h"

#include "php_yaf.h"
#include "yaf_namespace.h"
#include "yaf_exception.h"
#include "yaf_application.h" /* for yaf_application_is_module_name */
#include "yaf_request.h"     /* for yaf_request_set_routed */
#include "yaf_router.h"
#include "yaf_config.h"

#include "routes/yaf_route_interface.h"
#include "routes/yaf_route_static.h"
#include "routes/yaf_route_simple.h"
#include "routes/yaf_route_supervar.h"
#include "routes/yaf_route_regex.h"
#include "routes/yaf_route_rewrite.h"
#include "routes/yaf_route_map.h"

zend_class_entry *yaf_router_ce;

/** {{{ yaf_router_t * yaf_router_instance(yaf_router_t *this_ptr TSRMLS_DC)
 */
yaf_router_t * yaf_router_instance(yaf_router_t *this_ptr TSRMLS_DC) {
	zval *routes;
	yaf_router_t *instance;
	yaf_route_t *route;

	if (this_ptr) {
		instance = this_ptr;
	} else {
		MAKE_STD_ZVAL(instance);
		object_init_ex(instance, yaf_router_ce);
	}

	MAKE_STD_ZVAL(routes);
	array_init(routes);

	if (!YAF_G(default_route)) {
static_route:
		MAKE_STD_ZVAL(route);
		object_init_ex(route, yaf_route_static_ce);
	} else {
		route = yaf_route_instance(NULL, YAF_G(default_route) TSRMLS_CC);
		if (!route) {
			php_error_docref(NULL TSRMLS_CC, E_WARNING,
					"Unable to initialize default route, use %s instead", yaf_route_static_ce->name);
			goto static_route;
		}
	}

	zend_hash_update(Z_ARRVAL_P(routes), "_default", sizeof("_default"), (void **)&route, sizeof(zval *), NULL);
	zend_update_property(yaf_router_ce, instance, ZEND_STRL(YAF_ROUTER_PROPERTY_NAME_ROUTES), routes TSRMLS_CC);

	zval_ptr_dtor(&routes);

	return instance;
}
/** }}} */

/** {{{ int yaf_router_route(yaf_router_t *router, yaf_request_t *request TSRMLS_DC)
 */
int yaf_router_route(yaf_router_t *router, yaf_request_t *request TSRMLS_DC) {
	zval *routers, *ret;
	yaf_route_t **route;
	HashTable *ht;

	routers = zend_read_property(yaf_router_ce, router, ZEND_STRL(YAF_ROUTER_PROPERTY_NAME_ROUTES), 1 TSRMLS_CC);

	/* routes are matched in reverse registration order: the most
	 * recently added route wins */
	ht = Z_ARRVAL_P(routers);
	for (zend_hash_internal_pointer_end(ht);
			zend_hash_has_more_elements(ht) == SUCCESS;
			zend_hash_move_backwards(ht)) {
		if (zend_hash_get_current_data(ht, (void **)&route) == FAILURE) {
			continue;
		}

		zend_call_method_with_1_params(route, Z_OBJCE_PP(route), NULL, "route", &ret, request);
		if (IS_BOOL != Z_TYPE_P(ret) || !Z_BVAL_P(ret)) {
			zval_ptr_dtor(&ret);
			continue;
		} else {
			char *key;
			uint len = 0;
			ulong idx = 0;

			switch (zend_hash_get_current_key_ex(ht, &key, &len, &idx, 0, NULL)) {
				case HASH_KEY_IS_LONG:
					zend_update_property_long(yaf_router_ce, router,
							ZEND_STRL(YAF_ROUTER_PROPERTY_NAME_CURRENT_ROUTE), idx TSRMLS_CC);
					break;
				case HASH_KEY_IS_STRING:
					if (len) {
						zend_update_property_string(yaf_router_ce, router,
								ZEND_STRL(YAF_ROUTER_PROPERTY_NAME_CURRENT_ROUTE), key TSRMLS_CC);
					}
					break;
			}

			yaf_request_set_routed(request, 1 TSRMLS_CC);
			zval_ptr_dtor(&ret);
			break;
		}
	}

	return 1;
}
/* }}} */

/** {{{ int yaf_router_add_config(yaf_router_t *router, zval *configs TSRMLS_DC)
 */
int yaf_router_add_config(yaf_router_t *router, zval *configs TSRMLS_DC) {
	zval **entry;
	HashTable *ht;
	yaf_route_t *route;

	if (!configs || IS_ARRAY != Z_TYPE_P(configs)) {
		return 0;
	} else {
		char *key = NULL;
		uint len = 0;
		ulong idx = 0;
		zval *routes;

		routes = zend_read_property(yaf_router_ce, router, ZEND_STRL(YAF_ROUTER_PROPERTY_NAME_ROUTES), 1 TSRMLS_CC);

		ht = Z_ARRVAL_P(configs);
		for (zend_hash_internal_pointer_reset(ht);
				zend_hash_has_more_elements(ht) == SUCCESS;
				zend_hash_move_forward(ht)) {
			if (zend_hash_get_current_data(ht, (void **)&entry) == FAILURE) {
				continue;
			}

			if (!entry || Z_TYPE_PP(entry) != IS_ARRAY) {
				continue;
			}

			route = yaf_route_instance(NULL, *entry TSRMLS_CC);
			switch (zend_hash_get_current_key_ex(ht, &key, &len, &idx, 0, NULL)) {
				case HASH_KEY_IS_STRING:
					if (!route) {
						php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to initialize route named '%s'", key);
						continue;
					}
					zend_hash_update(Z_ARRVAL_P(routes), key, len, (void **)&route, sizeof(zval *), NULL);
					break;
				case HASH_KEY_IS_LONG:
					if (!route) {
						php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to initialize route at index '%ld'", idx);
						continue;
					}
					zend_hash_index_update(Z_ARRVAL_P(routes), idx, (void **)&route, sizeof(zval *), NULL);
					break;
				default:
					continue;
			}
		}
		return 1;
	}
}
/* }}} */

/** {{{ zval * yaf_router_parse_parameters(char *uri TSRMLS_DC)
 */
zval * yaf_router_parse_parameters(char *uri TSRMLS_DC) {
	char *key, *ptrptr, *tmp, *value;
	zval *params, *val;
	uint key_len;

	MAKE_STD_ZVAL(params);
	array_init(params);

	/* the uri is split on the URL delimiter into alternating
	 * key/value segments; a trailing key without a value maps to NULL */
	tmp = estrdup(uri);
	key = php_strtok_r(tmp, YAF_ROUTER_URL_DELIMIETER, &ptrptr);
	while (key) {
		key_len = strlen(key);
		if (key_len) {
			MAKE_STD_ZVAL(val);
			value = php_strtok_r(NULL, YAF_ROUTER_URL_DELIMIETER, &ptrptr);
			if (value && strlen(value)) {
				ZVAL_STRING(val, value, 1);
			} else {
				ZVAL_NULL(val);
			}
			zend_hash_update(Z_ARRVAL_P(params), key, key_len + 1, (void **)&val, sizeof(zval *), NULL);
		}
		key = php_strtok_r(NULL, YAF_ROUTER_URL_DELIMIETER, &ptrptr);
	}

	efree(tmp);
	return params;
}
/* }}} */

/** {{{ proto public Yaf_Router::__construct(void)
 */
PHP_METHOD(yaf_router, __construct) {
	yaf_router_instance(getThis() TSRMLS_CC);
}
/* }}} */

/** {{{ proto public Yaf_Router::route(Yaf_Request $req)
 */
PHP_METHOD(yaf_router, route) {
	yaf_request_t *request;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &request) == FAILURE) {
		return;
	} else {
		RETURN_BOOL(yaf_router_route(getThis(), request TSRMLS_CC));
	}
}
/* }}} */

/** {{{ proto public Yaf_Router::addRoute(string $name, Yaf_Route_Interface $route)
 */
PHP_METHOD(yaf_router, addRoute) {
	char *name;
	zval *routes;
	yaf_route_t *route;
	uint len = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sz", &name, &len, &route) == FAILURE) {
		return;
	}

	if (!len) {
		RETURN_FALSE;
	}

	if (IS_OBJECT != Z_TYPE_P(route)
			|| !instanceof_function(Z_OBJCE_P(route), yaf_route_ce TSRMLS_CC)) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Expects a %s instance", yaf_route_ce->name);
		RETURN_FALSE;
	}

	routes = zend_read_property(yaf_router_ce, getThis(), ZEND_STRL(YAF_ROUTER_PROPERTY_NAME_ROUTES), 1 TSRMLS_CC);

	Z_ADDREF_P(route);
	zend_hash_update(Z_ARRVAL_P(routes), name, len + 1, (void **)&route, sizeof(zval *), NULL);

	RETURN_ZVAL(getThis(), 1, 0);
}
/* }}} */

/** {{{ proto public Yaf_Router::addConfig(Yaf_Config_Abstract $config)
 */
PHP_METHOD(yaf_router, addConfig) {
	yaf_config_t *config;
	zval *routes;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &config) == FAILURE) {
		return;
	}

	if (IS_OBJECT == Z_TYPE_P(config)
			&& instanceof_function(Z_OBJCE_P(config), yaf_config_ce TSRMLS_CC)) {
		routes = zend_read_property(yaf_config_ce, config, ZEND_STRL(YAF_CONFIG_PROPERT_NAME), 1 TSRMLS_CC);
	} else if (IS_ARRAY == Z_TYPE_P(config)) {
		routes = config;
	} else {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Expect a %s instance or an array, %s given",
				yaf_config_ce->name, zend_zval_type_name(config));
		RETURN_FALSE;
	}

	if (yaf_router_add_config(getThis(), routes TSRMLS_CC)) {
		RETURN_ZVAL(getThis(), 1, 0);
	} else {
		RETURN_FALSE;
	}
}
/* }}} */

/** {{{ proto public Yaf_Router::getRoute(string $name)
 */
PHP_METHOD(yaf_router, getRoute) {
	char *name;
	uint len;
	zval *routes;
	yaf_route_t **route;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &name, &len) == FAILURE) {
		return;
	}

	if (!len) {
		RETURN_FALSE;
	}

	routes = zend_read_property(yaf_router_ce, getThis(), ZEND_STRL(YAF_ROUTER_PROPERTY_NAME_ROUTES), 1 TSRMLS_CC);
	if (zend_hash_find(Z_ARRVAL_P(routes), name, len + 1, (void **)&route) == SUCCESS) {
		RETURN_ZVAL(*route, 1, 0);
	}

	RETURN_NULL();
}
/* }}} */

/** {{{ proto public Yaf_Router::getRoutes(void)
 */
PHP_METHOD(yaf_router, getRoutes) {
	zval *routes = zend_read_property(yaf_router_ce, getThis(), ZEND_STRL(YAF_ROUTER_PROPERTY_NAME_ROUTES), 1 TSRMLS_CC);
	RETURN_ZVAL(routes, 1, 0);
}
/* }}} */

/** {{{ proto public Yaf_Router::isModuleName(string $name)
 */
PHP_METHOD(yaf_router, isModuleName) {
	char *name;
	uint len;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &name, &len) == FAILURE) {
		return;
	}

	RETURN_BOOL(yaf_application_is_module_name(name, len TSRMLS_CC));
}
/* }}} */

/** {{{ proto public Yaf_Router::getCurrentRoute(void)
 */
PHP_METHOD(yaf_router, getCurrentRoute) {
	zval *route = zend_read_property(yaf_router_ce, getThis(), ZEND_STRL(YAF_ROUTER_PROPERTY_NAME_CURRENT_ROUTE), 1 TSRMLS_CC);
	RETURN_ZVAL(route, 1, 0);
}
/* }}} */

/** {{{ yaf_router_methods
 */
zend_function_entry yaf_router_methods[] = {
	PHP_ME(yaf_router, __construct,     NULL, ZEND_ACC_PUBLIC | ZEND_ACC_CTOR)
	PHP_ME(yaf_router, addRoute,        NULL, ZEND_ACC_PUBLIC)
	PHP_ME(yaf_router, addConfig,       NULL, ZEND_ACC_PUBLIC)
	PHP_ME(yaf_router, route,           NULL, ZEND_ACC_PUBLIC)
	PHP_ME(yaf_router, getRoute,        NULL, ZEND_ACC_PUBLIC)
	PHP_ME(yaf_router, getRoutes,       NULL, ZEND_ACC_PUBLIC)
	PHP_ME(yaf_router, getCurrentRoute, NULL, ZEND_ACC_PUBLIC)
	{NULL, NULL, NULL}
};
/* }}} */

/** {{{ YAF_STARTUP_FUNCTION
 */
YAF_STARTUP_FUNCTION(router) {
	zend_class_entry ce;

	(void)yaf_route_route_arginfo; /* tricky, suppress "defined but not used" warning */

	YAF_INIT_CLASS_ENTRY(ce, "Yaf_Router", "Yaf\\Router", yaf_router_methods);
	yaf_router_ce = zend_register_internal_class_ex(&ce, NULL, NULL TSRMLS_CC);
	yaf_router_ce->ce_flags |= ZEND_ACC_FINAL_CLASS;

	zend_declare_property_null(yaf_router_ce, ZEND_STRL(YAF_ROUTER_PROPERTY_NAME_ROUTES), ZEND_ACC_PROTECTED TSRMLS_CC);
	zend_declare_property_null(yaf_router_ce, ZEND_STRL(YAF_ROUTER_PROPERTY_NAME_CURRENT_ROUTE), ZEND_ACC_PROTECTED TSRMLS_CC);

	YAF_STARTUP(route);
	YAF_STARTUP(route_static);
	YAF_STARTUP(route_simple);
	YAF_STARTUP(route_supervar);
	YAF_STARTUP(route_rewrite);
	YAF_STARTUP(route_regex);
	YAF_STARTUP(route_map);

	return SUCCESS;
}
/* }}} */

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * End:
 * vim600: noet sw=4 ts=4 fdm=marker
 * vim<600: noet sw=4 ts=4
 */
zxcvdavid/php-yaf
routes/yaf_route_map.c
/*
  +----------------------------------------------------------------------+
  | Yet Another Framework                                                |
  +----------------------------------------------------------------------+
  | This source file is subject to version 3.01 of the PHP license,     |
  | that is bundled with this package in the file LICENSE, and is       |
  | available through the world-wide-web at the following url:          |
  | http://www.php.net/license/3_01.txt                                 |
  | If you did not receive a copy of the PHP license and are unable to  |
  | obtain it through the world-wide-web, please send a note to         |
  | license@php.net so we can mail you a copy immediately.              |
  +----------------------------------------------------------------------+
  | Author: <NAME> <<EMAIL>>                                            |
  +----------------------------------------------------------------------+
*/

/* $Id: map.c 329197 2013-01-18 05:55:37Z laruence $ */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "php.h"
#include "ext/standard/php_smart_str.h" /* for smart_str */

#include "php_yaf.h"
#include "yaf_namespace.h"
#include "yaf_exception.h"
#include "yaf_request.h"
#include "yaf_router.h"

#include "routes/yaf_route_interface.h"
#include "routes/yaf_route_map.h"

zend_class_entry *yaf_route_map_ce;

/** {{{ ARG_INFO
 */
ZEND_BEGIN_ARG_INFO_EX(yaf_route_map_construct_arginfo, 0, 0, 0)
	ZEND_ARG_INFO(0, controller_prefer)
	ZEND_ARG_INFO(0, delimiter)
ZEND_END_ARG_INFO()
/* }}} */

/* {{{ yaf_route_t * yaf_route_map_instance(yaf_route_t *this_ptr, zend_bool controller_prefer, char *delim, uint len TSRMLS_DC)
 */
yaf_route_t * yaf_route_map_instance(yaf_route_t *this_ptr, zend_bool controller_prefer, char *delim, uint len TSRMLS_DC) {
	yaf_route_t *instance;

	if (this_ptr) {
		instance = this_ptr;
	} else {
		MAKE_STD_ZVAL(instance);
		object_init_ex(instance, yaf_route_map_ce);
	}

	if (controller_prefer) {
		zend_update_property_bool(yaf_route_map_ce, instance, ZEND_STRL(YAF_ROUTE_MAP_VAR_NAME_CTL_PREFER), 1 TSRMLS_CC);
	}

	if (delim && len) {
		zend_update_property_stringl(yaf_route_map_ce, instance, ZEND_STRL(YAF_ROUTE_MAP_VAR_NAME_DELIMETER), delim, len TSRMLS_CC);
	}

	return instance;
}
/* }}} */

/** {{{ int yaf_route_map_route(yaf_route_t *route, yaf_request_t *request TSRMLS_DC)
 */
int yaf_route_map_route(yaf_route_t *route, yaf_request_t *request TSRMLS_DC) {
	zval *ctl_prefer, *delimer, *zuri, *base_uri, *params;
	char *req_uri, *tmp, *rest, *ptrptr, *seg;
	char *query_str = NULL;
	uint seg_len = 0;
	smart_str route_result = {0};

	zuri       = zend_read_property(yaf_request_ce, request, ZEND_STRL(YAF_REQUEST_PROPERTY_NAME_URI), 1 TSRMLS_CC);
	base_uri   = zend_read_property(yaf_request_ce, request, ZEND_STRL(YAF_REQUEST_PROPERTY_NAME_BASE), 1 TSRMLS_CC);
	ctl_prefer = zend_read_property(yaf_route_map_ce, route, ZEND_STRL(YAF_ROUTE_MAP_VAR_NAME_CTL_PREFER), 1 TSRMLS_CC);
	delimer    = zend_read_property(yaf_route_map_ce, route, ZEND_STRL(YAF_ROUTE_MAP_VAR_NAME_DELIMETER), 1 TSRMLS_CC);

	/* strip the base uri prefix off the request uri, if present */
	if (base_uri && IS_STRING == Z_TYPE_P(base_uri)
			&& !strncasecmp(Z_STRVAL_P(zuri), Z_STRVAL_P(base_uri), Z_STRLEN_P(base_uri))) {
		req_uri = estrdup(Z_STRVAL_P(zuri) + Z_STRLEN_P(base_uri));
	} else {
		req_uri = estrdup(Z_STRVAL_P(zuri));
	}

	if (Z_TYPE_P(delimer) == IS_STRING && Z_STRLEN_P(delimer)) {
		/* guard added: the delimiter must not sit at the very start of
		 * req_uri before we peek at the preceding character */
		if ((query_str = strstr(req_uri, Z_STRVAL_P(delimer))) != NULL
				&& query_str > req_uri && *(query_str - 1) == '/') {
			tmp  = req_uri;
			rest = query_str + Z_STRLEN_P(delimer);
			if (*rest == '\0') {
				req_uri   = estrndup(req_uri, query_str - req_uri);
				query_str = NULL;
				efree(tmp);
			} else if (*rest == '/') {
				req_uri   = estrndup(req_uri, query_str - req_uri);
				query_str = estrdup(rest);
				efree(tmp);
			} else {
				query_str = NULL;
			}
		}
	}

	/* join the remaining path segments with '_' to form the
	 * controller (or action) name */
	seg = php_strtok_r(req_uri, YAF_ROUTER_URL_DELIMIETER, &ptrptr);
	while (seg) {
		seg_len = strlen(seg);
		if (seg_len) {
			smart_str_appendl(&route_result, seg, seg_len);
		}
		smart_str_appendc(&route_result, '_');
		seg = php_strtok_r(NULL, YAF_ROUTER_URL_DELIMIETER, &ptrptr);
	}

	if (route_result.len) {
		if (Z_BVAL_P(ctl_prefer)) {
			zend_update_property_stringl(yaf_request_ce, request,
					ZEND_STRL(YAF_REQUEST_PROPERTY_NAME_CONTROLLER), route_result.c, route_result.len - 1 TSRMLS_CC);
		} else {
			zend_update_property_stringl(yaf_request_ce, request,
					ZEND_STRL(YAF_REQUEST_PROPERTY_NAME_ACTION), route_result.c, route_result.len - 1 TSRMLS_CC);
		}
		efree(route_result.c);
	}

	if (query_str) {
		params = yaf_router_parse_parameters(query_str TSRMLS_CC);
		(void)yaf_request_set_params_multi(request, params TSRMLS_CC);
		zval_ptr_dtor(&params);
		efree(query_str);
	}

	efree(req_uri);

	return 1;
}
/* }}} */

/** {{{ proto public Yaf_Route_Map::route(Yaf_Request $req)
 */
PHP_METHOD(yaf_route_map, route) {
	yaf_request_t *request;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O", &request, yaf_request_ce) == FAILURE) {
		return;
	} else {
		RETURN_BOOL(yaf_route_map_route(getThis(), request TSRMLS_CC));
	}
}
/* }}} */

/** {{{ zval * yaf_route_map_assemble(yaf_route_t *this_ptr, zval *mvc, zval *query TSRMLS_DC)
 */
zval * yaf_route_map_assemble(yaf_route_t *this_ptr, zval *mvc, zval *query TSRMLS_DC) {
	char *tmp, *ptrptr, *pname;
	smart_str tvalue = {0};
	uint tmp_len, has_delim = 0;
	zval *uri, *delim, *ctl_prefer, **tmp_data;

	MAKE_STD_ZVAL(uri);

	ctl_prefer = zend_read_property(yaf_route_map_ce, this_ptr, ZEND_STRL(YAF_ROUTE_MAP_VAR_NAME_CTL_PREFER), 1 TSRMLS_CC);
	delim      = zend_read_property(yaf_route_map_ce, this_ptr, ZEND_STRL(YAF_ROUTE_MAP_VAR_NAME_DELIMETER), 1 TSRMLS_CC);

	if (IS_STRING == Z_TYPE_P(delim) && Z_STRLEN_P(delim)) {
		has_delim = 1;
	}

	do {
		if (Z_BVAL_P(ctl_prefer)) {
			if (zend_hash_find(Z_ARRVAL_P(mvc), ZEND_STRS(YAF_ROUTE_ASSEMBLE_ACTION_FORMAT), (void **)&tmp_data) == SUCCESS) {
				pname = estrndup(Z_STRVAL_PP(tmp_data), Z_STRLEN_PP(tmp_data));
			} else {
				yaf_trigger_error(YAF_ERR_TYPE_ERROR TSRMLS_CC, "%s", "Missing the 'action' parameter in the first argument");
				break;
			}
		} else {
			if (zend_hash_find(Z_ARRVAL_P(mvc), ZEND_STRS(YAF_ROUTE_ASSEMBLE_CONTROLLER_FORMAT), (void **)&tmp_data) == SUCCESS) {
				pname = estrndup(Z_STRVAL_PP(tmp_data), Z_STRLEN_PP(tmp_data));
			} else {
				yaf_trigger_error(YAF_ERR_TYPE_ERROR TSRMLS_CC, "%s", "Missing the 'controller' parameter in the first argument");
				break;
			}
		}

		/* turn underscores back into path segments */
		tmp = php_strtok_r(pname, "_", &ptrptr);
		while (tmp) {
			tmp_len = strlen(tmp);
			if (tmp_len) {
				smart_str_appendc(&tvalue, '/');
				smart_str_appendl(&tvalue, tmp, tmp_len);
			}
			tmp = php_strtok_r(NULL, "_", &ptrptr);
		}
		efree(pname);

		/* NULL check added: $query is optional, see PHP_METHOD(yaf_route_map, assemble) */
		if (query && IS_ARRAY == Z_TYPE_P(query)) {
			uint key_len, i = 0;
			char *key;
			ulong key_idx;
			zval **tmp_data;

			if (has_delim) {
				smart_str_appendc(&tvalue, '/');
				smart_str_appendl(&tvalue, Z_STRVAL_P(delim), Z_STRLEN_P(delim));
			}

			for (zend_hash_internal_pointer_reset(Z_ARRVAL_P(query));
					zend_hash_get_current_data(Z_ARRVAL_P(query), (void **)&tmp_data) == SUCCESS;
					zend_hash_move_forward(Z_ARRVAL_P(query))) {
				if (IS_STRING == Z_TYPE_PP(tmp_data)
						&& HASH_KEY_IS_STRING == zend_hash_get_current_key_ex(Z_ARRVAL_P(query), &key, &key_len, &key_idx, 0, NULL)) {
					if (has_delim) {
						smart_str_appendc(&tvalue, '/');
						smart_str_appendl(&tvalue, key, key_len - 1);
						smart_str_appendc(&tvalue, '/');
						smart_str_appendl(&tvalue, Z_STRVAL_PP(tmp_data), Z_STRLEN_PP(tmp_data));
					} else {
						smart_str_appendc(&tvalue, i == 0 ? '?' : '&');
						smart_str_appendl(&tvalue, key, key_len - 1);
						smart_str_appendc(&tvalue, '=');
						smart_str_appendl(&tvalue, Z_STRVAL_PP(tmp_data), Z_STRLEN_PP(tmp_data));
					}
				}
				i += 1;
			}
		}

		smart_str_0(&tvalue);
		ZVAL_STRING(uri, tvalue.c, 1);
		smart_str_free(&tvalue);
		return uri;
	} while (0);

	ZVAL_NULL(uri);
	return uri;
}
/* }}} */

/** {{{ proto public Yaf_Route_Map::__construct(bool $controller_prefer = FALSE, string $delimer = '#!')
 */
PHP_METHOD(yaf_route_map, __construct) {
	char *delim = NULL;
	uint delim_len = 0;
	zend_bool controller_prefer = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|bs", &controller_prefer, &delim, &delim_len) == FAILURE) {
		YAF_UNINITIALIZED_OBJECT(getThis());
		return;
	}

	(void)yaf_route_map_instance(getThis(), controller_prefer, delim, delim_len TSRMLS_CC);
}
/* }}} */

/** {{{ proto public Yaf_Route_Map::assemble(array $mvc[, array $query = NULL])
 */
PHP_METHOD(yaf_route_map, assemble) {
	zval *mvc, *query = NULL; /* initialized: $query is optional */
	zval *return_uri;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a|a", &mvc, &query) == FAILURE) {
		return;
	} else {
		if ((return_uri = yaf_route_map_assemble(getThis(), mvc, query TSRMLS_CC))) {
			RETURN_ZVAL(return_uri, 0, 1);
		}
	}
}
/* }}} */

/** {{{ yaf_route_map_methods
 */
zend_function_entry yaf_route_map_methods[] = {
	PHP_ME(yaf_route_map, __construct, yaf_route_map_construct_arginfo, ZEND_ACC_PUBLIC | ZEND_ACC_CTOR)
	PHP_ME(yaf_route_map, route,       yaf_route_route_arginfo,         ZEND_ACC_PUBLIC)
	PHP_ME(yaf_route_map, assemble,    yaf_route_assemble_arginfo,      ZEND_ACC_PUBLIC)
	{NULL, NULL, NULL}
};
/* }}} */

/** {{{ YAF_STARTUP_FUNCTION
 */
YAF_STARTUP_FUNCTION(route_map) {
	zend_class_entry ce;

	YAF_INIT_CLASS_ENTRY(ce, "Yaf_Route_Map", "Yaf\\Route\\Map", yaf_route_map_methods);
	yaf_route_map_ce = zend_register_internal_class_ex(&ce, NULL, NULL TSRMLS_CC);
	zend_class_implements(yaf_route_map_ce TSRMLS_CC, 1, yaf_route_ce);
	yaf_route_map_ce->ce_flags |= ZEND_ACC_FINAL_CLASS;

	zend_declare_property_bool(yaf_route_map_ce, ZEND_STRL(YAF_ROUTE_MAP_VAR_NAME_CTL_PREFER), 0, ZEND_ACC_PROTECTED TSRMLS_CC);
	zend_declare_property_null(yaf_route_map_ce, ZEND_STRL(YAF_ROUTE_MAP_VAR_NAME_DELIMETER), ZEND_ACC_PROTECTED TSRMLS_CC);

	return SUCCESS;
}
/* }}} */

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * End:
 * vim600: noet sw=4 ts=4 fdm=marker
 * vim<600: noet sw=4 ts=4
 */
mihaiblidaru/onlinejudge.org
Volume1/100/100.c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Length of the 3n+1 (Collatz) sequence starting at n, counting n itself. */
int calculate_num_cycles(int n) {
	int i = 1;
	while (n != 1) {
		i++;
		if (n % 2 != 0)
			n = 3 * n + 1;
		else
			n = n / 2;
	}
	return i;
}

int main() {
	int i = 0, j = 0, max, res, a;
	int x, y;

	while (scanf("%d %d", &i, &j) == 2) {
		/* i and j may come in either order; iterate over [min, max] */
		if (j > i) {
			x = i;
			y = j;
		} else {
			x = j;
			y = i;
		}

		max = 0;
		for (a = x; a <= y; a++) {
			res = calculate_num_cycles(a);
			if (res > max)
				max = res;
		}

		/* output preserves the original order of i and j */
		printf("%d %d %d\n", i, j, max);
	}

	return 0;
}
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUICollectionViewFlowLayout.h
#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

@interface MWZUICollectionViewFlowLayout : UICollectionViewFlowLayout

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/ResultList/MWZUIGroupedResultListDelegate.h
#ifndef MWZUIGroupedResultListDelegate_h
#define MWZUIGroupedResultListDelegate_h

#import <MapwizeSDK/MapwizeSDK.h>

@protocol MWZUIGroupedResultListDelegate <NSObject>

- (void) didSelect:(id<MWZObject>) mapwizeObject universe:(MWZUniverse*) universe forQuery:(NSString*) query;

@end

#endif
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/DefaultScene/MWZUIMapViewMenuBarDelegate.h
//
//  MWZUIMapViewMenuBarDelegate.h
//  MapwizeUI
//
//  Created by Etienne on 29/10/2019.
//  Copyright © 2019 <NAME>. All rights reserved.
//

#ifndef MWZUIMapViewMenuBarDelegate_h
#define MWZUIMapViewMenuBarDelegate_h

@protocol MWZUIMapViewMenuBarDelegate <NSObject>

- (void) didTapOnSearchButton;
- (void) didTapOnMenuButton;
- (void) didTapOnDirectionButton;

@end

#endif /* MWZUIMapViewMenuBarDelegate_h */
Mapwize/mapwize-ui-ios
MapwizeUI/Component/MWZUIFullContent/MWZUIFullContentViewComponentButton.h
//
//  MWZUIFullContentViewComponentButton.h
//  BottomSheet
//
//  Created by Etienne on 30/09/2020.
//

#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

/**
 Defines the MapwizeUI button types
 */
typedef NS_ENUM(NSUInteger, MWZUIFullContentViewComponentButtonType) {
    MWZUIFullContentViewComponentButtonDirection,
    MWZUIFullContentViewComponentButtonPhone,
    MWZUIFullContentViewComponentButtonWebsite,
    MWZUIFullContentViewComponentButtonShare,
    MWZUIFullContentViewComponentButtonCustom
};

/**
 MWZUIFullContentViewComponentButton is used by the SDK to display buttons in the selected content view when it is expanded.
 */
@interface MWZUIFullContentViewComponentButton : UIButton

/**
 Creates a MWZUIFullContentViewComponentButton
 @param title displayed on the button
 @param image shown on the left of the button
 @param color of the button
 @param outlined If YES, the color will be used as the background color
 */
- (instancetype)initWithTitle:(NSString*) title image:(UIImage*) image color:(UIColor*) color outlined:(BOOL) outlined;

@end

NS_ASSUME_NONNULL_END
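A minimal usage sketch for the button above, assuming a host app that links MapwizeUI and runs this inside a view controller; the title, color, and the "phone" image asset are illustrative values, not part of this header:

#import "MWZUIFullContentViewComponentButton.h"

// Hypothetical call site: builds an outlined call button.
// [UIImage imageNamed:@"phone"] assumes an asset named "phone" exists
// in the host app; it is not provided by this header.
MWZUIFullContentViewComponentButton *callButton =
    [[MWZUIFullContentViewComponentButton alloc] initWithTitle:@"Call"
                                                         image:[UIImage imageNamed:@"phone"]
                                                         color:[UIColor blueColor]
                                                      outlined:YES];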
Mapwize/mapwize-ui-ios
MapwizeUI/Utils/MWZUIOpeningHoursUtils.h
#import <Foundation/Foundation.h>

NS_ASSUME_NONNULL_BEGIN

@class MWZUIOpeningInterval;

@interface MWZUIOpeningHoursUtils : NSObject

+ (NSArray<MWZUIOpeningInterval*>*) getSortedIntervals:(NSArray<NSDictionary*>*) input;
+ (NSString*) getCurrentOpeningStateString:(NSArray<NSDictionary*>*) input timezoneCode:(NSString*)timezoneCode;
+ (NSString*) getDayName:(NSInteger)day;
+ (NSString*) getFormattedHours:(NSInteger)minutes;
+ (NSArray<NSDictionary*>*) getOpeningStrings:(NSArray<NSDictionary*>*) input;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Component/MWZUIBottomSheet.h
#import <UIKit/UIKit.h>
#import <MapwizeSDK/MapwizeSDK.h>
#import "MWZUIBottomSheetDelegate.h"
#import "MWZUIDefaultContentViewDelegate.h"
#import "MWZUIFullContentViewDelegate.h"

@class MWZPlaceDetails;

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIBottomSheet : UIView <MWZUIDefaultContentViewDelegate, MWZUIFullContentViewDelegate>

@property (nonatomic, weak) id<MWZUIBottomSheetDelegate> delegate;

- (instancetype)initWithFrame:(CGRect) frame color:(UIColor*)color;
- (void) showPlacePreview:(MWZPlacePreview*)placePreview;
- (void) showPlaceDetails:(MWZPlaceDetails*)placeDetails shouldShowInformationButton:(BOOL) shouldShowInformationButton shouldShowReportRow:(BOOL) shouldShowReportRow language:(NSString*)language;
- (void) showPlacelist:(MWZPlacelist*)placelist shouldShowInformationButton:(BOOL) shouldShowInformationButton language:(NSString*)language;
- (void) showPlace:(MWZPlace*)place shouldShowInformationButton:(BOOL) shouldShowInformationButton language:(NSString*)language;
- (void) removeContent;
- (void) viewDidLayoutSubviews;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/BottomInfoView/MWZUIBottomInfoView.h
#import <UIKit/UIKit.h>
@import MapwizeSDK;

@protocol MWZUIBottomInfoViewDelegate;

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIBottomInfoView : UIView

@property (nonatomic, weak) id<MWZUIBottomInfoViewDelegate> delegate;

- (instancetype) initWithColor:(UIColor*) color;
- (void) selectContentWithPlace:(MWZPlace*) place language:(NSString*) language showInfoButton:(BOOL) showInfoButton;
- (void) selectContentWithPlacePreview:(MWZPlacePreview*) placePreview;
- (void) selectContentWithPlaceList:(MWZPlacelist*) placeList language:(NSString*) language showInfoButton:(BOOL) showInfoButton;
- (void) unselectContent;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Component/MWZUIBooking/MWZUIBookingView.h
#import <UIKit/UIKit.h> #import <MapwizeSDK/MapwizeSDK.h> #import "MWZUIFullContentViewComponentRow.h" NS_ASSUME_NONNULL_BEGIN @interface MWZUIBookingView : MWZUIFullContentViewComponentRow @property (nonatomic) UIScrollView* scrollView; - (instancetype) initWithFrame:(CGRect)frame placeDetails:(MWZPlaceDetails*)placeDetails color:(UIColor*)color; + (BOOL) isOccupied:(MWZPlaceDetails*)placeDetails; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/SearchScene/MWZUISearchScene.h
#import <UIKit/UIKit.h> #import "MWZUISearchQueryBar.h" #import "MWZUISearchViewControllerOptions.h" #import "MWZUIGroupedResultList.h" #import "MWZUISearchSceneDelegate.h" #import "MWZUIScene.h" #import "MWZUIGroupedResultListDelegate.h" @import MapwizeSDK; NS_ASSUME_NONNULL_BEGIN @interface MWZUISearchScene : NSObject <MWZUIScene, MWZUISearchQueryBarDelegate, MWZUIGroupedResultListDelegate> @property (nonatomic) MWZUISearchViewControllerOptions* searchOptions; @property (nonatomic) MWZUISearchQueryBar* searchQueryBar; @property (nonatomic) MWZUIGroupedResultList* resultList; @property (nonatomic) UIView* resultContainerView; @property (nonatomic) UIView* backgroundView; @property (nonatomic) NSLayoutConstraint* resultContainerViewHeightConstraint; @property (nonatomic) UIColor* mainColor; @property (nonatomic, weak) id<MWZUISearchSceneDelegate> delegate; - (void) clearSearch; - (void) showSearchResults:(NSArray<id<MWZObject>>*) results universes:(NSArray<MWZUniverse*>*) universes activeUniverse:(MWZUniverse*) activeUniverse withLanguage:(NSString*) language forQuery:(NSString*) query; - (void) showResults:(NSArray<id<MWZObject>> *)results withLanguage:(NSString *)language forQuery:(NSString*) query; - (void) setNetworkError:(BOOL)networkError; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUIPagerHeaderView.h
<gh_stars>10-100 #import <UIKit/UIKit.h> #import "MWZUIPagerHeaderViewDelegate.h" @class MWZUIPagerHeaderTitle; NS_ASSUME_NONNULL_BEGIN @interface MWZUIPagerHeaderView : UIView @property (nonatomic, weak) id<MWZUIPagerHeaderViewDelegate> delegate; @property (nonatomic) UIStackView* stackView; @property (nonatomic) NSArray<MWZUIPagerHeaderTitle*>* titles; @property (nonatomic) NSMutableArray<UIButton*>* buttons; @property (nonatomic) MWZUIPagerHeaderTitle* selectedTitle; @property (nonatomic) UIView* selectorView; @property (nonatomic) UIColor* color; @property (nonatomic) UIColor* haloColor; - (instancetype) initWithColor:(UIColor*) color; - (void) setSelectedIndex:(NSNumber*) index; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/DirectionScene/MWZUIDirectionSceneDelegate.h
#ifndef MWZUIDirectionSceneDelegate_h
#define MWZUIDirectionSceneDelegate_h

@import MapwizeSDK;

@class MWZUIDirectionScene;

@protocol MWZUIDirectionSceneDelegate <NSObject>

- (void) directionSceneDidTapOnBackButton:(MWZUIDirectionScene*) scene;
- (void) directionSceneDidTapOnFromButton:(MWZUIDirectionScene*) scene;
- (void) directionSceneDidTapOnToButton:(MWZUIDirectionScene*) scene;
- (void) directionSceneDidTapOnSwapButton:(MWZUIDirectionScene*) scene;
- (void) directionSceneDirectionModeDidChange:(MWZDirectionMode*) directionMode;
- (void) searchDirectionQueryDidChange:(NSString*) query;
- (void) didSelect:(id<MWZObject>)mapwizeObject universe:(MWZUniverse*) universe forQuery:(NSString*) query;
- (void) directionSceneDidTapOnCurrentLocation:(MWZUIDirectionScene*) scene;

@end

#endif /* MWZUIDirectionSceneDelegate_h */
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUIIssuesReportingViewController.h
#import <UIKit/UIKit.h>
@import MapwizeSDK;

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIIssuesReportingViewController : UIViewController

- (instancetype) initWithVenue:(MWZVenue*)venue placeDetails:(MWZPlaceDetails*)placeDetails userInfo:(MWZUserInfo*)userInfo language:(NSString*)language color:(UIColor*)color api:(id<MWZMapwizeApi>)api;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/DefaultScene/MWZUIMapViewMenuBar.h
#import <UIKit/UIKit.h>
#import "MWZUIMapViewMenuBarDelegate.h"

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIMapViewMenuBar : UIView

@property (nonatomic) UIButton* menuButton;
@property (nonatomic) UIActivityIndicatorView* activityIndicator;
@property (nonatomic) UIButton* directionButton;
@property (nonatomic) UILabel* searchQueryLabel;
@property (nonatomic, weak) id<MWZUIMapViewMenuBarDelegate> delegate;

- (void) showActivityIndicator;
- (void) hideActivityIndicator;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/CompassView/MWZUICompass.h
#import <UIKit/UIKit.h>
#import <CoreLocation/CoreLocation.h>

@protocol MWZUICompassDelegate;

NS_ASSUME_NONNULL_BEGIN

@interface MWZUICompass : UIImageView

@property (nonatomic, weak) id<MWZUICompassDelegate> delegate;

- (void) updateCompass:(CLLocationDirection) direction;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/SearchScene/MWZUISearchQueryBarDelegate.h
#ifndef MWZUISearchQueryBarDelegate_h
#define MWZUISearchQueryBarDelegate_h

@protocol MWZUISearchQueryBarDelegate <NSObject>

- (void) didTapOnBackButton;
- (void) searchQueryDidChange:(NSString*) query;

@end

#endif /* MWZUISearchQueryBarDelegate_h */
Mapwize/mapwize-ui-ios
MapwizeUI/MapViewController/MWZUIView.h
#import <UIKit/UIKit.h>
@import MapwizeSDK;

@protocol MWZUIViewDelegate;
@protocol MWZUISearchSceneDelegate;
@protocol MWZUIDefaultSceneDelegate;
@protocol MWZUIDirectionSceneDelegate;
@protocol MWZUIFloorControllerDelegate;
@protocol MWZUICompassDelegate;
@protocol MWZUIFollowUserButtonDelegate;
@protocol MWZUILanguagesButtonDelegate;
@protocol MWZUIUniversesButtonDelegate;

@class MWZUISceneCoordinator;
@class MWZUISettings;
@class MWZUIOptions;
@class MWZUIFollowUserButton;
@class MWZUICompass;

NS_ASSUME_NONNULL_BEGIN

/**
 MWZUIView contains a MWZMapView and some basic UI components such as the SearchBar, DirectionSearchEngine, FloorController and others.
 It is an easy way to integrate Mapwize without having to build the entire UI yourself.
 */
@interface MWZUIView : UIView

/// The MWZMapView embedded in the MWZUIView
@property (nonatomic) MWZMapView* mapView;

/// The MWZUIViewDelegate that allows you to react to the map events
@property (nonatomic, weak) id<MWZUIViewDelegate> delegate;

/// The MWZUIFollowUserButton is exposed to help you align your UI with the existing components
@property (nonatomic) MWZUIFollowUserButton* followUserButton;

@property (nonatomic) MWZUICompass* compassView;

/**
 Inits the MWZUIView using the global MWZMapwizeConfiguration (see the SDK documentation for more information)
 */
- (instancetype) initWithFrame:(CGRect)frame mapwizeOptions:(MWZUIOptions*) options uiSettings:(MWZUISettings*) uiSettings;

/**
 Inits the MWZUIView using the MWZMapwizeConfiguration passed as a parameter (see the SDK documentation for more information)
 */
- (instancetype) initWithFrame:(CGRect)frame mapwizeConfiguration:(MWZMapwizeConfiguration*) mapwizeConfiguration mapwizeOptions:(MWZUIOptions*) options uiSettings:(MWZUISettings*) uiSettings;

/**
 Sets the indoorLocationProvider on the map in order to display the user location returned by this provider
 */
- (void) setIndoorLocationProvider:(ILIndoorLocationProvider*) indoorLocationProvider;

/**
 Helper method to gain access to the map using an access key
 */
- (void) grantAccess:(NSString*) accessKey success:(void (^)(void)) success failure:(void (^)(NSError* error)) failure;

/**
 Selects the place passed as a parameter. Selecting a place adds a marker on the place and opens the bottom view to show information about it.
 */
- (void) selectPlace:(MWZPlace*) place centerOn:(BOOL) centerOn;

/**
 Selects the placelist passed as a parameter. Selecting a placelist adds a marker on each place contained in the placelist and opens the bottom view to show information about it.
 */
- (void) selectPlaceList:(MWZPlacelist*) placeList;

/**
 Unselects the selected place or placelist and hides the related UI components
 */
- (void) unselectContent;

/**
 Displays a direction on the map and changes the UI to display the direction interface
 */
- (void) setDirection:(MWZDirection*) direction from:(id<MWZDirectionPoint>) from to:(id<MWZDirectionPoint>) to directionMode:(MWZDirectionMode*) directionMode;

@end

NS_ASSUME_NONNULL_END
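A hedged integration sketch based on the doc comments above, e.g. from a UIViewController's viewDidLoad. MWZUIOptions and MWZUISettings are assumed to support plain -init, since their headers are not part of this dump:

#import "MapwizeUI.h"

// Inside a UIViewController that conforms to MWZUIViewDelegate.
// -init on MWZUIOptions/MWZUISettings is an assumption; check the
// real headers for the actual initializers.
MWZUIOptions *options = [[MWZUIOptions alloc] init];
MWZUISettings *settings = [[MWZUISettings alloc] init];
MWZUIView *mapwizeView = [[MWZUIView alloc] initWithFrame:self.view.bounds
                                           mapwizeOptions:options
                                               uiSettings:settings];
mapwizeView.delegate = self;
[self.view addSubview:mapwizeView];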
Mapwize/mapwize-ui-ios
MapwizeUI/DirectionModule/MWZUIDirectionInfo.h
#import <UIKit/UIKit.h>
@import MapwizeSDK;

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIDirectionInfo : UIView

- (instancetype) initWithColor:(UIColor*) color;
- (void) showLoading;
- (void) hideLoading;
- (void) showErrorMessage:(NSString*) message;
- (void) setInfoWith:(double) directionTravelTime directionDistance:(double) directionDistance;
- (void) close;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/FloorController/MWZUIFloorController.h
#import <UIKit/UIKit.h> @import MapwizeSDK; #import "MWZUIFloorControllerDelegate.h" NS_ASSUME_NONNULL_BEGIN @interface MWZUIFloorController : UIScrollView @property (nonatomic,weak) id<MWZUIFloorControllerDelegate> floorControllerDelegate; - (instancetype) initWithColor:(UIColor*) color; - (void) mapwizeFloorsDidChange:(NSArray<MWZFloor*>*) floors showController:(BOOL) showController language:(NSString*)language; - (void) mapwizeFloorWillChange:(MWZFloor*) floor; - (void) mapwizeFloorDidChange:(MWZFloor*) floor; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/UniverseButton/MWZUIUniversesButtonDelegate.h
@import MapwizeSDK;

#ifndef MWZUIUniversesButtonDelegate_h
#define MWZUIUniversesButtonDelegate_h

@protocol MWZUIUniversesButtonDelegate <NSObject>

- (void) didSelectUniverse:(MWZUniverse*) universe;

@end

#endif
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/DirectionScene/MWZUIDirectionModeSegmentDelegate.h
#ifndef MWZUIDirectionModeSegmentDelegate_h
#define MWZUIDirectionModeSegmentDelegate_h

@import MapwizeSDK;

@class MWZUIDirectionModeSegment;

@protocol MWZUIDirectionModeSegmentDelegate <NSObject>

- (void) directionModeSegment:(MWZUIDirectionModeSegment*) segment didChangeMode:(MWZDirectionMode*) mode;

@end

#endif /* MWZUIDirectionModeSegmentDelegate_h */
Mapwize/mapwize-ui-ios
MapwizeUI/FollowUserButton/MWZUIFollowUserButton.h
#import <UIKit/UIKit.h>
@import MapwizeSDK;

@protocol MWZUIFollowUserButtonDelegate;

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIFollowUserButton : UIButton

@property (nonatomic, weak) id<MWZUIFollowUserButtonDelegate> delegate;

- (instancetype) initWithColor:(UIColor*) color;
- (void) setFollowUserMode:(MWZFollowUserMode) mode;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUIPagerHeaderViewDelegate.h
#ifndef MWZUIPagerHeaderViewDelegate_h
#define MWZUIPagerHeaderViewDelegate_h

@class MWZUIPagerHeaderView;
@class MWZUIPagerHeaderTitle;

@protocol MWZUIPagerHeaderViewDelegate <NSObject>

- (void) pagerHeader:(MWZUIPagerHeaderView*) pagerHeader didChangeTitle:(MWZUIPagerHeaderTitle*) title;

@end

#endif /* MWZUIPagerHeaderViewDelegate_h */
Mapwize/mapwize-ui-ios
MapwizeUI/FollowUserButton/MWZUIFollowUserButtonDelegate.h
#ifndef MWZUIFollowUserButtonDelegate_h
#define MWZUIFollowUserButtonDelegate_h

@import MapwizeSDK; /* import added for MWZFollowUserMode and ILIndoorLocation */

@class MWZUIFollowUserButton;

@protocol MWZUIFollowUserButtonDelegate <NSObject>

- (void) didTapWithoutLocation;
- (void) followUserButton:(MWZUIFollowUserButton*) followUserButton didChangeFollowUserMode:(MWZFollowUserMode) followUserMode;
- (ILIndoorLocation*) followUserButtonRequiresUserLocation:(MWZUIFollowUserButton*) followUserButton;
- (MWZFollowUserMode) followUserButtonRequiresFollowUserMode:(MWZUIFollowUserButton*) followUserButton;

@end

#endif
Mapwize/mapwize-ui-ios
MapwizeUI/Events/MWZUIEventChannel.h
#ifndef MWZUIEventChannel_h
#define MWZUIEventChannel_h

#import <Foundation/Foundation.h>

/**
 Enum to identify how a place or placelist has been selected
 */
typedef NS_ENUM(NSUInteger, MWZUIEventChannel) {
    /// The user clicked on the place icon
    MWZUIEventChannelMapClick,
    /// The user selected the place in the search result list
    MWZUIEventChannelSearch,
    /// The user selected the place in the main search list
    MWZUIEventChannelMainSearch
};

#endif
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/SearchScene/MWZUISearchQueryBar.h
#import <UIKit/UIKit.h> #import "MWZUISearchQueryBarDelegate.h" NS_ASSUME_NONNULL_BEGIN @interface MWZUISearchQueryBar : UIView <UITextFieldDelegate> @property (nonatomic) UIButton* backButton; @property (nonatomic) UIButton* clearButton; @property (nonatomic) UITextField* searchTextField; @property (nonatomic, weak) id<MWZUISearchQueryBarDelegate> delegate; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUILabelRow.h
#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

@interface MWZUILabelRow : UIView

@property (nonatomic) UIImage* image;
@property (nonatomic) UIColor* color;

- (instancetype) initWithImage:(UIImage*)image label:(NSString*)label color:(UIColor*)color;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/MapwizeUI.h
#import <UIKit/UIKit.h>

FOUNDATION_EXPORT double MapwizeUIVersionNumber;
FOUNDATION_EXPORT const unsigned char MapwizeUIVersionString[];

#import "MWZUIOptions.h"
#import "MWZUIView.h"
#import "MWZUIViewDelegate.h"
#import "MWZUIEventChannel.h"
#import "MWZUISettings.h"
#import "MWZUIBottomSheetComponents.h"
#import "MWZUIFullContentViewComponentButton.h"
#import "MWZUIFullContentViewComponentRow.h"
#import "MWZUIIconTextButton.h"
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/DefaultScene/MWZUIDefaultSceneDelegate.h
#ifndef MWZUIDefaultSceneDelegate_h
#define MWZUIDefaultSceneDelegate_h

@class MWZUIBottomSheetComponents;
@class MWZPlaceDetails;
@class MWZPlacelist;

@protocol MWZUIDefaultSceneDelegate <NSObject>

- (void) didTapOnSearchButton;
- (void) didTapOnMenuButton;
- (void) didTapOnInformationButton;
- (MWZUIBottomSheetComponents*) requireComponentForPlaceDetails:(MWZPlaceDetails*)placeDetails withDefaultComponents:(MWZUIBottomSheetComponents*)components;
- (MWZUIBottomSheetComponents*) requireComponentForPlacelist:(MWZPlacelist*)placelist withDefaultComponents:(MWZUIBottomSheetComponents*)components;
- (void) didClose;
- (void) didTapOnDirectionButton;
- (void) didTapOnCallButton;
- (void) didTapOnShareButton;
- (void) didTapOnWebsiteButton;
- (void) didTapOnReportIssueButton:(MWZPlaceDetails *)details;

@end

#endif /* MWZUIDefaultSceneDelegate_h */
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUIDefaultContentViewDelegate.h
#ifndef MWZUIDefaultContentViewDelegate_h
#define MWZUIDefaultContentViewDelegate_h

@protocol MWZUIDefaultContentViewDelegate <NSObject>

- (void) didTapOnDirectionButton;
- (void) didTapOnCallButton;
- (void) didTapOnShareButton;
- (void) didTapOnWebsiteButton;
- (void) didTapOnInfoButton;

@end

#endif /* MWZUIDefaultContentViewDelegate_h */
Mapwize/mapwize-ui-ios
MapwizeUI/BottomInfoView/MWZUIBottomInfoViewDelegate.h
#ifndef MWZUIBottomInfoViewDelegate_h
#define MWZUIBottomInfoViewDelegate_h

@protocol MWZUIBottomInfoViewDelegate <NSObject>

- (void) didPressDirection;
- (void) didPressInformation;

@end

#endif
Mapwize/mapwize-ui-ios
MapwizeUI/LanguageButton/MWZUILanguagesButtonDelegate.h
#ifndef MWZUILanguagesButtonDelegate_h
#define MWZUILanguagesButtonDelegate_h

@protocol MWZUILanguagesButtonDelegate <NSObject>

- (void) didSelectLanguage:(NSString*) language;

@end

#endif
Mapwize/mapwize-ui-ios
MapwizeUI/FloorController/MWZUIFloorControllerDelegate.h
#ifndef MWZUIFloorControllerDelegate_h
#define MWZUIFloorControllerDelegate_h

@class MWZUIFloorController;

@protocol MWZUIFloorControllerDelegate <NSObject>

- (void) floorController:(MWZUIFloorController*) floorController didSelect:(NSNumber*) floorOrder;

@end

#endif
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/DefaultScene/MWZUIDefaultScene.h
<gh_stars>10-100 #import <UIKit/UIKit.h> #import "MWZUIMapViewMenuBar.h" #import "MWZUIMapViewMenuBarDelegate.h" #import "MWZUIDefaultSceneDelegate.h" #import "MWZUIBottomInfoView.h" #import "MWZUIBottomInfoViewDelegate.h" #import "MWZUIScene.h" #import "MWZUIDefaultSceneProperties.h" #import "MWZUIBottomSheet.h" NS_ASSUME_NONNULL_BEGIN @interface MWZUIDefaultScene : NSObject <MWZUIScene, MWZUIMapViewMenuBarDelegate, MWZUIBottomInfoViewDelegate, MWZUIBottomSheetDelegate> - (instancetype) initWith:(UIColor*) mainColor menuIsHidden:(BOOL) menuIsHidden; @property (nonatomic) MWZUIMapViewMenuBar* menuBar; @property (nonatomic) MWZUIBottomSheet* bottomSheet; @property (nonatomic) UIColor* mainColor; @property (assign) BOOL menuIsHidden; @property (nonatomic, weak) id<MWZUIDefaultSceneDelegate> delegate; @property (nonatomic) MWZUIDefaultSceneProperties* sceneProperties; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/DirectionScene/MWZUIDirectionHeader.h
<gh_stars>10-100 #import <UIKit/UIKit.h> #import "MWZUIDirectionHeaderDelegate.h" @import MapwizeSDK; NS_ASSUME_NONNULL_BEGIN @class MWZUIBorderedTextField; @interface MWZUIDirectionHeader : UIView <UITextFieldDelegate> @property (nonatomic, weak) id<MWZUIDirectionHeaderDelegate> delegate; - (instancetype)initWithFrame:(CGRect)frame color:(UIColor*) mainColor; - (void) setButtonsHidden:(BOOL) isHidden; - (void) openFromSearch; - (void) closeFromSearch; - (void) openToSearch; - (void) closeToSearch; - (void) setFromText:(NSString*) text asPlaceHolder:(BOOL) asPlaceHolder; - (void) setToText:(NSString*) text asPlaceHolder:(BOOL) asPlaceHolder; - (void) setAvailableModes:(NSArray<MWZDirectionMode*>*) modes; - (void) setSelectedMode:(MWZDirectionMode*) mode; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUIPagerHeaderTitle.h
//
//  MWZUIPagerHeaderTitle.h
//  MapwizeUI
//
//  Created by Etienne on 09/10/2020.
//  Copyright © 2020 <NAME>. All rights reserved.
//

#import <Foundation/Foundation.h>

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIPagerHeaderTitle : NSObject

@property (nonatomic) NSNumber* index;
@property (nonatomic) NSString* title;

- (instancetype) initWithIndex:(NSNumber*)index title:(NSString*)title;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/FloorController/MWZUIFloorView.h
#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIFloorView : UILabel

@property (nonatomic) NSNumber* floor;
@property (nonatomic, assign) BOOL selected;
@property (nonatomic) UIColor* mainColor;

- (instancetype) initWithFrame:(CGRect) frame withIsSelected:(BOOL) isSelected mainColor:(UIColor*) mainColor;
- (void) setPreselected:(BOOL) preselected;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Component/MWZUIBooking/MWZUIBookingGridView.h
#import <UIKit/UIKit.h>
@import MapwizeSDK;

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIBookingGridView : UIView

@property (nonatomic) UIColor* color;

- (instancetype)initWithFrame:(CGRect)frame gridWidth:(double)gridWidth color:(UIColor*)color;
- (void) setCurrentTime:(double)hours events:(NSArray<MWZPlaceDetailsEvent*>*)events;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/ResultList/MWZUIGroupedResultList.h
#import <UIKit/UIKit.h>
@import MapwizeSDK;

@protocol MWZUIGroupedResultListDelegate;

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIGroupedResultList : UIView

@property (nonatomic, weak) id<MWZUIGroupedResultListDelegate> resultDelegate;

- (void) setLanguage:(NSString*) language;
- (void) swapResults:(NSArray<id<MWZObject>>*) results universes:(NSArray<MWZUniverse*>*) universes activeUniverse:(MWZUniverse*) activeUniverse language:(NSString*) language forQuery:(NSString*) query;
- (void) swapResults:(NSArray<id<MWZObject>> *)results language:(NSString *)language forQuery:(NSString*) query;
- (void) setNetworkError:(BOOL)networkError;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUITwoLinesRow.h
#import <Foundation/Foundation.h> #import "MWZUIFullContentViewComponentRow.h" NS_ASSUME_NONNULL_BEGIN @protocol MWZUITextFieldRowDelegate <NSObject> - (void) didChangeInput:(NSString*)value; @end @interface MWZUITwoLinesRow : MWZUIFullContentViewComponentRow @property (nonatomic, weak) id<MWZUITextFieldRowDelegate> delegate; - (instancetype) initWithImage:(UIImage*)image label:(NSString*)label view:(UIView*)view color:(UIColor*)color; - (void) setErrorMessage:(NSString*)message; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Commons/MWZUIBorderedTextField.h
#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIBorderedTextField : UITextField

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/QrCodeButton/MWZUIQrCodeButtonDelegate.h
#ifndef MWZUIQrCodeButtonDelegate_h
#define MWZUIQrCodeButtonDelegate_h

@class MWZUIQrCodeButton;

@protocol MWZUIQrCodeButtonDelegate <NSObject>

- (void) didTapOn:(MWZUIQrCodeButton*) button;

@end

#endif /* MWZUIQrCodeButtonDelegate_h */
Mapwize/mapwize-ui-ios
MapwizeUI/Component/MWZUIFullContent/MWZUIFullContentViewComponentRow.h
#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

/**
 Defines the MapwizeUI row types
 */
typedef NS_ENUM(NSUInteger, MWZUIFullContentViewComponentRowType) {
    MWZUIFullContentViewComponentRowOpeningHours,
    MWZUIFullContentViewComponentRowPhoneNumber,
    MWZUIFullContentViewComponentRowWebsite,
    MWZUIFullContentViewComponentRowSchedule,
    MWZUIFullContentViewComponentRowCustom
};

/**
 MWZUIFullContentViewComponentRow is the component that displays custom information as a row in the expanded bottom view
 */
@interface MWZUIFullContentViewComponentRow : UIView

@property (nonatomic) UIImage* image;
@property (nonatomic) UIView* contentView;
@property (nonatomic) UIColor* color;
@property (nonatomic) UITapGestureRecognizer* tapGestureRecognizer;
@property (nonatomic) MWZUIFullContentViewComponentRowType type;
@property (nonatomic, assign) BOOL infoAvailable;

/**
 Creates a MWZUIFullContentViewComponentRow
 @param image shown on the left of the row
 @param contentView the view that will be displayed in the row
 @param color of the row
 @param tapGestureRecognizer added to the row to respond to click events
 @param type the MWZUIFullContentViewComponentRowType. You should use the custom type if you are building a custom row
 @param infoAvailable if NO, the content will be replaced by a placeholder
 */
- (instancetype) initWithImage:(UIImage*)image contentView:(UIView*)contentView color:(UIColor*)color tapGestureRecognizer:(nullable UITapGestureRecognizer*)tapGestureRecognizer type:(MWZUIFullContentViewComponentRowType)type infoAvailable:(BOOL) infoAvailable;

@end

NS_ASSUME_NONNULL_END
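A sketch of building a custom row with the initializer above; the label text and the "info" asset name are illustrative values, not part of this header:

#import "MWZUIFullContentViewComponentRow.h"

// Hypothetical custom row wrapping a plain UILabel; nil is allowed
// for the tap gesture recognizer because the parameter is nullable.
UILabel *label = [[UILabel alloc] init];
label.text = @"Custom detail";
MWZUIFullContentViewComponentRow *row =
    [[MWZUIFullContentViewComponentRow alloc] initWithImage:[UIImage imageNamed:@"info"]
                                                contentView:label
                                                      color:[UIColor darkGrayColor]
                                       tapGestureRecognizer:nil
                                                       type:MWZUIFullContentViewComponentRowCustom
                                              infoAvailable:YES];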
Mapwize/mapwize-ui-ios
MapwizeUI/CompassView/MWZUICompassDelegate.h
#ifndef MWZUICompassDelegate_h
#define MWZUICompassDelegate_h

@class MWZUICompass;

@protocol MWZUICompassDelegate <NSObject>

- (void) didPress:(MWZUICompass*) compass;

@end

#endif
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUIIssueTypeCell.h
#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIIssueTypeCell : UICollectionViewCell

@property (nonatomic) UILabel* label;
@property (nonatomic) NSLayoutConstraint* width;
@property (nonatomic) UIColor* color;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUIFullContentViewDelegate.h
#ifndef MWZUIFullContentViewDelegate_h
#define MWZUIFullContentViewDelegate_h

@protocol MWZUIFullContentViewDelegate <NSObject>

- (void) didTapOnDirectionButton;
- (void) didTapOnCallButton;
- (void) didTapOnShareButton;
- (void) didTapOnWebsiteButton;
- (void) didTapOnInfoButton;
- (void) didTapOnReportIssueButton;

@end

#endif /* MWZUIFullContentViewDelegate_h */
Mapwize/mapwize-ui-ios
MapwizeUI/UniverseButton/MWZUIUniversesButton.h
#import <UIKit/UIKit.h>
@import MapwizeSDK;

@protocol MWZUIUniversesButtonDelegate;

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIUniversesButton : UIButton

@property (nonatomic, weak) id<MWZUIUniversesButtonDelegate> delegate;

- (instancetype) init;
- (void) showIfNeeded;
- (void) mapwizeAccessibleUniversesDidChange:(NSArray<MWZUniverse*>*) accessibleUniverses;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Component/Commons/MWZUIOpeningHoursTableViewCell.h
//
//  MWZUIOpeningHoursTableViewCell.h
//  BottomSheet
//
//  Created by Etienne on 02/10/2020.
//

#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIOpeningHoursTableViewCell : UITableViewCell

@property (nonatomic, retain) UILabel* dayLabel;
@property (nonatomic, retain) UILabel* hoursLabel;
@property (nonatomic, retain) UIImageView* toggleImage;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Component/MWZUICollectionViewCell.h
//
//  MWZUICollectionViewCell.h
//  BottomSheet
//
//  Created by Etienne on 28/09/2020.
//

#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

@interface MWZUICollectionViewCell : UICollectionViewCell

@property (nonatomic) UIImageView* imageView;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUIBottomSheetDelegate.h
#ifndef MWZUIBottomSheetDelegate_h
#define MWZUIBottomSheetDelegate_h

#import <MapwizeSDK/MapwizeSDK.h>

@class MWZUIBottomSheet;
@class MWZUIBottomSheetComponents;

@protocol MWZUIBottomSheetDelegate <NSObject>

- (MWZUIBottomSheetComponents*) requireComponentForPlaceDetails:(MWZPlaceDetails*)placeDetails withDefaultComponents:(MWZUIBottomSheetComponents*)components;
- (MWZUIBottomSheetComponents*) requireComponentForPlacelist:(MWZPlacelist*)placelist withDefaultComponents:(MWZUIBottomSheetComponents*)components;
- (void) didClose;
- (void) didTapOnDirectionButton;
- (void) didTapOnCallButton;
- (void) didTapOnShareButton;
- (void) didTapOnWebsiteButton;
- (void) didTapOnInfoButton;
- (void) didTapOnReportIssueButton:(MWZPlaceDetails*) details;

@end

#endif /* MWZUIBottomSheetDelegate_h */
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/DirectionScene/MWZUIDirectionModeSegment.h
#import <UIKit/UIKit.h>
#import "MWZUIDirectionModeSegmentDelegate.h"
@import MapwizeSDK;

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIDirectionModeSegment : UIView

@property (nonatomic, weak) id<MWZUIDirectionModeSegmentDelegate> delegate;
@property (nonatomic) UIScrollView* scrollView;
@property (nonatomic) UIStackView* stackView;
@property (nonatomic) NSArray<MWZDirectionMode*>* modes;
@property (nonatomic) NSMutableArray<UIButton*>* buttons;
@property (nonatomic) MWZDirectionMode* selectedMode;
@property (nonatomic) UIView* selectorView;
@property (nonatomic) UIColor* color;
@property (nonatomic) UIColor* haloColor;

- (instancetype) initWithColor:(UIColor*) color;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Commons/MWZUIIconTextButton.h
#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

/**
 MWZUIIconTextButton is used by the SDK to display buttons in the selected content view.
 */
@interface MWZUIIconTextButton : UIButton

/**
 Creates a MWZUIIconTextButton
 @param title displayed on the button
 @param image shown on the left of the button
 @param color of the button
 @param outlined If YES, the color will be used as the background color
 */
- (instancetype)initWithTitle:(NSString*) title image:(UIImage*) image color:(UIColor*) color outlined:(BOOL) outlined;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/DirectionScene/MWZUIDirectionScene.h
#import <Foundation/Foundation.h>
#import "MWZUIDirectionHeader.h"
#import "MWZUIDirectionHeaderDelegate.h"
#import "MWZUIScene.h"
#import "MWZUIDirectionSceneDelegate.h"
#import "MWZUIGroupedResultListDelegate.h"
#import "MWZUIDirectionInfo.h"
#import "MWZUIGroupedResultList.h"
#import "MWZUICurrentLocationView.h"
@import MapwizeSDK;

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIDirectionScene : NSObject <MWZUIScene, MWZUIDirectionHeaderDelegate, MWZUIGroupedResultListDelegate>

@property (nonatomic, weak) id<MWZUIDirectionSceneDelegate> delegate;
@property (nonatomic) UIView* topConstraintView;
@property (nonatomic) UIView* resultListTopConstraintView;
@property (nonatomic) NSLayoutConstraint* resultListTopConstraint;
@property (nonatomic) NSLayoutConstraint* topConstraintViewMarginTop;
@property (nonatomic) MWZUIDirectionHeader* directionHeader;
@property (nonatomic) MWZUIDirectionInfo* directionInfo;
@property (nonatomic) MWZUIGroupedResultList* resultList;
@property (nonatomic) UIView* backgroundView;
@property (nonatomic) UIColor* mainColor;
@property (nonatomic) MWZUICurrentLocationView* currentLocationView;

- (void) setFromText:(NSString*) text asPlaceHolder:(BOOL) asPlaceHolder;
- (void) setToText:(NSString*) text asPlaceHolder:(BOOL) asPlaceHolder;
- (void) setAvailableModes:(NSArray<MWZDirectionMode*>*) modes;
- (void) setSelectedMode:(MWZDirectionMode*) mode;
- (void) setInfoWith:(double) directionTravelTime directionDistance:(double) directionDistance directionMode:(MWZDirectionMode*) directionMode;
- (void) showLoading;
- (void) hideLoading;
- (void) showErrorMessage:(NSString*) errorMessage;
- (void) setDirectionInfoHidden:(BOOL) hidden;
- (void) openFromSearch;
- (void) closeFromSearch;
- (void) openToSearch;
- (void) closeToSearch;
- (void) showSearchResults:(NSArray<id<MWZObject>>*) results universes:(NSArray<MWZUniverse*>*) universes activeUniverse:(MWZUniverse*) activeUniverse withLanguage:(NSString*) language forQuery:(NSString*) query;
- (void) setSearchResultsHidden:(BOOL) hidden;
- (void) setCurrentLocationViewHidden:(BOOL) hidden;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Component/MWZUIFullContent/MWZUIOpeningHoursView.h
//
//  MWZUIOpeningHoursView.h
//  BottomSheet
//
//  Created by Etienne on 30/09/2020.
//

#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

@interface MWZUIOpeningHoursView : UIView

@property (nonatomic, assign) BOOL expanded;
@property (nonatomic) NSArray* openingHours;

- (void) toggleExpanded;
- (void) setOpeningHours:(NSArray *)openingHours timezoneCode:(NSString*) timezoneCode;

@end

NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Component/MWZUIFullContent/MWZUIBottomSheetComponents.h
#import <Foundation/Foundation.h> #import "MWZUIFullContentViewComponentButton.h" #import "MWZUIFullContentViewComponentRow.h" #import "MWZUIIconTextButton.h" NS_ASSUME_NONNULL_BEGIN /** MWZUIBottomSheetComponents is used as container for all the content displayed in the bottomsheet. */ @interface MWZUIBottomSheetComponents : NSObject /// If true, the details view won't be able to expand @property (assign) BOOL preventExpand; /// The list of buttons displayed in the large view @property (nonatomic) NSMutableArray<MWZUIFullContentViewComponentButton*>* headerButtons; /// The list of rows displayed in the large view @property (nonatomic) NSMutableArray<MWZUIFullContentViewComponentRow*>* contentRows; /// The list of buttons displayed in the small view @property (nonatomic) NSMutableArray<MWZUIIconTextButton*>* minimizedViewButtons; - (instancetype) initWithHeaderButtons:(NSMutableArray<MWZUIFullContentViewComponentButton*>*)headerButtons contentRows:(NSMutableArray<MWZUIFullContentViewComponentRow*>*) contentRows minimizedViewButtons:(NSMutableArray<MWZUIIconTextButton*>*) minimizedViewButtons preventExpand:(BOOL)preventExpand; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUIPagerView.h
#import <UIKit/UIKit.h> NS_ASSUME_NONNULL_BEGIN @interface MWZUIPagerView : UIView - (instancetype)initWithFrame:(CGRect)frame color:(UIColor*)color; - (void) addSlide:(UIView*)slide named:(NSString*)name; - (void) build; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Commons/MWZUIPaddingLabel.h
#import <UIKit/UIKit.h> NS_ASSUME_NONNULL_BEGIN @interface MWZUIPaddingLabel : UILabel @property (nonatomic, assign) int topInset; @property (nonatomic, assign) int bottomInset; @property (nonatomic, assign) int leftInset; @property (nonatomic, assign) int rightInset; @end NS_ASSUME_NONNULL_END
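The header above does not show how the insets are applied; the following is a minimal sketch of one common way a padded UILabel is implemented, assuming the usual drawTextInRect: override. It is illustrative, not the library's actual MWZUIPaddingLabel.m.

#import "MWZUIPaddingLabel.h"

@implementation MWZUIPaddingLabel

- (void)drawTextInRect:(CGRect)rect {
    UIEdgeInsets insets = UIEdgeInsetsMake(self.topInset, self.leftInset,
                                           self.bottomInset, self.rightInset);
    [super drawTextInRect:UIEdgeInsetsInsetRect(rect, insets)];
}

- (CGSize)intrinsicContentSize {
    // Grow the intrinsic size so Auto Layout accounts for the padding.
    CGSize size = [super intrinsicContentSize];
    size.width += self.leftInset + self.rightInset;
    size.height += self.topInset + self.bottomInset;
    return size;
}

@end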
Mapwize/mapwize-ui-ios
MapwizeUI/Component/MWZUIDefaultContent/MWZUIDefaultContentView.h
#import <UIKit/UIKit.h> #import <MapwizeSDK/MapwizeSDK.h> #import "MWZUIDefaultContentViewDelegate.h" NS_ASSUME_NONNULL_BEGIN @class MWZPlaceDetails; @class MWZUIIconTextButton; @interface MWZUIDefaultContentView : UIView @property (nonatomic, weak) id<MWZUIDefaultContentViewDelegate> delegate; @property (nonatomic) MWZPlaceDetails* placeDetails; @property (nonatomic) MWZPlacePreview* placePreview; @property (nonatomic) UIColor* color; -(instancetype) initWithFrame:(CGRect)frame color:(UIColor*)color; -(NSMutableArray<MWZUIIconTextButton*>*) buildButtonsForPlacelist:(MWZPlacelist *)placelist showInfoButton:(BOOL)showInfoButton; -(NSMutableArray<MWZUIIconTextButton*>*) buildButtonsForPlaceDetails:(MWZPlaceDetails*)placeDetails showInfoButton:(BOOL)showInfoButton; -(NSMutableArray<MWZUIIconTextButton*>*) buildButtonsForPlace:(MWZPlace *)place showInfoButton:(BOOL)showInfoButton; -(void)setContentForPlaceDetails:(MWZPlaceDetails*)placeDetails language:(NSString*)language buttons:(NSMutableArray<MWZUIIconTextButton*>*)buttons; -(void)setContentForPlacelist:(MWZPlacelist*)placelist language:(NSString*)language buttons:(NSMutableArray<MWZUIIconTextButton*>*)buttons; -(void)setContentForPlace:(MWZPlace*)place language:(NSString*)language buttons:(NSMutableArray<MWZUIIconTextButton*>*)buttons; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/QrCodeButton/MWZUIQrCodeButton.h
#import <Foundation/Foundation.h> #import <UIKit/UIKit.h> @protocol MWZUIQrCodeButtonDelegate; NS_ASSUME_NONNULL_BEGIN @interface MWZUIQrCodeButton : UIButton @property (nonatomic, weak) id<MWZUIQrCodeButtonDelegate> delegate; - (instancetype) init; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUIIssueTypeView.h
#import <UIKit/UIKit.h> #import <MapwizeSDK/MapwizeSDK.h> #import "MWZUIFullContentViewComponentRow.h" #import "MWZUIIssueTypeViewDelegate.h" NS_ASSUME_NONNULL_BEGIN @interface MWZUIIssueTypeView : MWZUIFullContentViewComponentRow <UICollectionViewDelegate, UICollectionViewDataSource> @property (nonatomic) UICollectionView* collectionView; @property (nonatomic) NSLayoutConstraint* collectionViewHeight; @property (nonatomic) NSArray<MWZIssueType*>* issueTypes; @property (nonatomic) NSString* language; @property (nonatomic, weak) id<MWZUIIssueTypeViewDelegate> delegate; @property (nonatomic) UILabel* errorMessageLabel; - (instancetype) initWithFrame:(CGRect)frame issueTypes:(NSArray<MWZIssueType*>*)issueTypes color:(UIColor*)color language:(NSString*)language; - (void) setErrorMessage:(NSString*)message; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Utils/MWZUIOpeningInterval.h
// // MWZUIOpeningInterval.h // MapwizeUI // // Created by Etienne on 15/10/2020. // Copyright © 2020 <NAME>. All rights reserved. // #import <Foundation/Foundation.h> NS_ASSUME_NONNULL_BEGIN @interface MWZUIOpeningInterval : NSObject @property (nonatomic, assign) NSInteger day; @property (nonatomic, assign) NSInteger open; @property (nonatomic, assign) NSInteger close; - (instancetype) initWithDay:(NSInteger)day open:(NSInteger)open close:(NSInteger)close; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Utils/MWZUIOptions.h
#import <Foundation/Foundation.h> @import MapwizeSDK; /** This class allows you to pass MWZOptions to the map, including options that are not in the MapwizeSDK */ @interface MWZUIOptions : MWZOptions /** The location to center on */ @property (nonatomic, retain) MWZLatLngFloor* _Nullable centerOnLocation; @end
Mapwize/mapwize-ui-ios
MapwizeUI/Component/MWZUIFullContent/MWZUIFullContentView.h
#import <UIKit/UIKit.h> #import <MapwizeSDK/MapwizeSDK.h> #import "MWZUIFullContentViewDelegate.h" @class MWZPlaceDetails; @class MWZUIFullContentViewComponentButton; @class MWZUIFullContentViewComponentRow; NS_ASSUME_NONNULL_BEGIN @interface MWZUIFullContentView : UIView @property (nonatomic, weak) id<MWZUIFullContentViewDelegate> delegate; @property (nonatomic) MWZPlaceDetails* placeDetails; @property (nonatomic) UIColor* color; -(instancetype) initWithFrame:(CGRect)frame color:(UIColor*)color; -(void)setContentForPlaceDetails:(MWZPlaceDetails*)placeDetails language:(NSString*)language buttons:(NSArray<MWZUIFullContentViewComponentButton*>*)buttons rows:(NSArray<MWZUIFullContentViewComponentRow*>*)rows; - (NSMutableArray<MWZUIFullContentViewComponentButton*>*) buildHeaderButtonsForPlaceDetails:(MWZPlaceDetails*)placeDetails showInfoButton:(BOOL)shouldShowInformationButton language:(NSString*)language; - (NSMutableArray<MWZUIFullContentViewComponentRow*>*) buildContentRowsForPlaceDetails:(MWZPlaceDetails*)placeDetails language:(NSString*)language shouldShowReportRow:(BOOL)reportRow; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUIReportIssueRow.h
#import <UIKit/UIKit.h> #import <MapwizeSDK/MapwizeSDK.h> #import "MWZUIFullContentViewComponentRow.h" NS_ASSUME_NONNULL_BEGIN @protocol MWZUIReportIssueRowDelegate <NSObject> - (void) didTapOnReportIssue; @end @interface MWZUIReportIssueRow : MWZUIFullContentViewComponentRow @property (nonatomic, weak) id<MWZUIReportIssueRowDelegate> delegate; - (instancetype) initWithFrame:(CGRect)frame color:(UIColor*)color; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Utils/ILIndoorLocation+DirectionPoint.h
@import IndoorLocation; @import MapwizeSDK; NS_ASSUME_NONNULL_BEGIN @interface ILIndoorLocation (MWZDirectionPoint) <MWZDirectionPoint> @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/DefaultScene/MWZUIDefaultSceneProperties.h
#import <Foundation/Foundation.h> @import MapwizeSDK; @interface MWZUIDefaultSceneProperties : NSObject @property (nonatomic) MWZVenue* venue; @property (nonatomic) BOOL venueLoading; @property (nonatomic) id selectedContent; @property (nonatomic) MWZPlaceDetails* placeDetails; @property (nonatomic) NSString* language; @property (nonatomic, assign) BOOL infoButtonHidden; @property (nonatomic, assign) BOOL reportRowHidden; + (instancetype) scenePropertiesWithProperties:(MWZUIDefaultSceneProperties*) properties; @end
Mapwize/mapwize-ui-ios
MapwizeUI/Utils/MWZUIColors.h
#import <Foundation/Foundation.h> #import <UIKit/UIKit.h> NS_ASSUME_NONNULL_BEGIN @interface MWZUIColors : NSObject + (UIImage *)tintedBackgroundImageWithImage:(UIImage*) input tint:(UIColor *)color; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/DirectionScene/MWZUIDirectionHeaderDelegate.h
#ifndef MWZUIDirectionHeaderDelegate_h #define MWZUIDirectionHeaderDelegate_h @import MapwizeSDK; @class MWZUIDirectionHeader; @protocol MWZUIDirectionHeaderDelegate <NSObject> - (void) directionHeaderDidTapOnBackButton:(MWZUIDirectionHeader*) directionHeader; - (void) directionHeaderDidTapOnFromButton:(MWZUIDirectionHeader*) directionHeader; - (void) directionHeaderDidTapOnToButton:(MWZUIDirectionHeader*) directionHeader; - (void) directionHeaderDidTapOnSwapButton:(MWZUIDirectionHeader*) directionHeader; - (void) directionHeaderDirectionModeDidChange:(MWZDirectionMode*) directionMode; - (void) searchDirectionQueryDidChange:(NSString*) query; @end #endif /* MWZUIDirectionHeaderDelegate_h */
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUIOpeningHoursTodayTableViewCell.h
// // MWZUIOpeningHoursTodayTableViewCell.h // MapwizeUI // // Created by Etienne on 15/10/2020. // Copyright © 2020 <NAME>. All rights reserved. // #import <UIKit/UIKit.h> NS_ASSUME_NONNULL_BEGIN @interface MWZUIOpeningHoursTodayTableViewCell : UITableViewCell @property (nonatomic, retain) UILabel* hoursLabel; @property (nonatomic, retain) UIImageView* toggleImage; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/MWZUISceneCoordinator.h
#import <Foundation/Foundation.h> #import <UIKit/UIKit.h> #import "MWZUIDefaultScene.h" #import "MWZUISearchScene.h" #import "MWZUIDirectionScene.h" NS_ASSUME_NONNULL_BEGIN @interface MWZUISceneCoordinator : NSObject -(instancetype) initWithContainerView:(UIView*) containerView; @property (nonatomic, weak) UIView* containerView; @property (nonatomic, weak) MWZUIDefaultScene* defaultScene; @property (nonatomic, weak) MWZUISearchScene* searchScene; @property (nonatomic, weak) MWZUIDirectionScene* directionScene; -(void) transitionFromDefaultToSearch; -(void) transitionFromSearchToDefault; -(void) transitionFromDefaultToDirection; -(void) transitionFromDirectionToDefault; -(void) transitionFromDirectionToSearch; -(void) transitionFromSearchToDirection; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/SearchScene/MWZUISearchSceneDelegate.h
#ifndef MWZUISearchSceneDelegate_h #define MWZUISearchSceneDelegate_h @protocol MWZUISearchSceneDelegate <NSObject> - (void) didTapOnBackButton; - (void) searchQueryDidChange:(NSString*) query; - (void) didSelect:(id<MWZObject>)mapwizeObject universe:(MWZUniverse*) universe forQuery:(NSString*) query; @end #endif /* MWZUISearchSceneDelegate_h */
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/SearchScene/MWZUISearchViewControllerOptions.h
#import <Foundation/Foundation.h> @import MapwizeSDK; NS_ASSUME_NONNULL_BEGIN @interface MWZUISearchViewControllerOptions : NSObject @property (assign) BOOL isDirection; @property (assign) BOOL isFrom; @property (assign) BOOL isTo; @property (nonatomic, copy) NSString* language; @property (nonatomic, copy) NSString* venueId; @property (nonatomic, copy) NSString* universeId; @property (nonatomic, copy) NSArray<MWZUniverse*>* groupByUniverses; @property (nonatomic, copy) ILIndoorLocation* indoorLocation; @property (nonatomic, copy) MWZApiFilter* apiFilter; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/BottomSheet/MWZUIIssueTypeViewDelegate.h
#ifndef MWZUIIssueTypeViewDelegate_h #define MWZUIIssueTypeViewDelegate_h #import <MapwizeSDK/MapwizeSDK.h> @protocol MWZUIIssueTypeViewDelegate <NSObject> - (void) didSelectIssueType:(MWZIssueType*)issueType; @end #endif
Mapwize/mapwize-ui-ios
MapwizeUI/Scenes/MWZUIScene.h
#import <UIKit/UIKit.h> #ifndef MWZUIScene_h #define MWZUIScene_h @protocol MWZUIScene <NSObject> - (instancetype) initWith:(UIColor*) mainColor; - (void) addTo:(UIView*) view; - (void) setHidden:(BOOL) hidden; - (UIView*) getTopViewToConstraint; - (UIView*) getBottomViewToConstraint; @end #endif
Mapwize/mapwize-ui-ios
MapwizeUI/LanguageButton/MWZUILanguagesButton.h
#import <UIKit/UIKit.h> @import MapwizeSDK; @protocol MWZUILanguagesButtonDelegate; NS_ASSUME_NONNULL_BEGIN @interface MWZUILanguagesButton : UIButton @property (nonatomic, weak) id<MWZUILanguagesButtonDelegate> delegate; - (instancetype) init; - (void) mapwizeDidEnterInVenue:(MWZVenue*) venue; - (void) mapwizeDidExitVenue; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/Commons/MWZUIPaddingTextField.h
#import <UIKit/UIKit.h> NS_ASSUME_NONNULL_BEGIN @interface MWZUIPaddingTextField : UITextField @property (nonatomic, assign) int topInset; @property (nonatomic, assign) int bottomInset; @property (nonatomic, assign) int leftInset; @property (nonatomic, assign) int rightInset; @end NS_ASSUME_NONNULL_END
Mapwize/mapwize-ui-ios
MapwizeUI/MapViewController/MWZUISettings.h
#import <Foundation/Foundation.h> NS_ASSUME_NONNULL_BEGIN @interface MWZUISettings: NSObject @property (nonatomic, assign) BOOL menuButtonIsHidden; @property (nonatomic, assign) BOOL followUserButtonIsHidden; @property (nonatomic, assign) BOOL floorControllerIsHidden; @property (nonatomic, assign) BOOL compassIsHidden; @end NS_ASSUME_NONNULL_END
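A short hypothetical usage sketch; only the properties declared above are real, and how the settings object is handed to the map view is not shown here.

MWZUISettings *settings = [[MWZUISettings alloc] init];
settings.menuButtonIsHidden = YES;   // hide the drawer menu button
settings.compassIsHidden = YES;      // hide the compass overlay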
Mapwize/mapwize-ui-ios
MapwizeUI/MapViewController/MWZUIViewDelegate.h
#ifndef MWZUIViewDelegate_h #define MWZUIViewDelegate_h @class MWZUIView; #import "MWZUIEventChannel.h" @import MapwizeSDK; @class MWZUIBottomSheetComponents; /** The MWZUIViewDelegate allows you to respond to the map events */ @protocol MWZUIViewDelegate <NSObject> /** Called when the MWZUIView is ready to use @param mapwizeView the view that triggered the event */ - (void) mapwizeViewDidLoad:(MWZUIView* _Nonnull) mapwizeView; @optional /** Called when the user clicks on the information button while a place is selected Can be used to open a new view to show more information about this place or redirect the user elsewhere in your application @param mapwizeView the view that triggered the event @param place the selected place when the click occurs */ - (void) mapwizeView:(MWZUIView* _Nonnull) mapwizeView didTapOnPlaceInformationButton:(MWZPlace* _Nonnull) place; /** Called when the user clicks on the information button while a placelist is selected Can be used to open a new view to show more information about this placelist or redirect the user elsewhere in your application @param mapwizeView the view that triggered the event @param placelist the selected placelist when the click occurs */ - (void) mapwizeView:(MWZUIView* _Nonnull) mapwizeView didTapOnPlacelistInformationButton:(MWZPlacelist* _Nonnull) placelist; /** Called when the user clicks on the follow user mode button and no location is currently available Can be used to propose that the user activate the GPS @param mapwizeView the view that triggered the event */ - (void) mapwizeViewDidTapOnFollowWithoutLocation:(MWZUIView* _Nonnull) mapwizeView; /** Called when the user clicks on the menu button in the search bar Can be used to open a drawer menu @param mapwizeView the view that triggered the event */ - (void) mapwizeViewDidTapOnMenu:(MWZUIView* _Nonnull) mapwizeView; /** Called when a place or a placelist is selected The information button can be useful if you have more information to display to the user or want to redirect them elsewhere in your application @param mapwizeView the view that triggered the event @param mapwizeObject the selected object @return YES if the information button should be displayed. NO otherwise */ - (BOOL) mapwizeView:(MWZUIView* _Nonnull) mapwizeView shouldShowInformationButtonFor:(id<MWZObject> _Nonnull) mapwizeObject; - (BOOL) mapwizeView:(MWZUIView* _Nonnull) mapwizeView shouldShowReportRowFor:(id<MWZObject> _Nonnull) mapwizeObject; /** Called when the bottom view is going to be displayed The MapwizeUI SDK builds all the components that will be displayed in the view and gives them back to the developer through this method's components argument. You can change, remove or add components in the MWZUIBottomSheetComponents and return it. The returned MWZUIBottomSheetComponents will be used to display the final content. @param mapwizeView the view that called the method @param placeDetails the placeDetails object about to be displayed in the bottom view @param components the components built by the SDK based on the info contained in the object. */ - (MWZUIBottomSheetComponents* _Nonnull) mapwizeView:(MWZUIView* _Nonnull) mapwizeView requireComponentForPlaceDetails:(MWZPlaceDetails* _Nonnull)placeDetails withDefaultComponents:(MWZUIBottomSheetComponents* _Nonnull)components; /** Called when the available floors change The floor controller is useful when you have multiple floors, but you can decide to hide it if only one floor is available. 
@param mapwizeView the view that triggered the event @param floors the new available floors @return YES if the floors controller should be displayed. NO otherwise */ - (BOOL) mapwizeView:(MWZUIView* _Nonnull) mapwizeView shouldShowFloorControllerFor:(NSArray<MWZFloor*>* _Nonnull) floors; /** Called when the follow user mode changes. Can be used to change the UI depending on the current follow user mode @param mapwizeView the view that triggered the event @param followUserMode the active followUserMode */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView followUserModeDidChange:(MWZFollowUserMode)followUserMode; /** Called when the user clicks on the map @param mapwizeView the view that triggered the event @param clickEvent the click event produced by the click */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView didTap:(MWZClickEvent *_Nonnull)clickEvent; /** Called when the MWZUIView starts to display a venue @param mapwizeView the view that triggered the event @param venue that will be displayed */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView venueWillEnter:(MWZVenue *_Nonnull)venue; /** Called when the MWZUIView finishes displaying a venue @param mapwizeView the view that triggered the event @param venue that is displayed */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView venueDidEnter:(MWZVenue *_Nonnull)venue; /** Called when the MWZUIView hides a venue @param mapwizeView the view that triggered the event @param venue that is hidden */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView venueDidExit:(MWZVenue *_Nonnull)venue; /** Called when the universe will change @param mapwizeView the view that triggered the event @param universe that will be displayed */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView universeWillChange:(MWZUniverse *_Nonnull)universe; /** Called when the universe did change @param mapwizeView the view that triggered the event @param universe that is displayed */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView universeDidChange:(MWZUniverse *_Nonnull)universe; /** Called when the universes available for the displayed venue change. Triggered just after venueEnter or if new access is granted. @param mapwizeView the view that triggered the event @param universes the available universes */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView universesDidChange:(NSArray<MWZUniverse *> *_Nonnull)universes; /** Called when the MWZUIView will change the displayed floor @param mapwizeView the view that triggered the event @param floor that will be displayed */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView floorWillChange:(MWZFloor *_Nullable)floor; /** Called when the MWZUIView changes the displayed floor @param mapwizeView the view that triggered the event @param floor the displayed floor */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView floorDidChange:(MWZFloor *_Nullable)floor; /** Called when the MWZUIView changes the available floors for the displayed venue. Triggered just after venueDidEnter, universeDidChange and venueDidExit. Can also be triggered based on the map movements. 
@param mapwizeView the mapwize view that triggered the event @param floors the available floors */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView floorsDidChange:(NSArray<MWZFloor *> *_Nonnull)floors; /** Called when the language changes for the displayed venue @param mapView the mapwize view that triggered the event @param language the current language */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapView languageDidChange:(NSString* _Nonnull)language; /** Called when the MWZUIView needs to display the user location. Use this method to display a custom user location view. Specifications about the MGLUserLocationAnnotationView can be found at https://docs.mapbox.com/ios/api/maps */ - (MWZUserLocationAnnotationView *_Nonnull)viewForUserLocationAnnotation; /** Called when a marker is tapped @param mapwizeView the view that triggered the event @param marker tapped */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView didTapOnMarker:(MWZMapwizeAnnotation *_Nonnull)marker; - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView didTapMarker:(MWZMarker*_Nonnull)marker; /** Called when a navigation will start or will be recomputed */ - (void)mapwizeViewWillStartNavigation:(MWZUIView *_Nonnull)mapwizeView; /** Called when a navigation has started or has been recomputed */ - (void)mapwizeViewDidStartNavigation:(MWZUIView *_Nonnull)mapwizeView forDirection:(MWZDirection* _Nonnull) direction; /** Called when the navigation is stopped */ - (void)mapwizeViewDidStopNavigation:(MWZUIView *_Nonnull)mapwizeView; /** Called when no direction has been found */ - (void)mapwizeView:(MWZUIView *_Nonnull)mapwizeView navigationFailedWithError:(NSError* _Nonnull) error; /** Called during a navigation when the user location changes. You should decide whether to recompute based on the navigation information (For example: return locationDelta > 10) */ - (BOOL)mapwizeView:(MWZUIView *_Nonnull) mapwizeView shouldRecomputeNavigation:(MWZNavigationInfo* _Nonnull) navigationInfo; /** Called when a place has been selected @param place the selected place @param currentUniverse the current displayed universe @param searchResultUniverse the universe that will be displayed after the place selection. If the universe does not change, the value will be the same as currentUniverse @param channel the channel through which the place has been selected (Search, MainSearches or MapClick) @param searchQuery the last query that ran in the search bar before the selection. This value is nil if the channel is not Search */ - (void) mapwizeView:(MWZUIView *_Nonnull) mapwizeView didSelectPlace:(MWZPlace*_Nonnull) place currentUniverse:(MWZUniverse*_Nonnull) currentUniverse searchResultUniverse:(MWZUniverse*_Nonnull) searchResultUniverse channel:(MWZUIEventChannel) channel searchQuery:(NSString*_Nullable) searchQuery; /** Called when a placelist has been selected @param placelist the selected placelist @param currentUniverse the current displayed universe @param searchResultUniverse the universe that will be displayed after the placelist selection. If the universe does not change, the value will be the same as currentUniverse @param channel the channel through which the placelist has been selected (Search, MainSearches) @param searchQuery the last query that ran in the search bar before the selection. 
This value is nil if the channel is not Search */ - (void) mapwizeView:(MWZUIView *_Nonnull) mapwizeView didSelectPlacelist:(MWZPlacelist*_Nonnull) placelist currentUniverse:(MWZUniverse*_Nonnull) currentUniverse searchResultUniverse:(MWZUniverse*_Nonnull) searchResultUniverse channel:(MWZUIEventChannel) channel searchQuery:(NSString*_Nullable) searchQuery; /** Called when a direction starts @param venue the current displayed venue @param universe the current displayed universe @param from the starting point @param to the destination point @param mode the mode set for the direction @param isNavigation true if the current direction is used as navigation */ - (void) mapwizeView:(MWZUIView *_Nonnull) mapwizeView didStartDirectionInVenue:(MWZVenue*_Nonnull) venue universe:(MWZUniverse*_Nonnull) universe from:(id<MWZDirectionPoint>_Nonnull) from to:(id<MWZDirectionPoint>_Nonnull) to mode:(NSString*_Nonnull) mode isNavigation:(BOOL) isNavigation; - (void) mapwizeViewDidFailLoadingContent:(MWZUIView * _Nonnull)mapwizeView; @end #endif
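A minimal sketch of adopting the protocol above from a host view controller. MyViewController is a hypothetical class used for illustration; the delegate method signatures come from MWZUIViewDelegate.h itself.

@interface MyViewController : UIViewController <MWZUIViewDelegate>
@end

@implementation MyViewController

- (void)mapwizeViewDidLoad:(MWZUIView *)mapwizeView {
    NSLog(@"Map is ready");
}

- (void)mapwizeView:(MWZUIView *)mapwizeView
        didTapOnPlaceInformationButton:(MWZPlace *)place {
    // Push a detail screen for the selected place here.
}

- (BOOL)mapwizeView:(MWZUIView *)mapwizeView
        shouldShowInformationButtonFor:(id<MWZObject>)mapwizeObject {
    return YES; // always offer the information button
}

@end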
2shady4u/libkra
libkra/kra_layer_data.h
// ############################################################################ # // Copyright © 2022 <NAME> & <NAME> <<EMAIL>> // Licensed under the MIT License. // See LICENSE in the project root for license information. // ############################################################################ # #ifndef KRA_LAYER_DATA_H #define KRA_LAYER_DATA_H #include "kra_utility.h" #include <memory> #include <regex> #include <numeric> #define WRITEBUFFERSIZE (8192) namespace kra { /* This class contains the actual data as stored in the layer's unique binary file */ class LayerData { private: class Tile { public: // These can also be negative!!! // The left (X) position of the tile. int32_t left; // The top (Y) position of the tile. int32_t top; // Number of compressed bytes that represent the tile data. int compressed_length; // Compressed image data of this tile. std::vector<uint8_t> compressed_data; }; std::vector<std::unique_ptr<Tile>> tiles; int32_t top; int32_t left; int32_t bottom; int32_t right; unsigned int _get_element_value(const std::vector<unsigned char> &p_layer_content, const std::string &p_element_name, unsigned int &p_index) const; std::string _get_header_line(const std::vector<unsigned char> &p_layer_content, unsigned int &p_index) const; void _update_dimensions(); int _lzff_decompress(const void *input, const int length, void *output, int maxout) const; public: // Version statement of the layer, always equal to 2. unsigned int version; // Number of vertical pixels stored in each tile, always equal to 64. unsigned int tile_height; // Number of horizontal pixels stored in each tile, always equal to 64. unsigned int tile_width; // Number of elements in each pixel, is equal to 4 for RGBA. unsigned int pixel_size; void import_attributes(const std::vector<unsigned char> &p_layer_content); std::vector<uint8_t> get_composed_data(ColorSpace color_space) const; unsigned int get_width() const; unsigned int get_height() const; int32_t get_top() const; int32_t get_left() const; int32_t get_bottom() const; int32_t get_right() const; void print_layer_data_attributes() const; }; }; #endif // KRA_LAYER_DATA_H
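A hedged plain-C sketch of the tile-placement arithmetic this header implies: each tile_width x tile_height tile lands in the composed buffer at an offset derived from its (left, top) relative to the layer bounds. The helper name and row-major layout are assumptions for illustration, not libkra API.

#include <stddef.h>
#include <stdint.h>

/* Byte offset of a tile's first pixel inside a row-major composed buffer. */
static size_t tile_byte_offset(int32_t layer_left, int32_t layer_top,
                               int32_t tile_left, int32_t tile_top,
                               uint32_t layer_width, uint32_t pixel_size) {
    size_t dy = (size_t)(tile_top - layer_top);   /* rows above this tile */
    size_t dx = (size_t)(tile_left - layer_left); /* pixels to its left   */
    return (dy * layer_width + dx) * pixel_size;
}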
2shady4u/libkra
libkra/kra_layer.h
// ############################################################################ # // Copyright © 2022 <NAME> & <NAME> <<EMAIL>> // Licensed under the MIT License. // See LICENSE in the project root for license information. // ############################################################################ # #ifndef KRA_LAYER_H #define KRA_LAYER_H #include "kra_utility.h" #include "kra_layer_data.h" #include "kra_exported_layer.h" #include "../tinyxml2/tinyxml2.h" #include "../zlib/contrib/minizip/unzip.h" namespace kra { /* This class stores the attributes (as found in 'maindoc.xml') for a single layer */ /* The exact same class is used for both PAINT_LAYER and GROUP_LAYER to reduce code complexity */ class Layer { private: void _import_paint_attributes(const std::string &p_name, unzFile &p_file, const tinyxml2::XMLElement *p_xml_element); void _import_group_attributes(const std::string &p_name, unzFile &p_file, const tinyxml2::XMLElement *p_xml_element); void _print_paint_layer_attributes() const; void _print_group_layer_attributes() const; public: std::string filename; std::string name; std::string uuid; unsigned int x; unsigned int y; uint8_t opacity; bool visible = true; LayerType type; // PAINT_LAYER ColorSpace color_space = RGBA; std::unique_ptr<LayerData> layer_data; // GROUP_LAYER std::vector<std::unique_ptr<Layer>> children; void import_attributes(const std::string &p_name, unzFile &p_file, const tinyxml2::XMLElement *p_xml_element); std::unique_ptr<ExportedLayer> get_exported_layer() const; void print_layer_attributes() const; }; }; #endif // KRA_LAYER_H
2shady4u/libkra
libkra/kra_utility.h
// ############################################################################ # // Copyright © 2022 <NAME> & <NAME> <<EMAIL>> // Licensed under the MIT License. // See LICENSE in the project root for license information. // ############################################################################ # #ifndef KRA_UTILITY_H #define KRA_UTILITY_H #include "../zlib/contrib/minizip/unzip.h" #include <vector> #include <string> #define WRITEBUFFERSIZE (8192) namespace kra { enum LayerType { PAINT_LAYER, GROUP_LAYER }; enum ColorSpace { RGBA, RGBA16, RGBAF16, RGBAF32, CMYK, OTHER }; enum VerbosityLevel { QUIET, NORMAL, VERBOSE, VERY_VERBOSE }; extern VerbosityLevel verbosity_level; int extract_current_file_to_vector(unzFile &p_file, std::vector<unsigned char> &p_result); ColorSpace get_color_space(const std::string &p_color_space_name); const std::string get_color_space_name(ColorSpace p_color_space); }; #endif // KRA_UTILITY_H
2shady4u/libkra
libkra/kra_document.h
// ############################################################################ # // Copyright © 2022 <NAME> & <NAME> <<EMAIL>> // Licensed under the MIT License. // See LICENSE in the project root for license information. // ############################################################################ # #ifndef KRA_DOCUMENT_H #define KRA_DOCUMENT_H #include "kra_utility.h" #include "kra_layer.h" #include "kra_exported_layer.h" #include "../tinyxml2/tinyxml2.h" #include "../zlib/contrib/minizip/unzip.h" #include <unordered_map> #include <codecvt> #include <locale> namespace kra { /* This class stores the general properties of a KRA/KRZ-archive as well as a vector of layers containing the actual data */ class Document { private: std::vector<std::unique_ptr<Layer>> _parse_layers(unzFile &p_file, const tinyxml2::XMLElement *xmlElement); void _create_layer_map(); void _add_layer_to_map(const std::unique_ptr<Layer> &layer); public: std::string name; unsigned int width; unsigned int height; ColorSpace color_space; std::vector<std::unique_ptr<Layer>> layers; std::unordered_map<std::string, const std::unique_ptr<Layer> &> layer_map; void load(const std::wstring &p_path); std::unique_ptr<ExportedLayer> get_exported_layer_at(int p_layer_index) const; std::unique_ptr<ExportedLayer> get_exported_layer_with_uuid(const std::string &p_uuid) const; std::vector<std::unique_ptr<ExportedLayer>> get_all_exported_layers() const; void print_document_attributes() const; }; }; #endif // KRA_DOCUMENT_H
2shady4u/libkra
libkra/kra_exported_layer.h
// ############################################################################ # // Copyright © 2022 <NAME> & <NAME> <<EMAIL>> // Licensed under the MIT License. // See LICENSE in the project root for license information. // ############################################################################ # #ifndef KRA_EXPORTED_LAYER_H #define KRA_EXPORTED_LAYER_H #include "kra_utility.h" #include <cstdint> #include <string> #include <vector> namespace kra { /* This class represents an exported version of a Layer */ /* In the case of a PAINT_LAYER, this class stores the decompressed data of the entire layer */ /* In the case of a GROUP_LAYER, this class stores a vector of UUIDs of its child layers */ class ExportedLayer { public: std::string name; unsigned int x; unsigned int y; uint8_t opacity; bool visible; LayerType type; // PAINT_LAYER ColorSpace color_space = RGBA; int32_t top; int32_t left; int32_t bottom; int32_t right; unsigned int pixel_size; std::vector<uint8_t> data; // GROUP_LAYER std::vector<std::string> child_uuids; }; }; #endif // KRA_EXPORTED_LAYER_H
dandycheung/baulk
lib/archive/liblzma/check/crc64_fast.c
/////////////////////////////////////////////////////////////////////////////// // /// \file crc64.c /// \brief CRC64 calculation /// /// Calculate the CRC64 using the slice-by-four algorithm. This is the same /// idea that is used in crc32_fast.c, but for CRC64 we use only four tables /// instead of eight to avoid increasing CPU cache usage. // // Author: <NAME> // // This file has been put into the public domain. // You can do whatever you want with this file. // /////////////////////////////////////////////////////////////////////////////// #include "check.h" #include "crc_macros.h" #ifdef WORDS_BIGENDIAN # define A1(x) ((x) >> 56) #else # define A1 A #endif // See the comments in crc32_fast.c. They aren't duplicated here. extern LZMA_API(uint64_t) lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc) { crc = ~crc; #ifdef WORDS_BIGENDIAN crc = bswap64(crc); #endif if (size > 4) { while ((uintptr_t)(buf) & 3) { crc = lzma_crc64_table[0][*buf++ ^ A1(crc)] ^ S8(crc); --size; } const uint8_t *const limit = buf + (size & ~(size_t)(3)); size &= (size_t)(3); while (buf < limit) { #ifdef WORDS_BIGENDIAN const uint32_t tmp = (crc >> 32) ^ aligned_read32ne(buf); #else const uint32_t tmp = crc ^ aligned_read32ne(buf); #endif buf += 4; crc = lzma_crc64_table[3][A(tmp)] ^ lzma_crc64_table[2][B(tmp)] ^ S32(crc) ^ lzma_crc64_table[1][C(tmp)] ^ lzma_crc64_table[0][D(tmp)]; } } while (size-- != 0) crc = lzma_crc64_table[0][*buf++ ^ A1(crc)] ^ S8(crc); #ifdef WORDS_BIGENDIAN crc = bswap64(crc); #endif return ~crc; }
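A minimal usage sketch for lzma_crc64() as defined above: start from 0 and carry the running CRC between calls, so large inputs can be fed in chunks. The sample strings are illustrative.

#include <lzma.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    const char *part1 = "hello ";
    const char *part2 = "world";
    uint64_t crc = 0; /* initial value for a fresh CRC64 */
    crc = lzma_crc64((const uint8_t *)part1, strlen(part1), crc);
    crc = lzma_crc64((const uint8_t *)part2, strlen(part2), crc);
    printf("crc64 = 0x%016llx\n", (unsigned long long)crc);
    return 0;
}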
dandycheung/baulk
tools/baulk-dock/resource.h
/// #ifndef BAULK_DOCK_BAULK_RESOURCE_H #define BAULK_DOCK_BAULK_RESOURCE_H #define ICON_BAULK_BASE 256 #define IMAGE_BAULK_BASE64 401 #define IMAGE_BAULK_BASE96 402 #define IMAGE_BAULK_BASE128 403 #define IMAGE_BAULK_BASE256 404 #define IDM_BAULK_DOCK_ABOUT 500 #define IDD_BAULK_DOCK_WINDOW 1001 #define IDC_BUTTON_STARTTASK 2101 #define IDC_BUTTON_STARTENV 2102 #define IDM_ENGINE_COMBOX 2103 #endif
dandycheung/baulk
lib/mem/mimalloc/src/segment-cache.c
/* ---------------------------------------------------------------------------- Copyright (c) 2020, Microsoft Research, <NAME> This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. -----------------------------------------------------------------------------*/ /* ---------------------------------------------------------------------------- Implements a cache of segments to avoid expensive OS calls and to reuse the commit_mask to optimize the commit/decommit calls. The full memory map of all segments is also implemented here. -----------------------------------------------------------------------------*/ #include "mimalloc.h" #include "mimalloc-internal.h" #include "mimalloc-atomic.h" #include "bitmap.h" // atomic bitmap //#define MI_CACHE_DISABLE 1 // define to completely disable the segment cache #define MI_CACHE_FIELDS (16) #define MI_CACHE_MAX (MI_BITMAP_FIELD_BITS*MI_CACHE_FIELDS) // 1024 on 64-bit #define BITS_SET() MI_ATOMIC_VAR_INIT(UINTPTR_MAX) #define MI_CACHE_BITS_SET MI_INIT16(BITS_SET) // note: update if MI_CACHE_FIELDS changes typedef struct mi_cache_slot_s { void* p; size_t memid; bool is_pinned; mi_commit_mask_t commit_mask; mi_commit_mask_t decommit_mask; _Atomic(mi_msecs_t) expire; } mi_cache_slot_t; static mi_decl_cache_align mi_cache_slot_t cache[MI_CACHE_MAX]; // = 0 static mi_decl_cache_align mi_bitmap_field_t cache_available[MI_CACHE_FIELDS] = { MI_CACHE_BITS_SET }; // zero bit = available! static mi_decl_cache_align mi_bitmap_field_t cache_available_large[MI_CACHE_FIELDS] = { MI_CACHE_BITS_SET }; static mi_decl_cache_align mi_bitmap_field_t cache_inuse[MI_CACHE_FIELDS]; // zero bit = free mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld) { #ifdef MI_CACHE_DISABLE return NULL; #else // only segment blocks if (size != MI_SEGMENT_SIZE) return NULL; // numa node determines start field const int numa_node = _mi_os_numa_node(tld); size_t start_field = 0; if (numa_node > 0) { start_field = (MI_CACHE_FIELDS / _mi_os_numa_node_count())*numa_node; if (start_field >= MI_CACHE_FIELDS) start_field = 0; } // find an available slot mi_bitmap_index_t bitidx = 0; bool claimed = false; if (*large) { // large allowed? 
claimed = _mi_bitmap_try_find_from_claim(cache_available_large, MI_CACHE_FIELDS, start_field, 1, &bitidx); if (claimed) *large = true; } if (!claimed) { claimed = _mi_bitmap_try_find_from_claim(cache_available, MI_CACHE_FIELDS, start_field, 1, &bitidx); if (claimed) *large = false; } if (!claimed) return NULL; // found a slot mi_cache_slot_t* slot = &cache[mi_bitmap_index_bit(bitidx)]; void* p = slot->p; *memid = slot->memid; *is_pinned = slot->is_pinned; *is_zero = false; *commit_mask = slot->commit_mask; *decommit_mask = slot->decommit_mask; slot->p = NULL; mi_atomic_storei64_release(&slot->expire,(mi_msecs_t)0); // mark the slot as free again mi_assert_internal(_mi_bitmap_is_claimed(cache_inuse, MI_CACHE_FIELDS, 1, bitidx)); _mi_bitmap_unclaim(cache_inuse, MI_CACHE_FIELDS, 1, bitidx); return p; #endif } static mi_decl_noinline void mi_commit_mask_decommit(mi_commit_mask_t* cmask, void* p, size_t total, mi_stats_t* stats) { if (mi_commit_mask_is_empty(cmask)) { // nothing } else if (mi_commit_mask_is_full(cmask)) { _mi_os_decommit(p, total, stats); } else { // todo: one call to decommit the whole at once? mi_assert_internal((total%MI_COMMIT_MASK_BITS)==0); size_t part = total/MI_COMMIT_MASK_BITS; size_t idx; size_t count; mi_commit_mask_foreach(cmask, idx, count) { void* start = (uint8_t*)p + (idx*part); size_t size = count*part; _mi_os_decommit(start, size, stats); } mi_commit_mask_foreach_end() } mi_commit_mask_create_empty(cmask); } #define MI_MAX_PURGE_PER_PUSH (4) static mi_decl_noinline void mi_segment_cache_purge(bool force, mi_os_tld_t* tld) { MI_UNUSED(tld); if (!mi_option_is_enabled(mi_option_allow_decommit)) return; mi_msecs_t now = _mi_clock_now(); size_t purged = 0; const size_t max_visits = (force ? MI_CACHE_MAX /* visit all */ : MI_CACHE_FIELDS /* probe at most N (=16) slots */); size_t idx = (force ? 0 : _mi_random_shuffle((uintptr_t)now) % MI_CACHE_MAX /* random start */ ); for (size_t visited = 0; visited < max_visits; visited++,idx++) { // visit N slots if (idx >= MI_CACHE_MAX) idx = 0; // wrap mi_cache_slot_t* slot = &cache[idx]; mi_msecs_t expire = mi_atomic_loadi64_relaxed(&slot->expire); if (expire != 0 && (force || now >= expire)) { // racy read // seems expired, first claim it from available purged++; mi_bitmap_index_t bitidx = mi_bitmap_index_create_from_bit(idx); if (_mi_bitmap_claim(cache_available, MI_CACHE_FIELDS, 1, bitidx, NULL)) { // was available, we claimed it expire = mi_atomic_loadi64_acquire(&slot->expire); if (expire != 0 && (force || now >= expire)) { // safe read // still expired, decommit it mi_atomic_storei64_relaxed(&slot->expire,(mi_msecs_t)0); mi_assert_internal(!mi_commit_mask_is_empty(&slot->commit_mask) && _mi_bitmap_is_claimed(cache_available_large, MI_CACHE_FIELDS, 1, bitidx)); _mi_abandoned_await_readers(); // wait until safe to decommit // decommit committed parts // TODO: instead of decommit, we could also free to the OS? 
mi_commit_mask_decommit(&slot->commit_mask, slot->p, MI_SEGMENT_SIZE, tld->stats); mi_commit_mask_create_empty(&slot->decommit_mask); } _mi_bitmap_unclaim(cache_available, MI_CACHE_FIELDS, 1, bitidx); // make it available again for a pop } if (!force && purged > MI_MAX_PURGE_PER_PUSH) break; // bound to no more than N purge tries per push } } } void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld) { mi_segment_cache_purge(force, tld ); } mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld) { #ifdef MI_CACHE_DISABLE return false; #else // only for normal segment blocks if (size != MI_SEGMENT_SIZE || ((uintptr_t)start % MI_SEGMENT_ALIGN) != 0) return false; // numa node determines start field int numa_node = _mi_os_numa_node(NULL); size_t start_field = 0; if (numa_node > 0) { start_field = (MI_CACHE_FIELDS / _mi_os_numa_node_count())*numa_node; if (start_field >= MI_CACHE_FIELDS) start_field = 0; } // purge expired entries mi_segment_cache_purge(false /* force? */, tld); // find an available slot mi_bitmap_index_t bitidx; bool claimed = _mi_bitmap_try_find_from_claim(cache_inuse, MI_CACHE_FIELDS, start_field, 1, &bitidx); if (!claimed) return false; mi_assert_internal(_mi_bitmap_is_claimed(cache_available, MI_CACHE_FIELDS, 1, bitidx)); mi_assert_internal(_mi_bitmap_is_claimed(cache_available_large, MI_CACHE_FIELDS, 1, bitidx)); #if MI_DEBUG>1 if (is_pinned || is_large) { mi_assert_internal(mi_commit_mask_is_full(commit_mask)); } #endif // set the slot mi_cache_slot_t* slot = &cache[mi_bitmap_index_bit(bitidx)]; slot->p = start; slot->memid = memid; slot->is_pinned = is_pinned; mi_atomic_storei64_relaxed(&slot->expire,(mi_msecs_t)0); slot->commit_mask = *commit_mask; slot->decommit_mask = *decommit_mask; if (!mi_commit_mask_is_empty(commit_mask) && !is_large && !is_pinned && mi_option_is_enabled(mi_option_allow_decommit)) { long delay = mi_option_get(mi_option_segment_decommit_delay); if (delay == 0) { _mi_abandoned_await_readers(); // wait until safe to decommit mi_commit_mask_decommit(&slot->commit_mask, start, MI_SEGMENT_SIZE, tld->stats); mi_commit_mask_create_empty(&slot->decommit_mask); } else { mi_atomic_storei64_release(&slot->expire, _mi_clock_now() + delay); } } // make it available _mi_bitmap_unclaim((is_large ? cache_available_large : cache_available), MI_CACHE_FIELDS, 1, bitidx); return true; #endif } /* ----------------------------------------------------------- The following functions are to reliably find the segment or block that encompasses any pointer p (or NULL if it is not in any of our segments). We maintain a bitmap of all memory with 1 bit per MI_SEGMENT_SIZE (64MiB) set to 1 if it contains the segment meta data. ----------------------------------------------------------- */ #if (MI_INTPTR_SIZE==8) #define MI_MAX_ADDRESS ((size_t)20 << 40) // 20TB #else #define MI_MAX_ADDRESS ((size_t)2 << 30) // 2Gb #endif #define MI_SEGMENT_MAP_BITS (MI_MAX_ADDRESS / MI_SEGMENT_SIZE) #define MI_SEGMENT_MAP_SIZE (MI_SEGMENT_MAP_BITS / 8) #define MI_SEGMENT_MAP_WSIZE (MI_SEGMENT_MAP_SIZE / MI_INTPTR_SIZE) static _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE + 1]; // 2KiB per TB with 64MiB segments static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) { mi_assert_internal(_mi_ptr_segment(segment) == segment); // is it aligned on MI_SEGMENT_SIZE? 
if ((uintptr_t)segment >= MI_MAX_ADDRESS) { *bitidx = 0; return MI_SEGMENT_MAP_WSIZE; } else { const uintptr_t segindex = ((uintptr_t)segment) / MI_SEGMENT_SIZE; *bitidx = segindex % MI_INTPTR_BITS; const size_t mapindex = segindex / MI_INTPTR_BITS; mi_assert_internal(mapindex < MI_SEGMENT_MAP_WSIZE); return mapindex; } } void _mi_segment_map_allocated_at(const mi_segment_t* segment) { size_t bitidx; size_t index = mi_segment_map_index_of(segment, &bitidx); mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE); if (index==MI_SEGMENT_MAP_WSIZE) return; uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); uintptr_t newmask; do { newmask = (mask | ((uintptr_t)1 << bitidx)); } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask)); } void _mi_segment_map_freed_at(const mi_segment_t* segment) { size_t bitidx; size_t index = mi_segment_map_index_of(segment, &bitidx); mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE); if (index == MI_SEGMENT_MAP_WSIZE) return; uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); uintptr_t newmask; do { newmask = (mask & ~((uintptr_t)1 << bitidx)); } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask)); } // Determine the segment belonging to a pointer or NULL if it is not in a valid segment. static mi_segment_t* _mi_segment_of(const void* p) { mi_segment_t* segment = _mi_ptr_segment(p); if (segment == NULL) return NULL; size_t bitidx; size_t index = mi_segment_map_index_of(segment, &bitidx); // fast path: for any pointer to valid small/medium/large object or first MI_SEGMENT_SIZE in huge const uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); if (mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0)) { return segment; // yes, allocated by us } if (index==MI_SEGMENT_MAP_WSIZE) return NULL; // TODO: maintain max/min allocated range for more efficient rejection of invalid pointers? // search downwards for the first segment in case it is an interior pointer // could be slow but searches in MI_INTPTR_SIZE * MI_SEGMENT_SIZE (512MiB) steps through // valid huge objects // note: we could maintain a lowest index to speed up the path for invalid pointers? size_t lobitidx; size_t loindex; uintptr_t lobits = mask & (((uintptr_t)1 << bitidx) - 1); if (lobits != 0) { loindex = index; lobitidx = mi_bsr(lobits); // lobits != 0 } else if (index == 0) { return NULL; } else { mi_assert_internal(index > 0); uintptr_t lomask = mask; loindex = index; do { loindex--; lomask = mi_atomic_load_relaxed(&mi_segment_map[loindex]); } while (lomask != 0 && loindex > 0); if (lomask == 0) return NULL; lobitidx = mi_bsr(lomask); // lomask != 0 } mi_assert_internal(loindex < MI_SEGMENT_MAP_WSIZE); // take difference as the addresses could be larger than the MAX_ADDRESS space. size_t diff = (((index - loindex) * (8*MI_INTPTR_SIZE)) + bitidx - lobitidx) * MI_SEGMENT_SIZE; segment = (mi_segment_t*)((uint8_t*)segment - diff); if (segment == NULL) return NULL; mi_assert_internal((void*)segment < p); bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie); mi_assert_internal(cookie_ok); if (mi_unlikely(!cookie_ok)) return NULL; if (((uint8_t*)segment + mi_segment_size(segment)) <= (uint8_t*)p) return NULL; // outside the range mi_assert_internal(p >= (void*)segment && (uint8_t*)p < (uint8_t*)segment + mi_segment_size(segment)); return segment; } // Is this a valid pointer in our heap? 
static bool mi_is_valid_pointer(const void* p) { return (_mi_segment_of(p) != NULL); } mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept { return mi_is_valid_pointer(p); } /* // Return the full segment range belonging to a pointer static void* mi_segment_range_of(const void* p, size_t* size) { mi_segment_t* segment = _mi_segment_of(p); if (segment == NULL) { if (size != NULL) *size = 0; return NULL; } else { if (size != NULL) *size = segment->segment_size; return segment; } mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size); mi_reset_delayed(tld); mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld)); return page; } */
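A small sketch exercising mi_is_in_heap_region() defined above, assuming the public mimalloc API (mimalloc.h, mi_malloc/mi_free) is linked in.

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
    void *p = mi_malloc(64);
    int on_stack = 0;
    printf("heap pointer:  %d\n", mi_is_in_heap_region(p));         /* expect 1 */
    printf("stack pointer: %d\n", mi_is_in_heap_region(&on_stack)); /* expect 0 */
    mi_free(p);
    return 0;
}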
dandycheung/baulk
vendor/bela/external/escapeargv.c
//// #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <stdbool.h> #include <string.h> size_t escape_length(const char *s, size_t *rlen, bool *hp) { size_t len = 0; size_t i = 0; bool hasspace = false; len = strlen(s); if (len == 0) { // "" return 2; } size_t n = len; for (; i < len; i++) { switch (s[i]) { case '"': case '\\': n++; break; case ' ': case '\t': hasspace = true; break; default: break; } } *rlen = len; if (hasspace) { n += 2; } *hp = hasspace; return n; } bool argv_escape_join(const char *const *string_array, int array_length, char **result) { assert(string_array); assert(array_length >= 0); assert(result); // Determine the length of the concatenated string first. size_t string_length = 1; // Count the null terminator. for (int i = 0; i < array_length; i++) { size_t rlen = 0; // not used here bool hp = false; // not used here string_length += escape_length(string_array[i], &rlen, &hp); if (i < array_length - 1) { string_length++; // Count whitespace. } } char *string_out = malloc(sizeof(char) * string_length); if (string_out == NULL) { return false; } char *current = string_out; for (int i = 0; i < array_length; i++) { size_t rlen = 0; bool hp = false; size_t part_length = escape_length(string_array[i], &rlen, &hp); if (rlen == 0) { strcpy(current, "\"\""); current += 2; if (i < array_length - 1) { *current = ' '; current += 1; } continue; } if (rlen == part_length) { // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.strcpy) strcpy(current, string_array[i]); current += part_length; // We add a space after every part of the string except for the last one. if (i < array_length - 1) { *current = ' '; current += 1; } continue; } int slashes = 0; size_t j = 0; if (hp) { current[j] = '"'; j++; } const char *s = string_array[i]; for (size_t k = 0; k < rlen; k++) { switch (s[k]) { case '\\': slashes++; current[j] = s[k]; break; case '"': { for (; slashes > 0; slashes--) { current[j] = '\\'; j++; } current[j] = '\\'; j++; current[j] = s[k]; } break; default: slashes = 0; current[j] = s[k]; break; } j++; } if (hp) { for (; slashes > 0; slashes--) { current[j] = '\\'; j++; } current[j] = '"'; j++; } // escape_length() over-estimates when a backslash does not precede a quote, so advance by the bytes actually written (j), not by part_length. current += j; // We add a space after every part of the string except for the last one. if (i < array_length - 1) { *current = ' '; current += 1; } } *current = '\0'; *result = string_out; return true; } int main(int argc, char **argv) { char *out = NULL; if (argv_escape_join((const char *const *)argv, argc, &out)) { fprintf(stderr, "%s\n", out); free(out); } return 0; }
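A small illustrative driver for argv_escape_join() above. The inputs are made up to show the Windows quoting rules: spaces force surrounding quotes, and quotes (plus the backslashes immediately before them) get escaped.

static const char *const demo[] = {"prog.exe", "plain", "has space", "say \"hi\""};

static void demo_join(void) {
    char *joined = NULL;
    if (argv_escape_join(demo, 4, &joined)) {
        /* Expected output: prog.exe plain "has space" "say \"hi\"" */
        fprintf(stderr, "%s\n", joined);
        free(joined);
    }
}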
dandycheung/baulk
lib/archive/zlib/google/redact.h
// Copyright (c) 2022 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_ZLIB_GOOGLE_REDACT_H_ #define THIRD_PARTY_ZLIB_GOOGLE_REDACT_H_ #include <ostream> #include "base/files/file_path.h" #include "base/logging.h" namespace zip { // Redacts file paths in log messages. // Example: // LOG(ERROR) << "Cannot open " << Redact(path); class Redact { public: explicit Redact(const base::FilePath& path) : path_(path) {} friend std::ostream& operator<<(std::ostream& out, const Redact&& r) { return LOG_IS_ON(INFO) ? out << "'" << r.path_ << "'" : out << "(redacted)"; } private: const base::FilePath& path_; }; } // namespace zip #endif // THIRD_PARTY_ZLIB_GOOGLE_REDACT_H_
dandycheung/baulk
vendor/bela/test/appexeclink/afunix.c
#undef UNICODE #define WIN32_LEAN_AND_MEAN #include <windows.h> #include <winsock2.h> #include <ws2tcpip.h> #include <afunix.h> #include <stdlib.h> #include <stdio.h> #pragma comment(lib, "Ws2_32") #define SERVER_SOCKET "server.sock" int __cdecl main(void) { SOCKET ClientSocket = INVALID_SOCKET; SOCKET ListenSocket = INVALID_SOCKET; int Result; char SendBuffer[] = "af_unix from Windows to WSL!"; int SendResult; SOCKADDR_UN ServerSocket; WSADATA WsaData; // Initialize Winsock Result = WSAStartup(MAKEWORD(2, 2), &WsaData); if (Result != 0) { printf("WSAStartup failed with error: %d\n", Result); goto Exit; } // Create an AF_UNIX stream server socket. ListenSocket = socket(AF_UNIX, SOCK_STREAM, 0); if (ListenSocket == INVALID_SOCKET) { printf("socket failed with error: %ld\n", WSAGetLastError()); goto Exit; } memset(&ServerSocket, 0, sizeof(ServerSocket)); ServerSocket.sun_family = AF_UNIX; strncpy(ServerSocket.sun_path, SERVER_SOCKET, strlen(SERVER_SOCKET)); // Bind the socket to the path. Result = bind(ListenSocket, (struct sockaddr *)&ServerSocket, sizeof(ServerSocket)); if (Result == SOCKET_ERROR) { printf("bind failed with error: %d\n", WSAGetLastError()); goto Exit; } // Listen to start accepting connections. Result = listen(ListenSocket, SOMAXCONN); if (Result == SOCKET_ERROR) { printf("listen failed with error: %d\n", WSAGetLastError()); goto Exit; } printf("Accepting connections on: '%s'\n", SERVER_SOCKET); // Accept a connection. ClientSocket = accept(ListenSocket, NULL, NULL); if (ClientSocket == INVALID_SOCKET) { printf("accept failed with error: %d\n", WSAGetLastError()); goto Exit; } printf("Accepted a connection.\n"); // Send some data. SendResult = send(ClientSocket, SendBuffer, (int)strlen(SendBuffer), 0); if (SendResult == SOCKET_ERROR) { printf("send failed with error: %d\n", WSAGetLastError()); goto Exit; } printf("Sent %zu bytes: '%s'\n", strlen(SendBuffer), SendBuffer); // Shut down the sending side of the connection. printf("Shutting down\n"); Result = shutdown(ClientSocket, SD_SEND); if (Result == SOCKET_ERROR) { printf("shutdown failed with error: %d\n", WSAGetLastError()); goto Exit; } Exit: // cleanup if (ListenSocket != INVALID_SOCKET) { closesocket(ListenSocket); } if (ClientSocket != INVALID_SOCKET) { closesocket(ClientSocket); } // Analogous to `unlink` DeleteFileA(SERVER_SOCKET); WSACleanup(); return 0; }
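A hedged client-side counterpart to the server above: connect to the same AF_UNIX path and read what the server sends. Error handling is trimmed for brevity, and the path must match SERVER_SOCKET.

#include <winsock2.h>
#include <afunix.h>
#include <stdio.h>
#include <string.h>
#pragma comment(lib, "Ws2_32")

int client_main(void) {
    WSADATA wsa;
    if (WSAStartup(MAKEWORD(2, 2), &wsa) != 0) return 1;
    SOCKET s = socket(AF_UNIX, SOCK_STREAM, 0);
    if (s == INVALID_SOCKET) { WSACleanup(); return 1; }
    SOCKADDR_UN addr;
    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    strncpy(addr.sun_path, "server.sock", sizeof(addr.sun_path) - 1);
    if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) == SOCKET_ERROR) {
        closesocket(s); WSACleanup(); return 1;
    }
    char buf[256];
    int n = recv(s, buf, (int)sizeof(buf) - 1, 0);
    if (n > 0) { buf[n] = '\0'; printf("Received: '%s'\n", buf); }
    closesocket(s);
    WSACleanup();
    return 0;
}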
dandycheung/baulk
lib/mem/mimalloc/src/segment.c
/* ---------------------------------------------------------------------------- Copyright (c) 2018-2020, Microsoft Research, <NAME> This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. -----------------------------------------------------------------------------*/ #include "mimalloc.h" #include "mimalloc-internal.h" #include "mimalloc-atomic.h" #include <string.h> // memset #include <stdio.h> #define MI_PAGE_HUGE_ALIGN (256*1024) static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_stats_t* stats); // ------------------------------------------------------------------- // commit mask // ------------------------------------------------------------------- static bool mi_commit_mask_all_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) { for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { if ((commit->mask[i] & cm->mask[i]) != cm->mask[i]) return false; } return true; } static bool mi_commit_mask_any_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) { for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { if ((commit->mask[i] & cm->mask[i]) != 0) return true; } return false; } static void mi_commit_mask_create_intersect(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm, mi_commit_mask_t* res) { for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { res->mask[i] = (commit->mask[i] & cm->mask[i]); } } static void mi_commit_mask_clear(mi_commit_mask_t* res, const mi_commit_mask_t* cm) { for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { res->mask[i] &= ~(cm->mask[i]); } } static void mi_commit_mask_set(mi_commit_mask_t* res, const mi_commit_mask_t* cm) { for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { res->mask[i] |= cm->mask[i]; } } static void mi_commit_mask_create(size_t bitidx, size_t bitcount, mi_commit_mask_t* cm) { mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS); mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS); if (bitcount == MI_COMMIT_MASK_BITS) { mi_assert_internal(bitidx==0); mi_commit_mask_create_full(cm); } else if (bitcount == 0) { mi_commit_mask_create_empty(cm); } else { mi_commit_mask_create_empty(cm); size_t i = bitidx / MI_COMMIT_MASK_FIELD_BITS; size_t ofs = bitidx % MI_COMMIT_MASK_FIELD_BITS; while (bitcount > 0) { mi_assert_internal(i < MI_COMMIT_MASK_FIELD_COUNT); size_t avail = MI_COMMIT_MASK_FIELD_BITS - ofs; size_t count = (bitcount > avail ? avail : bitcount); size_t mask = (count >= MI_COMMIT_MASK_FIELD_BITS ? 
~((size_t)0) : (((size_t)1 << count) - 1) << ofs); cm->mask[i] = mask; bitcount -= count; ofs = 0; i++; } } } size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total) { mi_assert_internal((total%MI_COMMIT_MASK_BITS)==0); size_t count = 0; for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { size_t mask = cm->mask[i]; if (~mask == 0) { count += MI_COMMIT_MASK_FIELD_BITS; } else { for (; mask != 0; mask >>= 1) { // todo: use popcount if ((mask&1)!=0) count++; } } } // we use total since for huge segments each commit bit may represent a larger size return ((total / MI_COMMIT_MASK_BITS) * count); } size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx) { size_t i = (*idx) / MI_COMMIT_MASK_FIELD_BITS; size_t ofs = (*idx) % MI_COMMIT_MASK_FIELD_BITS; size_t mask = 0; // find first ones while (i < MI_COMMIT_MASK_FIELD_COUNT) { mask = cm->mask[i]; mask >>= ofs; if (mask != 0) { while ((mask&1) == 0) { mask >>= 1; ofs++; } break; } i++; ofs = 0; } if (i >= MI_COMMIT_MASK_FIELD_COUNT) { // not found *idx = MI_COMMIT_MASK_BITS; return 0; } else { // found, count ones size_t count = 0; *idx = (i*MI_COMMIT_MASK_FIELD_BITS) + ofs; do { mi_assert_internal(ofs < MI_COMMIT_MASK_FIELD_BITS && (mask&1) == 1); do { count++; mask >>= 1; } while ((mask&1) == 1); if ((((*idx + count) % MI_COMMIT_MASK_FIELD_BITS) == 0)) { i++; if (i >= MI_COMMIT_MASK_FIELD_COUNT) break; mask = cm->mask[i]; ofs = 0; } } while ((mask&1) == 1); mi_assert_internal(count > 0); return count; } } /* -------------------------------------------------------------------------------- Segment allocation If a thread ends, it "abandons" pages with used blocks and there is an abandoned segment list whose segments can be reclaimed by still running threads, much like work-stealing. 
-------------------------------------------------------------------------------- */ /* ----------------------------------------------------------- Slices ----------------------------------------------------------- */ static const mi_slice_t* mi_segment_slices_end(const mi_segment_t* segment) { return &segment->slices[segment->slice_entries]; } static uint8_t* mi_slice_start(const mi_slice_t* slice) { mi_segment_t* segment = _mi_ptr_segment(slice); mi_assert_internal(slice >= segment->slices && slice < mi_segment_slices_end(segment)); return ((uint8_t*)segment + ((slice - segment->slices)*MI_SEGMENT_SLICE_SIZE)); } /* ----------------------------------------------------------- Bins ----------------------------------------------------------- */ // Use bit scan forward to quickly find the first zero bit if it is available static inline size_t mi_slice_bin8(size_t slice_count) { if (slice_count<=1) return slice_count; mi_assert_internal(slice_count <= MI_SLICES_PER_SEGMENT); slice_count--; size_t s = mi_bsr(slice_count); // slice_count > 1 if (s <= 2) return slice_count + 1; size_t bin = ((s << 2) | ((slice_count >> (s - 2))&0x03)) - 4; return bin; } static inline size_t mi_slice_bin(size_t slice_count) { mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_SEGMENT_SIZE); mi_assert_internal(mi_slice_bin8(MI_SLICES_PER_SEGMENT) <= MI_SEGMENT_BIN_MAX); size_t bin = mi_slice_bin8(slice_count); mi_assert_internal(bin <= MI_SEGMENT_BIN_MAX); return bin; } static inline size_t mi_slice_index(const mi_slice_t* slice) { mi_segment_t* segment = _mi_ptr_segment(slice); ptrdiff_t index = slice - segment->slices; mi_assert_internal(index >= 0 && index < (ptrdiff_t)segment->slice_entries); return index; } /* ----------------------------------------------------------- Slice span queues ----------------------------------------------------------- */ static void mi_span_queue_push(mi_span_queue_t* sq, mi_slice_t* slice) { // todo: or push to the end? 
mi_assert_internal(slice->prev == NULL && slice->next==NULL); slice->prev = NULL; // paranoia slice->next = sq->first; sq->first = slice; if (slice->next != NULL) slice->next->prev = slice; else sq->last = slice; slice->xblock_size = 0; // free } static mi_span_queue_t* mi_span_queue_for(size_t slice_count, mi_segments_tld_t* tld) { size_t bin = mi_slice_bin(slice_count); mi_span_queue_t* sq = &tld->spans[bin]; mi_assert_internal(sq->slice_count >= slice_count); return sq; } static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) { mi_assert_internal(slice->xblock_size==0 && slice->slice_count>0 && slice->slice_offset==0); // should work too if the queue does not contain slice (which can happen during reclaim) if (slice->prev != NULL) slice->prev->next = slice->next; if (slice == sq->first) sq->first = slice->next; if (slice->next != NULL) slice->next->prev = slice->prev; if (slice == sq->last) sq->last = slice->prev; slice->prev = NULL; slice->next = NULL; slice->xblock_size = 1; // no more free } /* ----------------------------------------------------------- Invariant checking ----------------------------------------------------------- */ static bool mi_slice_is_used(const mi_slice_t* slice) { return (slice->xblock_size > 0); } #if (MI_DEBUG>=3) static bool mi_span_queue_contains(mi_span_queue_t* sq, mi_slice_t* slice) { for (mi_slice_t* s = sq->first; s != NULL; s = s->next) { if (s==slice) return true; } return false; } static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) { mi_assert_internal(segment != NULL); mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie); mi_assert_internal(segment->abandoned <= segment->used); mi_assert_internal(segment->thread_id == 0 || segment->thread_id == _mi_thread_id()); mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask)); // can only decommit committed blocks //mi_assert_internal(segment->segment_info_size % MI_SEGMENT_SLICE_SIZE == 0); mi_slice_t* slice = &segment->slices[0]; const mi_slice_t* end = mi_segment_slices_end(segment); size_t used_count = 0; mi_span_queue_t* sq; while(slice < end) { mi_assert_internal(slice->slice_count > 0); mi_assert_internal(slice->slice_offset == 0); size_t index = mi_slice_index(slice); size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? 
segment->slice_entries : index + slice->slice_count) - 1; if (mi_slice_is_used(slice)) { // a page in use, we need at least MAX_SLICE_OFFSET valid back offsets used_count++; for (size_t i = 0; i <= MI_MAX_SLICE_OFFSET && index + i <= maxindex; i++) { mi_assert_internal(segment->slices[index + i].slice_offset == i*sizeof(mi_slice_t)); mi_assert_internal(i==0 || segment->slices[index + i].slice_count == 0); mi_assert_internal(i==0 || segment->slices[index + i].xblock_size == 1); } // and the last entry as well (for coalescing) const mi_slice_t* last = slice + slice->slice_count - 1; if (last > slice && last < mi_segment_slices_end(segment)) { mi_assert_internal(last->slice_offset == (slice->slice_count-1)*sizeof(mi_slice_t)); mi_assert_internal(last->slice_count == 0); mi_assert_internal(last->xblock_size == 1); } } else { // free range of slices; only last slice needs a valid back offset mi_slice_t* last = &segment->slices[maxindex]; if (segment->kind != MI_SEGMENT_HUGE || slice->slice_count <= (segment->slice_entries - segment->segment_info_slices)) { mi_assert_internal((uint8_t*)slice == (uint8_t*)last - last->slice_offset); } mi_assert_internal(slice == last || last->slice_count == 0 ); mi_assert_internal(last->xblock_size == 0 || (segment->kind==MI_SEGMENT_HUGE && last->xblock_size==1)); if (segment->kind != MI_SEGMENT_HUGE && segment->thread_id != 0) { // segment is not huge or abandoned sq = mi_span_queue_for(slice->slice_count,tld); mi_assert_internal(mi_span_queue_contains(sq,slice)); } } slice = &segment->slices[maxindex+1]; } mi_assert_internal(slice == end); mi_assert_internal(used_count == segment->used + 1); return true; } #endif /* ----------------------------------------------------------- Segment size calculations ----------------------------------------------------------- */ static size_t mi_segment_info_size(mi_segment_t* segment) { return segment->segment_info_slices * MI_SEGMENT_SLICE_SIZE; } static uint8_t* _mi_segment_page_start_from_slice(const mi_segment_t* segment, const mi_slice_t* slice, size_t xblock_size, size_t* page_size) { ptrdiff_t idx = slice - segment->slices; size_t psize = (size_t)slice->slice_count * MI_SEGMENT_SLICE_SIZE; // make the start not OS page aligned for smaller blocks to avoid page/cache effects size_t start_offset = (xblock_size >= MI_INTPTR_SIZE && xblock_size <= 1024 ? 
MI_MAX_ALIGN_GUARANTEE : 0); if (page_size != NULL) { *page_size = psize - start_offset; } return (uint8_t*)segment + ((idx*MI_SEGMENT_SLICE_SIZE) + start_offset); } // Start of the page available memory; can be used on uninitialized pages uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) { const mi_slice_t* slice = mi_page_to_slice((mi_page_t*)page); uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, page->xblock_size, page_size); mi_assert_internal(page->xblock_size > 0 || _mi_ptr_page(p) == page); mi_assert_internal(_mi_ptr_segment(p) == segment); return p; } static size_t mi_segment_calculate_slices(size_t required, size_t* pre_size, size_t* info_slices) { size_t page_size = _mi_os_page_size(); size_t isize = _mi_align_up(sizeof(mi_segment_t), page_size); size_t guardsize = 0; if (MI_SECURE>0) { // in secure mode, we set up a protected page in between the segment info // and the page data (and one at the end of the segment) guardsize = page_size; required = _mi_align_up(required, page_size); } if (pre_size != NULL) *pre_size = isize; isize = _mi_align_up(isize + guardsize, MI_SEGMENT_SLICE_SIZE); if (info_slices != NULL) *info_slices = isize / MI_SEGMENT_SLICE_SIZE; size_t segment_size = (required==0 ? MI_SEGMENT_SIZE : _mi_align_up( required + isize + guardsize, MI_SEGMENT_SLICE_SIZE) ); mi_assert_internal(segment_size % MI_SEGMENT_SLICE_SIZE == 0); return (segment_size / MI_SEGMENT_SLICE_SIZE); } /* ---------------------------------------------------------------------------- Segment caches We keep a small segment cache per thread to increase local reuse and avoid setting/clearing guard pages in secure mode. ------------------------------------------------------------------------------- */ static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) { if (segment_size>=0) _mi_stat_increase(&tld->stats->segments,1); else _mi_stat_decrease(&tld->stats->segments,1); tld->count += (segment_size >= 0 ? 1 : -1); if (tld->count > tld->peak_count) tld->peak_count = tld->count; tld->current_size += segment_size; if (tld->current_size > tld->peak_size) tld->peak_size = tld->current_size; } static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) { segment->thread_id = 0; _mi_segment_map_freed_at(segment); mi_segments_track_size(-((long)mi_segment_size(segment)),tld); if (MI_SECURE>0) { // _mi_os_unprotect(segment, mi_segment_size(segment)); // ensure no more guard pages are set // unprotect the guard pages; we cannot just unprotect the whole segment size as part may be decommitted size_t os_pagesize = _mi_os_page_size(); _mi_os_unprotect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize); uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize; _mi_os_unprotect(end, os_pagesize); } // purge delayed decommits now? 
(no, leave it to the cache) // mi_segment_delayed_decommit(segment,true,tld->stats); // _mi_os_free(segment, mi_segment_size(segment), /*segment->memid,*/ tld->stats); const size_t size = mi_segment_size(segment); if (size != MI_SEGMENT_SIZE || !_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os)) { const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size); if (csize > 0 && !segment->mem_is_pinned) _mi_stat_decrease(&_mi_stats_main.committed, csize); _mi_abandoned_await_readers(); // wait until safe to free _mi_arena_free(segment, mi_segment_size(segment), segment->memid, segment->mem_is_pinned /* pretend not committed to not double count decommits */, tld->os); } } // called by threads that are terminating void _mi_segment_thread_collect(mi_segments_tld_t* tld) { MI_UNUSED(tld); // nothing to do } /* ----------------------------------------------------------- Span management ----------------------------------------------------------- */ static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uint8_t* p, size_t size, uint8_t** start_p, size_t* full_size, mi_commit_mask_t* cm) { mi_assert_internal(_mi_ptr_segment(p) == segment); mi_assert_internal(segment->kind != MI_SEGMENT_HUGE); mi_commit_mask_create_empty(cm); if (size == 0 || size > MI_SEGMENT_SIZE || segment->kind == MI_SEGMENT_HUGE) return; const size_t segstart = mi_segment_info_size(segment); const size_t segsize = mi_segment_size(segment); if (p >= (uint8_t*)segment + segsize) return; size_t pstart = (p - (uint8_t*)segment); mi_assert_internal(pstart + size <= segsize); size_t start; size_t end; if (conservative) { // decommit conservative start = _mi_align_up(pstart, MI_COMMIT_SIZE); end = _mi_align_down(pstart + size, MI_COMMIT_SIZE); mi_assert_internal(start >= segstart); mi_assert_internal(end <= segsize); } else { // commit liberal start = _mi_align_down(pstart, MI_MINIMAL_COMMIT_SIZE); end = _mi_align_up(pstart + size, MI_MINIMAL_COMMIT_SIZE); } if (pstart >= segstart && start < segstart) { // note: the mask is also calculated for an initial commit of the info area start = segstart; } if (end > segsize) { end = segsize; } mi_assert_internal(start <= pstart && (pstart + size) <= end); mi_assert_internal(start % MI_COMMIT_SIZE==0 && end % MI_COMMIT_SIZE == 0); *start_p = (uint8_t*)segment + start; *full_size = (end > start ? end - start : 0); if (*full_size == 0) return; size_t bitidx = start / MI_COMMIT_SIZE; mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS); size_t bitcount = *full_size / MI_COMMIT_SIZE; // can be 0 if (bitidx + bitcount > MI_COMMIT_MASK_BITS) { _mi_warning_message("commit mask overflow: idx=%zu count=%zu start=%zx end=%zx p=0x%p size=%zu fullsize=%zu\n", bitidx, bitcount, start, end, p, size, *full_size); } mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS); mi_commit_mask_create(bitidx, bitcount, cm); } static bool mi_segment_commitx(mi_segment_t* segment, bool commit, uint8_t* p, size_t size, mi_stats_t* stats) { mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask)); // try to commit in at least MI_MINIMAL_COMMIT_SIZE sizes. 
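/* Added note: `mi_segment_commit_mask` above rounds asymmetrically: for a commit the range
   [pstart, pstart+size) is rounded outward (start down, end up) so at least the requested range
   becomes committed, while for a decommit it is rounded inward (start up, end down) so only
   commit blocks that are fully covered are decommitted and memory still in use by an adjacent
   span is never decommitted. */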
/* if (commit && size > 0) { const size_t csize = _mi_align_up(size, MI_MINIMAL_COMMIT_SIZE); if (p + csize <= mi_segment_end(segment)) { size = csize; } } */ // commit liberal, but decommit conservative uint8_t* start = NULL; size_t full_size = 0; mi_commit_mask_t mask; mi_segment_commit_mask(segment, !commit/*conservative*/, p, size, &start, &full_size, &mask); if (mi_commit_mask_is_empty(&mask) || full_size==0) return true; if (commit && !mi_commit_mask_all_set(&segment->commit_mask, &mask)) { bool is_zero = false; mi_commit_mask_t cmask; mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); _mi_stat_decrease(&_mi_stats_main.committed, _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap if (!_mi_os_commit(start,full_size,&is_zero,stats)) return false; mi_commit_mask_set(&segment->commit_mask, &mask); } else if (!commit && mi_commit_mask_any_set(&segment->commit_mask, &mask)) { mi_assert_internal((void*)start != (void*)segment); //mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &mask)); mi_commit_mask_t cmask; mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); _mi_stat_increase(&_mi_stats_main.committed, full_size - _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap if (segment->allow_decommit) { _mi_os_decommit(start, full_size, stats); // ok if this fails } mi_commit_mask_clear(&segment->commit_mask, &mask); } // increase expiration of reusing part of the delayed decommit if (commit && mi_commit_mask_any_set(&segment->decommit_mask, &mask)) { segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_decommit_delay); } // always undo delayed decommits mi_commit_mask_clear(&segment->decommit_mask, &mask); return true; } static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask)); // note: assumes commit_mask is always full for huge segments as otherwise the commit mask bits can overflow if (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->decommit_mask)) return true; // fully committed return mi_segment_commitx(segment,true,p,size,stats); } static void mi_segment_perhaps_decommit(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { if (!segment->allow_decommit) return; if (mi_option_get(mi_option_decommit_delay) == 0) { mi_segment_commitx(segment, false, p, size, stats); } else { // register for future decommit in the decommit mask uint8_t* start = NULL; size_t full_size = 0; mi_commit_mask_t mask; mi_segment_commit_mask(segment, true /*conservative*/, p, size, &start, &full_size, &mask); if (mi_commit_mask_is_empty(&mask) || full_size==0) return; // update delayed commit mi_assert_internal(segment->decommit_expire > 0 || mi_commit_mask_is_empty(&segment->decommit_mask)); mi_commit_mask_t cmask; mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); // only decommit what is committed; span_free may try to decommit more mi_commit_mask_set(&segment->decommit_mask, &cmask); mi_msecs_t now = _mi_clock_now(); if (segment->decommit_expire == 0) { // no previous decommits, initialize now segment->decommit_expire = now + mi_option_get(mi_option_decommit_delay); } else if (segment->decommit_expire <= now) { // previous decommit mask already expired // mi_segment_delayed_decommit(segment, true, stats); segment->decommit_expire = now + 
mi_option_get(mi_option_decommit_extend_delay); // (mi_option_get(mi_option_decommit_delay) / 8); // wait a tiny bit longer in case there is a series of frees } else { // previous decommit mask is not yet expired, increase the expiration by a bit. segment->decommit_expire += mi_option_get(mi_option_decommit_extend_delay); } } } static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_stats_t* stats) { if (!segment->allow_decommit || mi_commit_mask_is_empty(&segment->decommit_mask)) return; mi_msecs_t now = _mi_clock_now(); if (!force && now < segment->decommit_expire) return; mi_commit_mask_t mask = segment->decommit_mask; segment->decommit_expire = 0; mi_commit_mask_create_empty(&segment->decommit_mask); size_t idx; size_t count; mi_commit_mask_foreach(&mask, idx, count) { // if found, decommit that sequence if (count > 0) { uint8_t* p = (uint8_t*)segment + (idx*MI_COMMIT_SIZE); size_t size = count * MI_COMMIT_SIZE; mi_segment_commitx(segment, false, p, size, stats); } } mi_commit_mask_foreach_end() mi_assert_internal(mi_commit_mask_is_empty(&segment->decommit_mask)); } static bool mi_segment_is_abandoned(mi_segment_t* segment) { return (segment->thread_id == 0); } // note: can be called on abandoned segments static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size_t slice_count, mi_segments_tld_t* tld) { mi_assert_internal(slice_index < segment->slice_entries); mi_span_queue_t* sq = (segment->kind == MI_SEGMENT_HUGE || mi_segment_is_abandoned(segment) ? NULL : mi_span_queue_for(slice_count,tld)); if (slice_count==0) slice_count = 1; mi_assert_internal(slice_index + slice_count - 1 < segment->slice_entries); // set first and last slice (the intermediates can be undetermined) mi_slice_t* slice = &segment->slices[slice_index]; slice->slice_count = (uint32_t)slice_count; mi_assert_internal(slice->slice_count == slice_count); // no overflow?
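/* Added note: a span is encoded only in its boundary slices: the first slice carries the span
   length in `slice_count`, and (below) the last slice gets `slice_count == 0` with a
   `slice_offset` pointing back to the first slice, so `mi_segment_span_free_coalesce` can find
   the start of a neighbouring free span in O(1). */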
slice->slice_offset = 0; if (slice_count > 1) { mi_slice_t* last = &segment->slices[slice_index + slice_count - 1]; last->slice_count = 0; last->slice_offset = (uint32_t)(sizeof(mi_page_t)*(slice_count - 1)); last->xblock_size = 0; } // perhaps decommit mi_segment_perhaps_decommit(segment,mi_slice_start(slice),slice_count*MI_SEGMENT_SLICE_SIZE,tld->stats); // and push it on the free page queue (if it was not a huge page) if (sq != NULL) mi_span_queue_push( sq, slice ); else slice->xblock_size = 0; // mark huge page as free anyways } /* // called from reclaim to add existing free spans static void mi_segment_span_add_free(mi_slice_t* slice, mi_segments_tld_t* tld) { mi_segment_t* segment = _mi_ptr_segment(slice); mi_assert_internal(slice->xblock_size==0 && slice->slice_count>0 && slice->slice_offset==0); size_t slice_index = mi_slice_index(slice); mi_segment_span_free(segment,slice_index,slice->slice_count,tld); } */ static void mi_segment_span_remove_from_queue(mi_slice_t* slice, mi_segments_tld_t* tld) { mi_assert_internal(slice->slice_count > 0 && slice->slice_offset==0 && slice->xblock_size==0); mi_assert_internal(_mi_ptr_segment(slice)->kind != MI_SEGMENT_HUGE); mi_span_queue_t* sq = mi_span_queue_for(slice->slice_count, tld); mi_span_queue_delete(sq, slice); } // note: can be called on abandoned segments static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_tld_t* tld) { mi_assert_internal(slice != NULL && slice->slice_count > 0 && slice->slice_offset == 0); mi_segment_t* segment = _mi_ptr_segment(slice); bool is_abandoned = mi_segment_is_abandoned(segment); // for huge pages, just mark as free but don't add to the queues if (segment->kind == MI_SEGMENT_HUGE) { mi_assert_internal(segment->used == 1); // decreased right after this call in `mi_segment_page_clear` slice->xblock_size = 0; // mark as free anyways // we should mark the last slice `xblock_size=0` now to maintain invariants but we skip it to // avoid a possible cache miss (and the segment is about to be freed) return slice; } // otherwise coalesce the span and add to the free span queues size_t slice_count = slice->slice_count; mi_slice_t* next = slice + slice->slice_count; mi_assert_internal(next <= mi_segment_slices_end(segment)); if (next < mi_segment_slices_end(segment) && next->xblock_size==0) { // free next block -- remove it from free and merge mi_assert_internal(next->slice_count > 0 && next->slice_offset==0); slice_count += next->slice_count; // extend if (!is_abandoned) { mi_segment_span_remove_from_queue(next, tld); } } if (slice > segment->slices) { mi_slice_t* prev = mi_slice_first(slice - 1); mi_assert_internal(prev >= segment->slices); if (prev->xblock_size==0) { // free previous slice -- remove it from free and merge mi_assert_internal(prev->slice_count > 0 && prev->slice_offset==0); slice_count += prev->slice_count; if (!is_abandoned) { mi_segment_span_remove_from_queue(prev, tld); } slice = prev; } } // and add the new free page mi_segment_span_free(segment, mi_slice_index(slice), slice_count, tld); return slice; } static void mi_segment_slice_split(mi_segment_t* segment, mi_slice_t* slice, size_t slice_count, mi_segments_tld_t* tld) { mi_assert_internal(_mi_ptr_segment(slice)==segment); mi_assert_internal(slice->slice_count >= slice_count); mi_assert_internal(slice->xblock_size > 0); // no more in free queue if (slice->slice_count <= slice_count) return; mi_assert_internal(segment->kind != MI_SEGMENT_HUGE); size_t next_index = mi_slice_index(slice) + slice_count; size_t next_count 
= slice->slice_count - slice_count; mi_segment_span_free(segment, next_index, next_count, tld); slice->slice_count = (uint32_t)slice_count; } // Note: may still return NULL if committing the memory failed static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_index, size_t slice_count, mi_segments_tld_t* tld) { mi_assert_internal(slice_index < segment->slice_entries); mi_slice_t* slice = &segment->slices[slice_index]; mi_assert_internal(slice->xblock_size==0 || slice->xblock_size==1); // commit before changing the slice data if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats)) { return NULL; // commit failed! } // convert the slices to a page slice->slice_offset = 0; slice->slice_count = (uint32_t)slice_count; mi_assert_internal(slice->slice_count == slice_count); const size_t bsize = slice_count * MI_SEGMENT_SLICE_SIZE; slice->xblock_size = (uint32_t)(bsize >= MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : bsize); mi_page_t* page = mi_slice_to_page(slice); mi_assert_internal(mi_page_block_size(page) == bsize); // set slice back pointers for the first MI_MAX_SLICE_OFFSET entries size_t extra = slice_count-1; if (extra > MI_MAX_SLICE_OFFSET) extra = MI_MAX_SLICE_OFFSET; if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than available entries in the segment->slices slice++; for (size_t i = 1; i <= extra; i++, slice++) { slice->slice_offset = (uint32_t)(sizeof(mi_slice_t)*i); slice->slice_count = 0; slice->xblock_size = 1; } // and also for the last one (if not set already) (the last one is needed for coalescing) // note: the cast is needed for ubsan since the index can be larger than MI_SLICES_PER_SEGMENT for huge allocations (see #543) mi_slice_t* last = &((mi_slice_t*)segment->slices)[slice_index + slice_count - 1]; if (last < mi_segment_slices_end(segment) && last >= slice) { last->slice_offset = (uint32_t)(sizeof(mi_slice_t)*(slice_count-1)); last->slice_count = 0; last->xblock_size = 1; } // and initialize the page page->is_reset = false; page->is_committed = true; segment->used++; return page; } static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_segments_tld_t* tld) { mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_LARGE_OBJ_SIZE_MAX); // search from best fit up mi_span_queue_t* sq = mi_span_queue_for(slice_count, tld); if (slice_count == 0) slice_count = 1; while (sq <= &tld->spans[MI_SEGMENT_BIN_MAX]) { for (mi_slice_t* slice = sq->first; slice != NULL; slice = slice->next) { if (slice->slice_count >= slice_count) { // found one mi_span_queue_delete(sq, slice); mi_segment_t* segment = _mi_ptr_segment(slice); if (slice->slice_count > slice_count) { mi_segment_slice_split(segment, slice, slice_count, tld); } mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->xblock_size > 0); mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count, tld); if (page == NULL) { // commit failed; return NULL but first restore the slice mi_segment_span_free_coalesce(slice, tld); return NULL; } return page; } } sq++; } // could not find a page.. return NULL; } /* ----------------------------------------------------------- Segment allocation ----------------------------------------------------------- */ // Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE`.
static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) { mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL)); mi_assert_internal((segment==NULL) || (segment!=NULL && required==0)); // calculate needed sizes first size_t info_slices; size_t pre_size; const size_t segment_slices = mi_segment_calculate_slices(required, &pre_size, &info_slices); const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices); const size_t segment_size = segment_slices * MI_SEGMENT_SLICE_SIZE; // Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little) const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems _mi_current_thread_count() > 1 && // do not delay for the first N threads tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay)); const bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit); bool commit = eager || (required > 0); // Try to get from our cache first bool is_zero = false; const bool commit_info_still_good = (segment != NULL); mi_commit_mask_t commit_mask; mi_commit_mask_t decommit_mask; if (segment != NULL) { commit_mask = segment->commit_mask; decommit_mask = segment->decommit_mask; } else { mi_commit_mask_create_empty(&commit_mask); mi_commit_mask_create_empty(&decommit_mask); } if (segment==NULL) { // Allocate the segment from the OS bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy bool is_pinned = false; size_t memid = 0; segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, &commit_mask, &decommit_mask, &mem_large, &is_pinned, &is_zero, &memid, os_tld); if (segment==NULL) { segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, &memid, os_tld); if (segment == NULL) return NULL; // failed to allocate if (commit) { mi_commit_mask_create_full(&commit_mask); } else { mi_commit_mask_create_empty(&commit_mask); } } mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0); const size_t commit_needed = _mi_divide_up(info_slices*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE); mi_assert_internal(commit_needed>0); mi_commit_mask_t commit_needed_mask; mi_commit_mask_create(0, commit_needed, &commit_needed_mask); if (!mi_commit_mask_all_set(&commit_mask, &commit_needed_mask)) { // at least commit the info slices mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= info_slices*MI_SEGMENT_SLICE_SIZE); bool ok = _mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, &is_zero, tld->stats); if (!ok) return NULL; // failed to commit mi_commit_mask_set(&commit_mask, &commit_needed_mask); } segment->memid = memid; segment->mem_is_pinned = is_pinned; segment->mem_is_large = mem_large; segment->mem_is_committed = mi_commit_mask_is_full(&commit_mask); mi_segments_track_size((long)(segment_size), tld); _mi_segment_map_allocated_at(segment); } // zero the segment info? 
-- not always needed as it is zero initialized from the OS mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan if (!is_zero) { ptrdiff_t ofs = offsetof(mi_segment_t, next); size_t prefix = offsetof(mi_segment_t, slices) - ofs; memset((uint8_t*)segment+ofs, 0, prefix + sizeof(mi_slice_t)*segment_slices); } if (!commit_info_still_good) { segment->commit_mask = commit_mask; // on lazy commit, the initial part is always committed segment->allow_decommit = (mi_option_is_enabled(mi_option_allow_decommit) && !segment->mem_is_pinned && !segment->mem_is_large); if (segment->allow_decommit) { segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_decommit_delay); segment->decommit_mask = decommit_mask; mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask)); #if MI_DEBUG>2 const size_t commit_needed = _mi_divide_up(info_slices*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE); mi_commit_mask_t commit_needed_mask; mi_commit_mask_create(0, commit_needed, &commit_needed_mask); mi_assert_internal(!mi_commit_mask_any_set(&segment->decommit_mask, &commit_needed_mask)); #endif } else { mi_assert_internal(mi_commit_mask_is_empty(&decommit_mask)); segment->decommit_expire = 0; mi_commit_mask_create_empty( &segment->decommit_mask ); mi_assert_internal(mi_commit_mask_is_empty(&segment->decommit_mask)); } } // initialize segment info segment->segment_slices = segment_slices; segment->segment_info_slices = info_slices; segment->thread_id = _mi_thread_id(); segment->cookie = _mi_ptr_cookie(segment); segment->slice_entries = slice_entries; segment->kind = (required == 0 ? MI_SEGMENT_NORMAL : MI_SEGMENT_HUGE); // memset(segment->slices, 0, sizeof(mi_slice_t)*(info_slices+1)); _mi_stat_increase(&tld->stats->page_committed, mi_segment_info_size(segment)); // set up guard pages size_t guard_slices = 0; if (MI_SECURE>0) { // in secure mode, we set up a protected page in between the segment info // and the page data, and at the end of the segment. 
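/* Added note: the secure-mode layout is thus [segment info | guard page | page data ... | guard page];
   the last slice entry is given up below so the trailing guard page never overlaps usable page data. */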
size_t os_pagesize = _mi_os_page_size(); mi_assert_internal(mi_segment_info_size(segment) - os_pagesize >= pre_size); _mi_os_protect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize); uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize; mi_segment_ensure_committed(segment, end, os_pagesize, tld->stats); _mi_os_protect(end, os_pagesize); if (slice_entries == segment_slices) segment->slice_entries--; // don't use the last slice :-( guard_slices = 1; } // reserve first slices for segment info mi_page_t* page0 = mi_segment_span_allocate(segment, 0, info_slices, tld); mi_assert_internal(page0!=NULL); if (page0==NULL) return NULL; // cannot fail as we always commit in advance mi_assert_internal(segment->used == 1); segment->used = 0; // don't count our internal slices towards usage // initialize initial free pages if (segment->kind == MI_SEGMENT_NORMAL) { // not a huge page mi_assert_internal(huge_page==NULL); mi_segment_span_free(segment, info_slices, segment->slice_entries - info_slices, tld); } else { mi_assert_internal(huge_page!=NULL); mi_assert_internal(mi_commit_mask_is_empty(&segment->decommit_mask)); mi_assert_internal(mi_commit_mask_is_full(&segment->commit_mask)); *huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices, tld); mi_assert_internal(*huge_page != NULL); // cannot fail as we commit in advance } mi_assert_expensive(mi_segment_is_valid(segment,tld)); return segment; } // Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE`. static mi_segment_t* mi_segment_alloc(size_t required, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) { return mi_segment_init(NULL, required, tld, os_tld, huge_page); } static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) { MI_UNUSED(force); mi_assert_internal(segment != NULL); mi_assert_internal(segment->next == NULL); mi_assert_internal(segment->used == 0); // Remove the free pages mi_slice_t* slice = &segment->slices[0]; const mi_slice_t* end = mi_segment_slices_end(segment); size_t page_count = 0; while (slice < end) { mi_assert_internal(slice->slice_count > 0); mi_assert_internal(slice->slice_offset == 0); mi_assert_internal(mi_slice_index(slice)==0 || slice->xblock_size == 0); // no more used pages .. if (slice->xblock_size == 0 && segment->kind != MI_SEGMENT_HUGE) { mi_segment_span_remove_from_queue(slice, tld); } page_count++; slice = slice + slice->slice_count; } mi_assert_internal(page_count == 2); // first page is allocated by the segment itself // stats _mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment)); // return it to the OS mi_segment_os_free(segment, tld); } /* ----------------------------------------------------------- Page Free ----------------------------------------------------------- */ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld); // note: can be called on abandoned pages static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld) { mi_assert_internal(page->xblock_size > 0); mi_assert_internal(mi_page_all_free(page)); mi_segment_t* segment = _mi_ptr_segment(page); mi_assert_internal(segment->used > 0); size_t inuse = page->capacity * mi_page_block_size(page); _mi_stat_decrease(&tld->stats->page_committed, inuse); _mi_stat_decrease(&tld->stats->pages, 1); // reset the page memory to reduce memory pressure?
if (!segment->mem_is_pinned && !page->is_reset && mi_option_is_enabled(mi_option_page_reset)) { size_t psize; uint8_t* start = _mi_page_start(segment, page, &psize); page->is_reset = true; _mi_os_reset(start, psize, tld->stats); } // zero the page data, but not the segment fields page->is_zero_init = false; ptrdiff_t ofs = offsetof(mi_page_t, capacity); memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs); page->xblock_size = 1; // and free it mi_slice_t* slice = mi_segment_span_free_coalesce(mi_page_to_slice(page), tld); segment->used--; // cannot assert segment valid as it is called during reclaim // mi_assert_expensive(mi_segment_is_valid(segment, tld)); return slice; } void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld) { mi_assert(page != NULL); mi_segment_t* segment = _mi_page_segment(page); mi_assert_expensive(mi_segment_is_valid(segment,tld)); // mark it as free now mi_segment_page_clear(page, tld); mi_assert_expensive(mi_segment_is_valid(segment, tld)); if (segment->used == 0) { // no more used pages; remove from the free list and free the segment mi_segment_free(segment, force, tld); } else if (segment->used == segment->abandoned) { // only abandoned pages; remove from free list and abandon mi_segment_abandon(segment,tld); } } /* ----------------------------------------------------------- Abandonment When threads terminate, they can leave segments with live blocks (reachable through other threads). Such segments are "abandoned" and will be reclaimed by other threads to reuse their pages and/or free them eventually. We maintain a global list of abandoned segments that are reclaimed on demand. Since this is shared among threads the implementation needs to avoid the A-B-A problem on popping abandoned segments: <https://en.wikipedia.org/wiki/ABA_problem> We use tagged pointers to avoid accidentally identifying reused segments, much like stamped references in Java. Secondly, we maintain a reader counter to avoid resetting or decommitting segments that have a pending read operation. Note: the current implementation is one possible design; another way might be to keep track of abandoned segments in the arenas/segment_cache's. This would have the advantage of keeping all concurrent code in one place and not needing to deal with ABA issues. The drawback is that it is unclear how to scan abandoned segments efficiently in that case as they would be spread among all other segments in the arenas. ----------------------------------------------------------- */ // Use the bottom 20-bits (on 64-bit) of the aligned segment pointers // to put in a tag that increments on update to avoid the A-B-A problem. #define MI_TAGGED_MASK MI_SEGMENT_MASK typedef uintptr_t mi_tagged_segment_t; static mi_segment_t* mi_tagged_segment_ptr(mi_tagged_segment_t ts) { return (mi_segment_t*)(ts & ~MI_TAGGED_MASK); } static mi_tagged_segment_t mi_tagged_segment(mi_segment_t* segment, mi_tagged_segment_t ts) { mi_assert_internal(((uintptr_t)segment & MI_TAGGED_MASK) == 0); uintptr_t tag = ((ts & MI_TAGGED_MASK) + 1) & MI_TAGGED_MASK; return ((uintptr_t)segment | tag); } // This is a list of visited abandoned pages that were full at the time. // This list migrates to `abandoned` when that becomes NULL. The use of // this list reduces contention and the rate at which segments are visited.
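/* Added example of the A-B-A scenario this prevents: pop segment S, free it, and reuse its
   memory for a new segment at the same address that is pushed again. A concurrent pop that still
   holds the old head (S, tag t) would otherwise CAS successfully against the new head and install
   a stale `abandoned_next`; with tagging the new head is (S, t+1), the stale CAS fails, and the
   reader retries. */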
static mi_decl_cache_align _Atomic(mi_segment_t*) abandoned_visited; // = NULL // The abandoned page list (tagged as it supports pop) static mi_decl_cache_align _Atomic(mi_tagged_segment_t) abandoned; // = NULL // Maintain these for debug purposes (these counts may be a bit off) static mi_decl_cache_align _Atomic(size_t) abandoned_count; static mi_decl_cache_align _Atomic(size_t) abandoned_visited_count; // We also maintain a count of current readers of the abandoned list // in order to prevent resetting/decommitting segment memory if it might // still be read. static mi_decl_cache_align _Atomic(size_t) abandoned_readers; // = 0 // Push on the visited list static void mi_abandoned_visited_push(mi_segment_t* segment) { mi_assert_internal(segment->thread_id == 0); mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t,&segment->abandoned_next) == NULL); mi_assert_internal(segment->next == NULL); mi_assert_internal(segment->used > 0); mi_segment_t* anext = mi_atomic_load_ptr_relaxed(mi_segment_t, &abandoned_visited); do { mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, anext); } while (!mi_atomic_cas_ptr_weak_release(mi_segment_t, &abandoned_visited, &anext, segment)); mi_atomic_increment_relaxed(&abandoned_visited_count); } // Move the visited list to the abandoned list. static bool mi_abandoned_visited_revisit(void) { // quick check if the visited list is empty if (mi_atomic_load_ptr_relaxed(mi_segment_t, &abandoned_visited) == NULL) return false; // grab the whole visited list mi_segment_t* first = mi_atomic_exchange_ptr_acq_rel(mi_segment_t, &abandoned_visited, NULL); if (first == NULL) return false; // first try to swap directly if the abandoned list happens to be NULL mi_tagged_segment_t afirst; mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned); if (mi_tagged_segment_ptr(ts)==NULL) { size_t count = mi_atomic_load_relaxed(&abandoned_visited_count); afirst = mi_tagged_segment(first, ts); if (mi_atomic_cas_strong_acq_rel(&abandoned, &ts, afirst)) { mi_atomic_add_relaxed(&abandoned_count, count); mi_atomic_sub_relaxed(&abandoned_visited_count, count); return true; } } // find the last element of the visited list: O(n) mi_segment_t* last = first; mi_segment_t* next; while ((next = mi_atomic_load_ptr_relaxed(mi_segment_t, &last->abandoned_next)) != NULL) { last = next; } // and atomically prepend to the abandoned list // (no need to increase the readers as we don't access the abandoned segments) mi_tagged_segment_t anext = mi_atomic_load_relaxed(&abandoned); size_t count; do { count = mi_atomic_load_relaxed(&abandoned_visited_count); mi_atomic_store_ptr_release(mi_segment_t, &last->abandoned_next, mi_tagged_segment_ptr(anext)); afirst = mi_tagged_segment(first, anext); } while (!mi_atomic_cas_weak_release(&abandoned, &anext, afirst)); mi_atomic_add_relaxed(&abandoned_count, count); mi_atomic_sub_relaxed(&abandoned_visited_count, count); return true; } // Push on the abandoned list. 
static void mi_abandoned_push(mi_segment_t* segment) { mi_assert_internal(segment->thread_id == 0); mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL); mi_assert_internal(segment->next == NULL); mi_assert_internal(segment->used > 0); mi_tagged_segment_t next; mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned); do { mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, mi_tagged_segment_ptr(ts)); next = mi_tagged_segment(segment, ts); } while (!mi_atomic_cas_weak_release(&abandoned, &ts, next)); mi_atomic_increment_relaxed(&abandoned_count); } // Wait until there are no more pending reads on segments that used to be in the abandoned list // called for example from `arena.c` before decommitting void _mi_abandoned_await_readers(void) { size_t n; do { n = mi_atomic_load_acquire(&abandoned_readers); if (n != 0) mi_atomic_yield(); } while (n != 0); } // Pop from the abandoned list static mi_segment_t* mi_abandoned_pop(void) { mi_segment_t* segment; // Check efficiently if it is empty (or if the visited list needs to be moved) mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned); segment = mi_tagged_segment_ptr(ts); if (mi_likely(segment == NULL)) { if (mi_likely(!mi_abandoned_visited_revisit())) { // try to swap in the visited list on NULL return NULL; } } // Do a pop. We use a reader count to prevent // a segment from being decommitted while a read is still pending, // and a tagged pointer to prevent A-B-A link corruption. // (this is called from `region.c:_mi_mem_free` for example) mi_atomic_increment_relaxed(&abandoned_readers); // ensure no segment gets decommitted mi_tagged_segment_t next = 0; ts = mi_atomic_load_acquire(&abandoned); do { segment = mi_tagged_segment_ptr(ts); if (segment != NULL) { mi_segment_t* anext = mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next); next = mi_tagged_segment(anext, ts); // note: reads the segment's `abandoned_next` field so should not be decommitted } } while (segment != NULL && !mi_atomic_cas_weak_acq_rel(&abandoned, &ts, next)); mi_atomic_decrement_relaxed(&abandoned_readers); // release reader lock if (segment != NULL) { mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); mi_atomic_decrement_relaxed(&abandoned_count); } return segment; } /* ----------------------------------------------------------- Abandon segment/page ----------------------------------------------------------- */ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) { mi_assert_internal(segment->used == segment->abandoned); mi_assert_internal(segment->used > 0); mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL); mi_assert_internal(segment->abandoned_visits == 0); mi_assert_expensive(mi_segment_is_valid(segment,tld)); // remove the free pages from the free page queues mi_slice_t* slice = &segment->slices[0]; const mi_slice_t* end = mi_segment_slices_end(segment); while (slice < end) { mi_assert_internal(slice->slice_count > 0); mi_assert_internal(slice->slice_offset == 0); if (slice->xblock_size == 0) { // a free page mi_segment_span_remove_from_queue(slice,tld); slice->xblock_size = 0; // but keep it free } slice = slice + slice->slice_count; } // perform delayed decommits mi_segment_delayed_decommit(segment, mi_option_is_enabled(mi_option_abandoned_page_decommit) /* force?
*/, tld->stats); // all pages in the segment are abandoned; add it to the abandoned list _mi_stat_increase(&tld->stats->segments_abandoned, 1); mi_segments_track_size(-((long)mi_segment_size(segment)), tld); segment->thread_id = 0; mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); segment->abandoned_visits = 1; // from 0 to 1 to signify it is abandoned mi_abandoned_push(segment); } void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) { mi_assert(page != NULL); mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); mi_assert_internal(mi_page_heap(page) == NULL); mi_segment_t* segment = _mi_page_segment(page); mi_assert_expensive(mi_segment_is_valid(segment,tld)); segment->abandoned++; _mi_stat_increase(&tld->stats->pages_abandoned, 1); mi_assert_internal(segment->abandoned <= segment->used); if (segment->used == segment->abandoned) { // all pages are abandoned, abandon the entire segment mi_segment_abandon(segment, tld); } } /* ----------------------------------------------------------- Reclaim abandoned pages ----------------------------------------------------------- */ static mi_slice_t* mi_slices_start_iterate(mi_segment_t* segment, const mi_slice_t** end) { mi_slice_t* slice = &segment->slices[0]; *end = mi_segment_slices_end(segment); mi_assert_internal(slice->slice_count>0 && slice->xblock_size>0); // segment allocated page slice = slice + slice->slice_count; // skip the first segment allocated page return slice; } // Possibly free pages and check if free space is available static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, size_t block_size, mi_segments_tld_t* tld) { mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE); mi_assert_internal(mi_segment_is_abandoned(segment)); bool has_page = false; // for all slices const mi_slice_t* end; mi_slice_t* slice = mi_slices_start_iterate(segment, &end); while (slice < end) { mi_assert_internal(slice->slice_count > 0); mi_assert_internal(slice->slice_offset == 0); if (mi_slice_is_used(slice)) { // used page // ensure used count is up to date and collect potential concurrent frees mi_page_t* const page = mi_slice_to_page(slice); _mi_page_free_collect(page, false); if (mi_page_all_free(page)) { // if this page is all free now, free it without adding to any queues (yet) mi_assert_internal(page->next == NULL && page->prev==NULL); _mi_stat_decrease(&tld->stats->pages_abandoned, 1); segment->abandoned--; slice = mi_segment_page_clear(page, tld); // re-assign slice due to coalesce! mi_assert_internal(!mi_slice_is_used(slice)); if (slice->slice_count >= slices_needed) { has_page = true; } } else { if (page->xblock_size == block_size && mi_page_has_any_available(page)) { // a page has available free blocks of the right size has_page = true; } } } else { // empty span if (slice->slice_count >= slices_needed) { has_page = true; } } slice = slice + slice->slice_count; } return has_page; } // Reclaim an abandoned segment; returns NULL if the segment was freed // set `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full. 
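// Added note: reclaiming transfers ownership: `thread_id` becomes the caller's, used pages are
// re-attached to `heap` (re-enabling delayed free), and free spans are coalesced back into this
// thread's span queues.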
static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) { mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL); mi_assert_expensive(mi_segment_is_valid(segment, tld)); if (right_page_reclaimed != NULL) { *right_page_reclaimed = false; } segment->thread_id = _mi_thread_id(); segment->abandoned_visits = 0; mi_segments_track_size((long)mi_segment_size(segment), tld); mi_assert_internal(segment->next == NULL); _mi_stat_decrease(&tld->stats->segments_abandoned, 1); // for all slices const mi_slice_t* end; mi_slice_t* slice = mi_slices_start_iterate(segment, &end); while (slice < end) { mi_assert_internal(slice->slice_count > 0); mi_assert_internal(slice->slice_offset == 0); if (mi_slice_is_used(slice)) { // in use: reclaim the page in our heap mi_page_t* page = mi_slice_to_page(slice); mi_assert_internal(!page->is_reset); mi_assert_internal(page->is_committed); mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); mi_assert_internal(mi_page_heap(page) == NULL); mi_assert_internal(page->next == NULL && page->prev==NULL); _mi_stat_decrease(&tld->stats->pages_abandoned, 1); segment->abandoned--; // set the heap again and allow delayed free again mi_page_set_heap(page, heap); _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set) _mi_page_free_collect(page, false); // ensure used count is up to date if (mi_page_all_free(page)) { // if everything is free by now, free the page slice = mi_segment_page_clear(page, tld); // set slice again due to coalescing } else { // otherwise reclaim it into the heap _mi_page_reclaim(heap, page); if (requested_block_size == page->xblock_size && mi_page_has_any_available(page)) { if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; } } } } else { // the span is free, add it to our page queues slice = mi_segment_span_free_coalesce(slice, tld); // set slice again due to coalescing } mi_assert_internal(slice->slice_count>0 && slice->slice_offset==0); slice = slice + slice->slice_count; } mi_assert(segment->abandoned == 0); if (segment->used == 0) { // due to page_clear mi_assert_internal(right_page_reclaimed == NULL || !(*right_page_reclaimed)); mi_segment_free(segment, false, tld); return NULL; } else { return segment; } } void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) { mi_segment_t* segment; while ((segment = mi_abandoned_pop()) != NULL) { mi_segment_reclaim(segment, heap, 0, NULL, tld); } } static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slices, size_t block_size, bool* reclaimed, mi_segments_tld_t* tld) { *reclaimed = false; mi_segment_t* segment; long max_tries = mi_option_get_clamp(mi_option_max_segment_reclaim, 8, 1024); // limit the work to bound allocation times while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) { segment->abandoned_visits++; bool has_page = mi_segment_check_free(segment,needed_slices,block_size,tld); // try to free up pages (due to concurrent frees) if (segment->used == 0) { // free the segment (by forced reclaim) to make it available to other threads. // note1: we prefer to free a segment as that might lead to reclaiming another // segment that is still partially used.
// note2: we could in principle optimize this by skipping reclaim and directly // freeing but that would violate some invariants temporarily mi_segment_reclaim(segment, heap, 0, NULL, tld); } else if (has_page) { // found a large enough free span, or a page of the right block_size with free space // we return the result of reclaim (which is usually `segment`) as it might free // the segment due to concurrent frees (in which case `NULL` is returned). return mi_segment_reclaim(segment, heap, block_size, reclaimed, tld); } else if (segment->abandoned_visits > 3) { // always reclaim on 3rd visit to limit the abandoned queue length. mi_segment_reclaim(segment, heap, 0, NULL, tld); } else { // otherwise, push on the visited list so it is not looked at again too soon mi_segment_delayed_decommit(segment, true /* force? */, tld->stats); // forced decommit if needed as we may not visit soon again mi_abandoned_visited_push(segment); } } return NULL; } void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld) { mi_segment_t* segment; int max_tries = (force ? 16*1024 : 1024); // limit latency if (force) { mi_abandoned_visited_revisit(); } while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) { mi_segment_check_free(segment,0,0,tld); // try to free up pages (due to concurrent frees) if (segment->used == 0) { // free the segment (by forced reclaim) to make it available to other threads. // note: we could in principle optimize this by skipping reclaim and directly // freeing but that would violate some invariants temporarily mi_segment_reclaim(segment, heap, 0, NULL, tld); } else { // otherwise, decommit if needed and push on the visited list // note: forced decommit can be expensive if many threads are destroyed/created as in mstress. mi_segment_delayed_decommit(segment, force, tld->stats); mi_abandoned_visited_push(segment); } } } /* ----------------------------------------------------------- Reclaim or allocate ----------------------------------------------------------- */ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE); mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX); // 1. try to reclaim an abandoned segment bool reclaimed; mi_segment_t* segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld); if (reclaimed) { // reclaimed the right page right into the heap mi_assert_internal(segment != NULL); return NULL; // pretend out-of-memory as the page will be in the page queue of the heap with available blocks } else if (segment != NULL) { // reclaimed a segment with a large enough empty span in it return segment; } // 2. otherwise allocate a fresh segment return mi_segment_alloc(0, tld, os_tld, NULL); } /* ----------------------------------------------------------- Page allocation ----------------------------------------------------------- */ static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_kind, size_t required, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { mi_assert_internal(required <= MI_LARGE_OBJ_SIZE_MAX && page_kind <= MI_PAGE_LARGE); // find a free page size_t page_size = _mi_align_up(required, (required > MI_MEDIUM_PAGE_SIZE ?
MI_MEDIUM_PAGE_SIZE : MI_SEGMENT_SLICE_SIZE)); size_t slices_needed = page_size / MI_SEGMENT_SLICE_SIZE; mi_assert_internal(slices_needed * MI_SEGMENT_SLICE_SIZE == page_size); mi_page_t* page = mi_segments_page_find_and_allocate(slices_needed, tld); //(required <= MI_SMALL_SIZE_MAX ? 0 : slices_needed), tld); if (page==NULL) { // no free page, allocate a new segment and try again if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld, os_tld) == NULL) { // OOM or reclaimed a good page in the heap return NULL; } else { // otherwise try again return mi_segments_page_alloc(heap, page_kind, required, block_size, tld, os_tld); } } mi_assert_internal(page != NULL && page->slice_count*MI_SEGMENT_SLICE_SIZE == page_size); mi_assert_internal(_mi_ptr_segment(page)->thread_id == _mi_thread_id()); mi_segment_delayed_decommit(_mi_ptr_segment(page), false, tld->stats); return page; } /* ----------------------------------------------------------- Huge page allocation ----------------------------------------------------------- */ static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { mi_page_t* page = NULL; mi_segment_t* segment = mi_segment_alloc(size,tld,os_tld,&page); if (segment == NULL || page==NULL) return NULL; mi_assert_internal(segment->used==1); mi_assert_internal(mi_page_block_size(page) >= size); segment->thread_id = 0; // huge segments are immediately abandoned return page; } // free huge block from another thread void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) { // huge page segments are always abandoned and can be freed immediately by any thread mi_assert_internal(segment->kind==MI_SEGMENT_HUGE); mi_assert_internal(segment == _mi_page_segment(page)); mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id)==0); // claim it and free mi_heap_t* heap = mi_heap_get_default(); // issue #221; don't use the internal get_default_heap as we need to ensure the thread is initialized. // paranoia: if this is the last reference, the cas should always succeed size_t expected_tid = 0; if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected_tid, heap->thread_id)) { mi_block_set_next(page, block, page->free); page->free = block; page->used--; page->is_zero = false; mi_assert(page->used == 0); mi_tld_t* tld = heap->tld; _mi_segment_page_free(page, true, &tld->segments); } #if (MI_DEBUG!=0) else { mi_assert_internal(false); } #endif } /* ----------------------------------------------------------- Page allocation and free ----------------------------------------------------------- */ mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { mi_page_t* page; if (block_size <= MI_SMALL_OBJ_SIZE_MAX) { page = mi_segments_page_alloc(heap,MI_PAGE_SMALL,block_size,block_size,tld,os_tld); } else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) { page = mi_segments_page_alloc(heap,MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,block_size,tld, os_tld); } else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) { page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld, os_tld); } else { page = mi_segment_huge_page_alloc(block_size,tld,os_tld); } mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); return page; }
dandycheung/baulk
lib/archive/liblzma/common/common.c
/////////////////////////////////////////////////////////////////////////////// // /// \file common.c /// \brief Common functions needed in many places in liblzma // // Author: <NAME> // // This file has been put into the public domain. // You can do whatever you want with this file. // /////////////////////////////////////////////////////////////////////////////// #include "common.h" ///////////// // Version // ///////////// extern LZMA_API(uint32_t) lzma_version_number(void) { return LZMA_VERSION; } extern LZMA_API(const char *) lzma_version_string(void) { return LZMA_VERSION_STRING; } /////////////////////// // Memory allocation // /////////////////////// extern void * lzma_attribute((__malloc__)) lzma_attr_alloc_size(1) lzma_alloc(size_t size, const lzma_allocator *allocator) { // Some malloc() variants return NULL if called with size == 0. if (size == 0) size = 1; void *ptr; if (allocator != NULL && allocator->alloc != NULL) ptr = allocator->alloc(allocator->opaque, 1, size); else ptr = malloc(size); return ptr; } extern void * lzma_attribute((__malloc__)) lzma_attr_alloc_size(1) lzma_alloc_zero(size_t size, const lzma_allocator *allocator) { // Some calloc() variants return NULL if called with size == 0. if (size == 0) size = 1; void *ptr; if (allocator != NULL && allocator->alloc != NULL) { ptr = allocator->alloc(allocator->opaque, 1, size); if (ptr != NULL) memzero(ptr, size); } else { ptr = calloc(1, size); } return ptr; } extern void lzma_free(void *ptr, const lzma_allocator *allocator) { if (allocator != NULL && allocator->free != NULL) allocator->free(allocator->opaque, ptr); else free(ptr); return; } ////////// // Misc // ////////// extern size_t lzma_bufcpy(const uint8_t *restrict in, size_t *restrict in_pos, size_t in_size, uint8_t *restrict out, size_t *restrict out_pos, size_t out_size) { const size_t in_avail = in_size - *in_pos; const size_t out_avail = out_size - *out_pos; const size_t copy_size = my_min(in_avail, out_avail); // Call memcpy() only if there is something to copy. If there is // nothing to copy, in or out might be NULL and then the memcpy() // call would trigger undefined behavior. if (copy_size > 0) memcpy(out + *out_pos, in + *in_pos, copy_size); *in_pos += copy_size; *out_pos += copy_size; return copy_size; } extern lzma_ret lzma_next_filter_init(lzma_next_coder *next, const lzma_allocator *allocator, const lzma_filter_info *filters) { lzma_next_coder_init(filters[0].init, next, allocator); next->id = filters[0].id; return filters[0].init == NULL ? LZMA_OK : filters[0].init(next, allocator, filters); } extern lzma_ret lzma_next_filter_update(lzma_next_coder *next, const lzma_allocator *allocator, const lzma_filter *reversed_filters) { // Check that the application isn't trying to change the Filter ID. // End of filters is indicated with LZMA_VLI_UNKNOWN in both // reversed_filters[0].id and next->id. if (reversed_filters[0].id != next->id) return LZMA_PROG_ERROR; if (reversed_filters[0].id == LZMA_VLI_UNKNOWN) return LZMA_OK; assert(next->update != NULL); return next->update(next->coder, allocator, NULL, reversed_filters); } extern void lzma_next_end(lzma_next_coder *next, const lzma_allocator *allocator) { if (next->init != (uintptr_t)(NULL)) { // To avoid tiny end functions that simply call // lzma_free(coder, allocator), we allow leaving next->end // NULL and call lzma_free() here.
        if (next->end != NULL)
            next->end(next->coder, allocator);
        else
            lzma_free(next->coder, allocator);

        // Reset the variables so that we don't accidentally think
        // that it is an already initialized coder.
        *next = LZMA_NEXT_CODER_INIT;
    }

    return;
}


//////////////////////////////////////
// External to internal API wrapper //
//////////////////////////////////////

extern lzma_ret
lzma_strm_init(lzma_stream *strm)
{
    if (strm == NULL)
        return LZMA_PROG_ERROR;

    if (strm->internal == NULL) {
        strm->internal = lzma_alloc(sizeof(lzma_internal),
                strm->allocator);
        if (strm->internal == NULL)
            return LZMA_MEM_ERROR;

        strm->internal->next = LZMA_NEXT_CODER_INIT;
    }

    memzero(strm->internal->supported_actions,
            sizeof(strm->internal->supported_actions));
    strm->internal->sequence = ISEQ_RUN;
    strm->internal->allow_buf_error = false;

    strm->total_in = 0;
    strm->total_out = 0;

    return LZMA_OK;
}


extern LZMA_API(lzma_ret)
lzma_code(lzma_stream *strm, lzma_action action)
{
    // Sanity checks
    if ((strm->next_in == NULL && strm->avail_in != 0)
            || (strm->next_out == NULL && strm->avail_out != 0)
            || strm->internal == NULL
            || strm->internal->next.code == NULL
            || (unsigned int)(action) > LZMA_ACTION_MAX
            || !strm->internal->supported_actions[action])
        return LZMA_PROG_ERROR;

    // Check if unsupported members have been set to non-zero or non-NULL,
    // which would indicate that some new feature is wanted.
    if (strm->reserved_ptr1 != NULL
            || strm->reserved_ptr2 != NULL
            || strm->reserved_ptr3 != NULL
            || strm->reserved_ptr4 != NULL
            || strm->reserved_int1 != 0
            || strm->reserved_int2 != 0
            || strm->reserved_int3 != 0
            || strm->reserved_int4 != 0
            || strm->reserved_enum1 != LZMA_RESERVED_ENUM
            || strm->reserved_enum2 != LZMA_RESERVED_ENUM)
        return LZMA_OPTIONS_ERROR;

    switch (strm->internal->sequence) {
    case ISEQ_RUN:
        switch (action) {
        case LZMA_RUN:
            break;

        case LZMA_SYNC_FLUSH:
            strm->internal->sequence = ISEQ_SYNC_FLUSH;
            break;

        case LZMA_FULL_FLUSH:
            strm->internal->sequence = ISEQ_FULL_FLUSH;
            break;

        case LZMA_FINISH:
            strm->internal->sequence = ISEQ_FINISH;
            break;

        case LZMA_FULL_BARRIER:
            strm->internal->sequence = ISEQ_FULL_BARRIER;
            break;
        }

        break;

    case ISEQ_SYNC_FLUSH:
        // The same action must be used until we return
        // LZMA_STREAM_END, and the amount of input must not change.
        if (action != LZMA_SYNC_FLUSH
                || strm->internal->avail_in != strm->avail_in)
            return LZMA_PROG_ERROR;

        break;

    case ISEQ_FULL_FLUSH:
        if (action != LZMA_FULL_FLUSH
                || strm->internal->avail_in != strm->avail_in)
            return LZMA_PROG_ERROR;

        break;

    case ISEQ_FINISH:
        if (action != LZMA_FINISH
                || strm->internal->avail_in != strm->avail_in)
            return LZMA_PROG_ERROR;

        break;

    case ISEQ_FULL_BARRIER:
        if (action != LZMA_FULL_BARRIER
                || strm->internal->avail_in != strm->avail_in)
            return LZMA_PROG_ERROR;

        break;

    case ISEQ_END:
        return LZMA_STREAM_END;

    case ISEQ_ERROR:
    default:
        return LZMA_PROG_ERROR;
    }

    size_t in_pos = 0;
    size_t out_pos = 0;
    lzma_ret ret = strm->internal->next.code(
            strm->internal->next.coder, strm->allocator,
            strm->next_in, &in_pos, strm->avail_in,
            strm->next_out, &out_pos, strm->avail_out, action);

    strm->next_in += in_pos;
    strm->avail_in -= in_pos;
    strm->total_in += in_pos;

    strm->next_out += out_pos;
    strm->avail_out -= out_pos;
    strm->total_out += out_pos;

    strm->internal->avail_in = strm->avail_in;

    // Cast is needed to silence a warning about LZMA_TIMED_OUT, which
    // isn't part of lzma_ret enumeration.
    switch ((unsigned int)(ret)) {
    case LZMA_OK:
        // Don't return LZMA_BUF_ERROR when it happens the first time.
        // This is to avoid returning LZMA_BUF_ERROR when avail_out
        // was zero but still there was no more data left to be
        // written to next_out.
        if (out_pos == 0 && in_pos == 0) {
            if (strm->internal->allow_buf_error)
                ret = LZMA_BUF_ERROR;
            else
                strm->internal->allow_buf_error = true;
        } else {
            strm->internal->allow_buf_error = false;
        }
        break;

    case LZMA_TIMED_OUT:
        strm->internal->allow_buf_error = false;
        ret = LZMA_OK;
        break;

    case LZMA_STREAM_END:
        if (strm->internal->sequence == ISEQ_SYNC_FLUSH
                || strm->internal->sequence == ISEQ_FULL_FLUSH
                || strm->internal->sequence == ISEQ_FULL_BARRIER)
            strm->internal->sequence = ISEQ_RUN;
        else
            strm->internal->sequence = ISEQ_END;

    // Fall through

    case LZMA_NO_CHECK:
    case LZMA_UNSUPPORTED_CHECK:
    case LZMA_GET_CHECK:
    case LZMA_MEMLIMIT_ERROR:
        // Something other than LZMA_OK, but not a fatal error,
        // that is, coding may be continued (except if ISEQ_END).
        strm->internal->allow_buf_error = false;
        break;

    default:
        // All the other errors are fatal; coding cannot be continued.
        assert(ret != LZMA_BUF_ERROR);
        strm->internal->sequence = ISEQ_ERROR;
        break;
    }

    return ret;
}


extern LZMA_API(void)
lzma_end(lzma_stream *strm)
{
    if (strm != NULL && strm->internal != NULL) {
        lzma_next_end(&strm->internal->next, strm->allocator);
        lzma_free(strm->internal, strm->allocator);
        strm->internal = NULL;
    }

    return;
}


extern LZMA_API(void)
lzma_get_progress(lzma_stream *strm,
        uint64_t *progress_in, uint64_t *progress_out)
{
    if (strm->internal->next.get_progress != NULL) {
        strm->internal->next.get_progress(strm->internal->next.coder,
                progress_in, progress_out);
    } else {
        *progress_in = strm->total_in;
        *progress_out = strm->total_out;
    }

    return;
}


extern LZMA_API(lzma_check)
lzma_get_check(const lzma_stream *strm)
{
    // Return LZMA_CHECK_NONE if we cannot know the check type.
    // It's a bug in the application if this happens.
    if (strm->internal->next.get_check == NULL)
        return LZMA_CHECK_NONE;

    return strm->internal->next.get_check(strm->internal->next.coder);
}


extern LZMA_API(uint64_t)
lzma_memusage(const lzma_stream *strm)
{
    uint64_t memusage;
    uint64_t old_memlimit;

    if (strm == NULL || strm->internal == NULL
            || strm->internal->next.memconfig == NULL
            || strm->internal->next.memconfig(
                strm->internal->next.coder,
                &memusage, &old_memlimit, 0) != LZMA_OK)
        return 0;

    return memusage;
}


extern LZMA_API(uint64_t)
lzma_memlimit_get(const lzma_stream *strm)
{
    uint64_t old_memlimit;
    uint64_t memusage;

    if (strm == NULL || strm->internal == NULL
            || strm->internal->next.memconfig == NULL
            || strm->internal->next.memconfig(
                strm->internal->next.coder,
                &memusage, &old_memlimit, 0) != LZMA_OK)
        return 0;

    return old_memlimit;
}


extern LZMA_API(lzma_ret)
lzma_memlimit_set(lzma_stream *strm, uint64_t new_memlimit)
{
    // Dummy variables to simplify memconfig functions
    uint64_t old_memlimit;
    uint64_t memusage;

    if (strm == NULL || strm->internal == NULL
            || strm->internal->next.memconfig == NULL)
        return LZMA_PROG_ERROR;

    // Zero is a special value that cannot be used as an actual limit.
    // If 0 was specified, use 1 instead.
    if (new_memlimit == 0)
        new_memlimit = 1;

    return strm->internal->next.memconfig(strm->internal->next.coder,
            &memusage, &old_memlimit, new_memlimit);
}
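/*
 * Illustrative usage sketch (not part of the original file): the usual
 * calling pattern for lzma_code() from the application side, matching the
 * sequence checks above (ISEQ_RUN -> ISEQ_FINISH -> ISEQ_END). Buffer
 * handling is simplified and the helper name is hypothetical.
 */
#if 0
static lzma_ret compress_buffer(const uint8_t *in, size_t in_size,
        uint8_t *out, size_t out_size, size_t *out_used)
{
    lzma_stream strm = LZMA_STREAM_INIT;
    lzma_ret ret = lzma_easy_encoder(&strm, LZMA_PRESET_DEFAULT,
            LZMA_CHECK_CRC64);
    if (ret != LZMA_OK)
        return ret;

    strm.next_in = in;
    strm.avail_in = in_size;
    strm.next_out = out;
    strm.avail_out = out_size;

    // With the whole input available, LZMA_FINISH can be used from the
    // start; lzma_code() returns LZMA_STREAM_END once everything has
    // been encoded and flushed.
    do {
        ret = lzma_code(&strm, LZMA_FINISH);
    } while (ret == LZMA_OK);

    *out_used = out_size - strm.avail_out;
    lzma_end(&strm);
    return ret == LZMA_STREAM_END ? LZMA_OK : ret;
}
#endif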
dandycheung/baulk
lib/archive/zstd/compress/zstd_compress_superblock.c
/*
 * Copyright (c) <NAME>, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

 /*-*************************************
 *  Dependencies
 ***************************************/
#include "zstd_compress_superblock.h"

#include "../common/zstd_internal.h"  /* ZSTD_getSequenceLength */
#include "hist.h"                     /* HIST_countFast_wksp */
#include "zstd_compress_internal.h"   /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"

/** ZSTD_compressSubBlock_literal() :
 *  Compresses literals section for a sub-block.
 *  When we have to write the Huffman table we will sometimes choose a header
 *  size larger than necessary. This is because we have to pick the header size
 *  before we know the table size + compressed size, so we have a bound on the
 *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
 *
 *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
 *  in writing the header, otherwise it is set to 0.
 *
 *  hufMetadata->hType has literals block type info.
 *      If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
 *      If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
 *      If it is set_compressed, the first sub-block's literals section will be
 *      Compressed_Literals_Block and the following sub-blocks' literals sections
 *      will be Treeless_Literals_Block.
 *  @return : compressed size of literals section of a sub-block
 *            Or 0 if it is unable to compress.
 *            Or error code */
static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
                                    const ZSTD_hufCTablesMetadata_t* hufMetadata,
                                    const BYTE* literals, size_t litSize,
                                    void* dst, size_t dstSize,
                                    const int bmi2, int writeEntropy, int* entropyWritten)
{
    size_t const header = writeEntropy ? 200 : 0;
    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart + lhSize;
    U32 const singleStream = lhSize == 3;
    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
    size_t cLitSize = 0;

    (void)bmi2; /* TODO bmi2... */

    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);

    *entropyWritten = 0;
    if (litSize == 0 || hufMetadata->hType == set_basic) {
      DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
      return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
    } else if (hufMetadata->hType == set_rle) {
      DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
      return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
    }

    assert(litSize > 0);
    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);

    if (writeEntropy && hufMetadata->hType == set_compressed) {
        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
        op += hufMetadata->hufDesSize;
        cLitSize += hufMetadata->hufDesSize;
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
    }

    /* TODO bmi2 */
    {   const size_t cSize = singleStream ?
HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable) : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable); op += cSize; cLitSize += cSize; if (cSize == 0 || ERR_isError(cSize)) { DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize)); return 0; } /* If we expand and we aren't writing a header then emit uncompressed */ if (!writeEntropy && cLitSize >= litSize) { DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible"); return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); } /* If we are writing headers then allow expansion that doesn't change our header size. */ if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) { assert(cLitSize > litSize); DEBUGLOG(5, "Literals expanded beyond allowed header size"); return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); } DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize); } /* Build header */ switch(lhSize) { case 3: /* 2 - 2 - 10 - 10 */ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14); MEM_writeLE24(ostart, lhc); break; } case 4: /* 2 - 2 - 14 - 14 */ { U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18); MEM_writeLE32(ostart, lhc); break; } case 5: /* 2 - 2 - 18 - 18 */ { U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22); MEM_writeLE32(ostart, lhc); ostart[4] = (BYTE)(cLitSize >> 10); break; } default: /* not possible : lhSize is {3,4,5} */ assert(0); } *entropyWritten = 1; DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart)); return op-ostart; } static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) { const seqDef* const sstart = sequences; const seqDef* const send = sequences + nbSeq; const seqDef* sp = sstart; size_t matchLengthSum = 0; size_t litLengthSum = 0; (void)(litLengthSum); /* suppress unused variable warning on some environments */ while (send-sp > 0) { ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp); litLengthSum += seqLen.litLength; matchLengthSum += seqLen.matchLength; sp++; } assert(litLengthSum <= litSize); if (!lastSequence) { assert(litLengthSum == litSize); } return matchLengthSum + litSize; } /** ZSTD_compressSubBlock_sequences() : * Compresses sequences section for a sub-block. * fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have * symbol compression modes for the super-block. * The first successfully compressed block will have these in its header. * We set entropyWritten=1 when we succeed in compressing the sequences. * The following sub-blocks will always have repeat mode. * @return : compressed size of sequences section of a sub-block * Or 0 if it is unable to compress * Or error code. 
*/ static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables, const ZSTD_fseCTablesMetadata_t* fseMetadata, const seqDef* sequences, size_t nbSeq, const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, const ZSTD_CCtx_params* cctxParams, void* dst, size_t dstCapacity, const int bmi2, int writeEntropy, int* entropyWritten) { const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; BYTE* seqHead; DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets); *entropyWritten = 0; /* Sequences Header */ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/, dstSize_tooSmall, ""); if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq; else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; if (nbSeq==0) { return op - ostart; } /* seqHead : flags for FSE encoding type */ seqHead = op++; DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart)); if (writeEntropy) { const U32 LLtype = fseMetadata->llType; const U32 Offtype = fseMetadata->ofType; const U32 MLtype = fseMetadata->mlType; DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize); *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize); op += fseMetadata->fseTablesSize; } else { const U32 repeat = set_repeat; *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2)); } { size_t const bitstreamSize = ZSTD_encodeSequences( op, oend - op, fseTables->matchlengthCTable, mlCode, fseTables->offcodeCTable, ofCode, fseTables->litlengthCTable, llCode, sequences, nbSeq, longOffsets, bmi2); FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed"); op += bitstreamSize; /* zstd versions <= 1.3.4 mistakenly report corruption when * FSE_readNCount() receives a buffer < 4 bytes. * Fixed by https://github.com/facebook/zstd/pull/1146. * This can happen when the last set_compressed table present is 2 * bytes and the bitstream is only one byte. * In this exceedingly rare case, we will simply emit an uncompressed * block, since it isn't worth optimizing. */ #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) { /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ assert(fseMetadata->lastCountSize + bitstreamSize == 3); DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " "emitting an uncompressed block."); return 0; } #endif DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize); } /* zstd versions <= 1.4.0 mistakenly report error when * sequences section body size is less than 3 bytes. * Fixed by https://github.com/facebook/zstd/pull/1664. * This can happen when the previous sequences section block is compressed * with rle mode and the current block's sequences section is compressed * with repeat mode where sequences section body size can be 1 byte. 
*/ #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION if (op-seqHead < 4) { DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting " "an uncompressed block when sequences are < 4 bytes"); return 0; } #endif *entropyWritten = 1; return op - ostart; } /** ZSTD_compressSubBlock() : * Compresses a single sub-block. * @return : compressed size of the sub-block * Or 0 if it failed to compress. */ static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy, const ZSTD_entropyCTablesMetadata_t* entropyMetadata, const seqDef* sequences, size_t nbSeq, const BYTE* literals, size_t litSize, const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, const ZSTD_CCtx_params* cctxParams, void* dst, size_t dstCapacity, const int bmi2, int writeLitEntropy, int writeSeqEntropy, int* litEntropyWritten, int* seqEntropyWritten, U32 lastBlock) { BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart + ZSTD_blockHeaderSize; DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)", litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock); { size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable, &entropyMetadata->hufMetadata, literals, litSize, op, oend-op, bmi2, writeLitEntropy, litEntropyWritten); FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed"); if (cLitSize == 0) return 0; op += cLitSize; } { size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse, &entropyMetadata->fseMetadata, sequences, nbSeq, llCode, mlCode, ofCode, cctxParams, op, oend-op, bmi2, writeSeqEntropy, seqEntropyWritten); FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed"); if (cSeqSize == 0) return 0; op += cSeqSize; } /* Write block header */ { size_t cSize = (op-ostart)-ZSTD_blockHeaderSize; U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); MEM_writeLE24(ostart, cBlockHeader24); } return op-ostart; } static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize, const ZSTD_hufCTables_t* huf, const ZSTD_hufCTablesMetadata_t* hufMetadata, void* workspace, size_t wkspSize, int writeEntropy) { unsigned* const countWksp = (unsigned*)workspace; unsigned maxSymbolValue = 255; size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */ if (hufMetadata->hType == set_basic) return litSize; else if (hufMetadata->hType == set_rle) return 1; else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize); if (ZSTD_isError(largest)) return litSize; { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue); if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize; return cLitSizeEstimate + literalSectionHeaderSize; } } assert(0); /* impossible */ return 0; } static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type, const BYTE* codeTable, unsigned maxCode, size_t nbSeq, const FSE_CTable* fseCTable, const U8* additionalBits, short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, void* workspace, size_t wkspSize) { unsigned* const countWksp = (unsigned*)workspace; const BYTE* ctp = codeTable; const BYTE* const ctStart = ctp; const BYTE* const ctEnd = ctStart + nbSeq; size_t cSymbolTypeSizeEstimateInBits = 0; unsigned max = maxCode; HIST_countFast_wksp(countWksp, 
&max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */ if (type == set_basic) { /* We selected this encoding type, so it must be valid. */ assert(max <= defaultMax); cSymbolTypeSizeEstimateInBits = max <= defaultMax ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max) : ERROR(GENERIC); } else if (type == set_rle) { cSymbolTypeSizeEstimateInBits = 0; } else if (type == set_compressed || type == set_repeat) { cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); } if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10; while (ctp < ctEnd) { if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp]; else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */ ctp++; } return cSymbolTypeSizeEstimateInBits / 8; } static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable, const BYTE* llCodeTable, const BYTE* mlCodeTable, size_t nbSeq, const ZSTD_fseCTables_t* fseTables, const ZSTD_fseCTablesMetadata_t* fseMetadata, void* workspace, size_t wkspSize, int writeEntropy) { size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */ size_t cSeqSizeEstimate = 0; if (nbSeq == 0) return sequencesSectionHeaderSize; cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff, nbSeq, fseTables->offcodeCTable, NULL, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, workspace, wkspSize); cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL, nbSeq, fseTables->litlengthCTable, LL_bits, LL_defaultNorm, LL_defaultNormLog, MaxLL, workspace, wkspSize); cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML, nbSeq, fseTables->matchlengthCTable, ML_bits, ML_defaultNorm, ML_defaultNormLog, MaxML, workspace, wkspSize); if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize; return cSeqSizeEstimate + sequencesSectionHeaderSize; } static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize, const BYTE* ofCodeTable, const BYTE* llCodeTable, const BYTE* mlCodeTable, size_t nbSeq, const ZSTD_entropyCTables_t* entropy, const ZSTD_entropyCTablesMetadata_t* entropyMetadata, void* workspace, size_t wkspSize, int writeLitEntropy, int writeSeqEntropy) { size_t cSizeEstimate = 0; cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize, &entropy->huf, &entropyMetadata->hufMetadata, workspace, wkspSize, writeLitEntropy); cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, workspace, wkspSize, writeSeqEntropy); return cSizeEstimate + ZSTD_blockHeaderSize; } static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata) { if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle) return 1; if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle) return 1; if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle) return 1; return 0; } /** ZSTD_compressSubBlock_multi() : * Breaks super-block into multiple sub-blocks and compresses them. * Entropy will be written to the first block. * The following blocks will use repeat mode to compress. * All sub-blocks are compressed blocks (no raw or rle blocks). * @return : compressed size of the super block (which is multiple ZSTD blocks) * Or 0 if it failed to compress. 
*/ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, const ZSTD_compressedBlockState_t* prevCBlock, ZSTD_compressedBlockState_t* nextCBlock, const ZSTD_entropyCTablesMetadata_t* entropyMetadata, const ZSTD_CCtx_params* cctxParams, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const int bmi2, U32 lastBlock, void* workspace, size_t wkspSize) { const seqDef* const sstart = seqStorePtr->sequencesStart; const seqDef* const send = seqStorePtr->sequences; const seqDef* sp = sstart; const BYTE* const lstart = seqStorePtr->litStart; const BYTE* const lend = seqStorePtr->lit; const BYTE* lp = lstart; BYTE const* ip = (BYTE const*)src; BYTE const* const iend = ip + srcSize; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; const BYTE* llCodePtr = seqStorePtr->llCode; const BYTE* mlCodePtr = seqStorePtr->mlCode; const BYTE* ofCodePtr = seqStorePtr->ofCode; size_t targetCBlockSize = cctxParams->targetCBlockSize; size_t litSize, seqCount; int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed; int writeSeqEntropy = 1; int lastSequence = 0; DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)", (unsigned)(lend-lp), (unsigned)(send-sstart)); litSize = 0; seqCount = 0; do { size_t cBlockSizeEstimate = 0; if (sstart == send) { lastSequence = 1; } else { const seqDef* const sequence = sp + seqCount; lastSequence = sequence == send - 1; litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength; seqCount++; } if (lastSequence) { assert(lp <= lend); assert(litSize <= (size_t)(lend - lp)); litSize = (size_t)(lend - lp); } /* I think there is an optimization opportunity here. * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful * since it recalculates estimate from scratch. * For example, it would recount literal distribution and symbol codes every time. */ cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount, &nextCBlock->entropy, entropyMetadata, workspace, wkspSize, writeLitEntropy, writeSeqEntropy); if (cBlockSizeEstimate > targetCBlockSize || lastSequence) { int litEntropyWritten = 0; int seqEntropyWritten = 0; const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence); const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, sp, seqCount, lp, litSize, llCodePtr, mlCodePtr, ofCodePtr, cctxParams, op, oend-op, bmi2, writeLitEntropy, writeSeqEntropy, &litEntropyWritten, &seqEntropyWritten, lastBlock && lastSequence); FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed"); if (cSize > 0 && cSize < decompressedSize) { DEBUGLOG(5, "Committed the sub-block"); assert(ip + decompressedSize <= iend); ip += decompressedSize; sp += seqCount; lp += litSize; op += cSize; llCodePtr += seqCount; mlCodePtr += seqCount; ofCodePtr += seqCount; litSize = 0; seqCount = 0; /* Entropy only needs to be written once */ if (litEntropyWritten) { writeLitEntropy = 0; } if (seqEntropyWritten) { writeSeqEntropy = 0; } } } } while (!lastSequence); if (writeLitEntropy) { DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten"); ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf)); } if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) { /* If we haven't written our entropy tables, then we've violated our contract and * must emit an uncompressed block. 
*/ DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten"); return 0; } if (ip < iend) { size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock); DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip)); FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); assert(cSize != 0); op += cSize; /* We have to regenerate the repcodes because we've skipped some sequences */ if (sp < send) { seqDef const* seq; repcodes_t rep; ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep)); for (seq = sstart; seq < sp; ++seq) { ZSTD_updateRep(rep.rep, seq->offBase - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); } ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep)); } } DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed"); return op-ostart; } size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, void const* src, size_t srcSize, unsigned lastBlock) { ZSTD_entropyCTablesMetadata_t entropyMetadata; FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, &entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); return ZSTD_compressSubBlock_multi(&zc->seqStore, zc->blockState.prevCBlock, zc->blockState.nextCBlock, &entropyMetadata, &zc->appliedParams, dst, dstCapacity, src, srcSize, zc->bmi2, lastBlock, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */); }
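/*
 * Illustrative usage sketch (not part of the original file):
 * ZSTD_compressSuperBlock() is an internal entry point; applications reach
 * the sub-block machinery in this file indirectly by setting a target
 * compressed block size on a compression context. The helper name below is
 * hypothetical and error checks are omitted; the value is a hint, with no
 * hard guarantee on the resulting block sizes.
 */
#if 0
#include "../zstd.h"

static size_t compress_with_subblocks(void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    /* A non-zero target makes the compressor split blocks into sub-blocks
     * of roughly this compressed size. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 1340);
    {   size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        ZSTD_freeCCtx(cctx);
        return cSize;
    }
}
#endif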
dandycheung/baulk
lib/archive/liblzma/common/block_util.c
/////////////////////////////////////////////////////////////////////////////// // /// \file block_util.c /// \brief Utility functions to handle lzma_block // // Author: <NAME> // // This file has been put into the public domain. // You can do whatever you want with this file. // /////////////////////////////////////////////////////////////////////////////// #include "common.h" #include "index.h" extern LZMA_API(lzma_ret) lzma_block_compressed_size(lzma_block *block, lzma_vli unpadded_size) { // Validate everything but Uncompressed Size and filters. if (lzma_block_unpadded_size(block) == 0) return LZMA_PROG_ERROR; const uint32_t container_size = block->header_size + lzma_check_size(block->check); // Validate that Compressed Size will be greater than zero. if (unpadded_size <= container_size) return LZMA_DATA_ERROR; // Calculate what Compressed Size is supposed to be. // If Compressed Size was present in Block Header, // compare that the new value matches it. const lzma_vli compressed_size = unpadded_size - container_size; if (block->compressed_size != LZMA_VLI_UNKNOWN && block->compressed_size != compressed_size) return LZMA_DATA_ERROR; block->compressed_size = compressed_size; return LZMA_OK; } extern LZMA_API(lzma_vli) lzma_block_unpadded_size(const lzma_block *block) { // Validate the values that we are interested in i.e. all but // Uncompressed Size and the filters. // // NOTE: This function is used for validation too, so it is // essential that these checks are always done even if // Compressed Size is unknown. if (block == NULL || block->version > 1 || block->header_size < LZMA_BLOCK_HEADER_SIZE_MIN || block->header_size > LZMA_BLOCK_HEADER_SIZE_MAX || (block->header_size & 3) || !lzma_vli_is_valid(block->compressed_size) || block->compressed_size == 0 || (unsigned int)(block->check) > LZMA_CHECK_ID_MAX) return 0; // If Compressed Size is unknown, return that we cannot know // size of the Block either. if (block->compressed_size == LZMA_VLI_UNKNOWN) return LZMA_VLI_UNKNOWN; // Calculate Unpadded Size and validate it. const lzma_vli unpadded_size = block->compressed_size + block->header_size + lzma_check_size(block->check); assert(unpadded_size >= UNPADDED_SIZE_MIN); if (unpadded_size > UNPADDED_SIZE_MAX) return 0; return unpadded_size; } extern LZMA_API(lzma_vli) lzma_block_total_size(const lzma_block *block) { lzma_vli unpadded_size = lzma_block_unpadded_size(block); if (unpadded_size != LZMA_VLI_UNKNOWN) unpadded_size = vli_ceil4(unpadded_size); return unpadded_size; }
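/*
 * Worked example (illustrative, not part of the original file): the size
 * relations implemented above. For a Block with a 12-byte header, a CRC64
 * check (8 bytes), and 1000 bytes of compressed data:
 *
 *     Unpadded Size = 12 + 1000 + 8   = 1020
 *     Total Size    = vli_ceil4(1020) = 1020   (already a multiple of four)
 *
 * With 1001 bytes of compressed data the Unpadded Size would be 1021 and
 * the Total Size 1024, the Block Padding being three zero bytes.
 */
#if 0
static void size_demo(void)
{
    lzma_block block = {
        .version = 0,
        .header_size = 12,
        .check = LZMA_CHECK_CRC64,
        .compressed_size = 1000,
    };
    lzma_vli unpadded = lzma_block_unpadded_size(&block); // 1020
    lzma_vli total = lzma_block_total_size(&block);       // 1020
    (void)unpadded; (void)total;
}
#endif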
dandycheung/baulk
lib/archive/liblzma/check/crc32_table.c
/////////////////////////////////////////////////////////////////////////////// // /// \file crc32_table.c /// \brief Precalculated CRC32 table with correct endianness // // Author: <NAME> // // This file has been put into the public domain. // You can do whatever you want with this file. // /////////////////////////////////////////////////////////////////////////////// #include "common.h" // Having the declaration here silences clang -Wmissing-variable-declarations. extern const uint32_t lzma_crc32_table[8][256]; #ifdef WORDS_BIGENDIAN # include "crc32_table_be.h" #else # include "crc32_table_le.h" #endif
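/*
 * Illustrative usage sketch (not part of the original file): this table
 * backs liblzma's public CRC32 helper. Chunked hashing, assuming the
 * declaration of lzma_crc32() from <lzma/check.h>; the helper name below
 * is hypothetical.
 */
#if 0
static uint32_t checksum_two_parts(const uint8_t *a, size_t a_size,
        const uint8_t *b, size_t b_size)
{
    uint32_t crc = lzma_crc32(a, a_size, 0); // 0 starts a new CRC
    return lzma_crc32(b, b_size, crc);       // continue with the next chunk
}
#endif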
dandycheung/baulk
lib/archive/zstd/common/portability_macros.h
/*
 * Copyright (c) Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_PORTABILITY_MACROS_H
#define ZSTD_PORTABILITY_MACROS_H

/**
 * This header file contains macro definitions to support portability.
 * This header is shared between C and ASM code, so it MUST only
 * contain macro definitions. It MUST not contain any C code.
 *
 * This header ONLY defines macros to detect platforms/feature support.
 *
 */

/* compat. with non-clang compilers */
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

/* compat. with non-clang compilers */
#ifndef __has_builtin
# define __has_builtin(x) 0
#endif

/* compat. with non-clang compilers */
#ifndef __has_feature
# define __has_feature(x) 0
#endif

/* detects whether we are being compiled under msan */
#ifndef ZSTD_MEMORY_SANITIZER
# if __has_feature(memory_sanitizer)
#  define ZSTD_MEMORY_SANITIZER 1
# else
#  define ZSTD_MEMORY_SANITIZER 0
# endif
#endif

/* detects whether we are being compiled under asan */
#ifndef ZSTD_ADDRESS_SANITIZER
# if __has_feature(address_sanitizer)
#  define ZSTD_ADDRESS_SANITIZER 1
# elif defined(__SANITIZE_ADDRESS__)
#  define ZSTD_ADDRESS_SANITIZER 1
# else
#  define ZSTD_ADDRESS_SANITIZER 0
# endif
#endif

/* detects whether we are being compiled under dfsan */
#ifndef ZSTD_DATAFLOW_SANITIZER
# if __has_feature(dataflow_sanitizer)
#  define ZSTD_DATAFLOW_SANITIZER 1
# else
#  define ZSTD_DATAFLOW_SANITIZER 0
# endif
#endif

/* Mark the internal assembly functions as hidden */
#ifdef __ELF__
# define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func
#else
# define ZSTD_HIDE_ASM_FUNCTION(func)
#endif

/* Enable runtime BMI2 dispatch based on the CPU.
 * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
 */
#ifndef DYNAMIC_BMI2
#if ((defined(__clang__) && __has_attribute(__target__)) \
    || (defined(__GNUC__) \
        && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
    && (defined(__x86_64__) || defined(_M_X64)) \
    && !defined(__BMI2__)
#  define DYNAMIC_BMI2 1
#else
#  define DYNAMIC_BMI2 0
#endif
#endif

/**
 * Only enable assembly for GNUC compatible compilers,
 * because other platforms may not support GAS assembly syntax.
 *
 * Only enable assembly for Linux / MacOS, other platforms may
 * work, but they haven't been tested. This could likely be
 * extended to BSD systems.
 *
 * Disable assembly when MSAN is enabled, because MSAN requires
 * 100% of code to be instrumented to work.
 */
#if defined(__GNUC__)
# if defined(__linux__) || defined(__linux) || defined(__APPLE__)
#  if ZSTD_MEMORY_SANITIZER
#   define ZSTD_ASM_SUPPORTED 0
#  elif ZSTD_DATAFLOW_SANITIZER
#   define ZSTD_ASM_SUPPORTED 0
#  else
#   define ZSTD_ASM_SUPPORTED 1
#  endif
# else
#  define ZSTD_ASM_SUPPORTED 0
# endif
#else
# define ZSTD_ASM_SUPPORTED 0
#endif

/**
 * Determines whether we should enable assembly for x86-64
 * with BMI2.
* * Enable if all of the following conditions hold: * - ASM hasn't been explicitly disabled by defining ZSTD_DISABLE_ASM * - Assembly is supported * - We are compiling for x86-64 and either: * - DYNAMIC_BMI2 is enabled * - BMI2 is supported at compile time */ #if !defined(ZSTD_DISABLE_ASM) && \ ZSTD_ASM_SUPPORTED && \ defined(__x86_64__) && \ (DYNAMIC_BMI2 || defined(__BMI2__)) # define ZSTD_ENABLE_ASM_X86_64_BMI2 1 #else # define ZSTD_ENABLE_ASM_X86_64_BMI2 0 #endif #endif /* ZSTD_PORTABILITY_MACROS_H */
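/*
 * Illustrative sketch (an assumption, not from the original header): how a
 * translation unit might consume the detection macro defined above. The
 * kernel names are hypothetical.
 *
 *     #if ZSTD_ENABLE_ASM_X86_64_BMI2
 *     extern size_t fast_asm_kernel(void* dst, size_t n);  // defined in a .S file
 *     #  define KERNEL fast_asm_kernel
 *     #else
 *     #  define KERNEL portable_kernel
 *     #endif
 */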
dandycheung/baulk
tools/unscrew/resource.h
/// #ifndef BAULK_UNSCREW_RESOURCE_H #define BAULK_UNSCREW_RESOURCE_H #define ICON_BAULK_APP 256 #define IDC_PROGRESS_BAR 1000 #endif
dandycheung/baulk
lib/archive/liblzma/lzma/lzma_common.h
///////////////////////////////////////////////////////////////////////////////
//
/// \file       lzma_common.h
/// \brief      Private definitions common to LZMA encoder and decoder
///
//  Authors:    <NAME>
//              <NAME>
//
//  This file has been put into the public domain.
//  You can do whatever you want with this file.
//
///////////////////////////////////////////////////////////////////////////////

#ifndef LZMA_LZMA_COMMON_H
#define LZMA_LZMA_COMMON_H

#include "common.h"
#include "range_common.h"


///////////////////
// Miscellaneous //
///////////////////

/// Maximum number of position states. A position state is the lowest pos bits
/// number of bits of the current uncompressed offset. In some places there
/// are different sets of probabilities for different pos states.
#define POS_STATES_MAX (1 << LZMA_PB_MAX)


/// Validates lc, lp, and pb.
static inline bool
is_lclppb_valid(const lzma_options_lzma *options)
{
    return options->lc <= LZMA_LCLP_MAX && options->lp <= LZMA_LCLP_MAX
            && options->lc + options->lp <= LZMA_LCLP_MAX
            && options->pb <= LZMA_PB_MAX;
}


///////////
// State //
///////////

/// This enum is used to track which events have occurred most recently and
/// in which order. This information is used to predict the next event.
///
/// Events:
///  - Literal: One 8-bit byte
///  - Match: Repeat a chunk of data at some distance
///  - Long repeat: Multi-byte match at a recently seen distance
///  - Short repeat: One-byte repeat at a recently seen distance
///
/// The event names are in the form STATE_oldest_older_previous. REP means
/// either short or long repeated match, and NONLIT means any non-literal.
typedef enum {
    STATE_LIT_LIT,
    STATE_MATCH_LIT_LIT,
    STATE_REP_LIT_LIT,
    STATE_SHORTREP_LIT_LIT,
    STATE_MATCH_LIT,
    STATE_REP_LIT,
    STATE_SHORTREP_LIT,
    STATE_LIT_MATCH,
    STATE_LIT_LONGREP,
    STATE_LIT_SHORTREP,
    STATE_NONLIT_MATCH,
    STATE_NONLIT_REP,
} lzma_lzma_state;


/// Total number of states
#define STATES 12

/// The lowest 7 states indicate that the previous state was a literal.
#define LIT_STATES 7


/// Indicate that the latest state was a literal.
#define update_literal(state) \
    state = ((state) <= STATE_SHORTREP_LIT_LIT \
            ? STATE_LIT_LIT \
            : ((state) <= STATE_LIT_SHORTREP \
                ? (state) - 3 \
                : (state) - 6))

/// Indicate that the latest state was a match.
#define update_match(state) \
    state = ((state) < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH)

/// Indicate that the latest state was a long repeated match.
#define update_long_rep(state) \
    state = ((state) < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP)

/// Indicate that the latest state was a short match.
#define update_short_rep(state) \
    state = ((state) < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP)

/// Test if the previous state was a literal.
#define is_literal_state(state) \
    ((state) < LIT_STATES)


/////////////
// Literal //
/////////////

/// Each literal coder is divided into three sections:
///   - 0x001-0x0FF: Without match byte
///   - 0x101-0x1FF: With match byte; match bit is 0
///   - 0x201-0x2FF: With match byte; match bit is 1
///
/// Match byte is used when the previous LZMA symbol was something other than
/// a literal (that is, it was some kind of match).
#define LITERAL_CODER_SIZE 0x300

/// Maximum number of literal coders
#define LITERAL_CODERS_MAX (1 << LZMA_LCLP_MAX)

/// Locate the literal coder for the next literal byte. The choice depends on
///   - the lowest literal_pos_bits bits of the position of the current
///     byte; and
///   - the highest literal_context_bits bits of the previous byte.
#define literal_subcoder(probs, lc, lp_mask, pos, prev_byte) \
    ((probs)[(((pos) & (lp_mask)) << (lc)) \
            + ((uint32_t)(prev_byte) >> (8U - (lc)))])


static inline void
literal_init(probability (*probs)[LITERAL_CODER_SIZE],
        uint32_t lc, uint32_t lp)
{
    assert(lc + lp <= LZMA_LCLP_MAX);

    const uint32_t coders = 1U << (lc + lp);

    for (uint32_t i = 0; i < coders; ++i)
        for (uint32_t j = 0; j < LITERAL_CODER_SIZE; ++j)
            bit_reset(probs[i][j]);

    return;
}


//////////////////
// Match length //
//////////////////

// Minimum length of a match is two bytes.
#define MATCH_LEN_MIN 2

// Match length is encoded with 4, 5, or 10 bits.
//
// Length   Bits
//  2-9      4 = Choice=0 + 3 bits
// 10-17     5 = Choice=1 + Choice2=0 + 3 bits
// 18-273   10 = Choice=1 + Choice2=1 + 8 bits
#define LEN_LOW_BITS 3
#define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS)
#define LEN_MID_BITS 3
#define LEN_MID_SYMBOLS (1 << LEN_MID_BITS)
#define LEN_HIGH_BITS 8
#define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS)
#define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS)

// Maximum length of a match is 273 which is a result of the encoding
// described above.
#define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1)


////////////////////
// Match distance //
////////////////////

// Different sets of probabilities are used for match distances that have very
// short match length: Lengths of 2, 3, and 4 bytes have a separate set of
// probabilities for each length. The matches with longer length use a shared
// set of probabilities.
#define DIST_STATES 4

// Macro to get the index of the appropriate probability array.
#define get_dist_state(len) \
    ((len) < DIST_STATES + MATCH_LEN_MIN \
        ? (len) - MATCH_LEN_MIN \
        : DIST_STATES - 1)

// The highest two bits of a match distance (distance slot) are encoded
// using six bits. See fastpos.h for more explanation.
#define DIST_SLOT_BITS 6
#define DIST_SLOTS (1 << DIST_SLOT_BITS)

// Match distances up to 127 are fully encoded using probabilities. Since
// the highest two bits (distance slot) are always encoded using six bits,
// the distances 0-3 don't need any additional bits to encode, since the
// distance slot itself is the same as the actual distance. DIST_MODEL_START
// indicates the first distance slot where at least one additional bit is
// needed.
#define DIST_MODEL_START 4

// Match distances greater than 127 are encoded in three pieces:
//   - distance slot: the highest two bits
//   - direct bits: 2-26 bits below the highest two bits
//   - alignment bits: four lowest bits
//
// Direct bits don't use any probabilities.
//
// The distance slot value of 14 is for distances 128-191 (see the table in
// fastpos.h to understand why).
#define DIST_MODEL_END 14

// Distance slots that indicate a distance <= 127.
#define FULL_DISTANCES_BITS (DIST_MODEL_END / 2)
#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS)

// For match distances greater than 127, only the highest two bits and the
// lowest four bits (alignment) are encoded using probabilities.
#define ALIGN_BITS 4
#define ALIGN_SIZE (1 << ALIGN_BITS)
#define ALIGN_MASK (ALIGN_SIZE - 1)

// LZMA remembers the four most recent match distances. Reusing these distances
// tends to take less space than re-encoding the actual distance value.
#define REPS 4

#endif
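/*
 * Worked example (illustrative, not part of the original header): with the
 * default lc = 3, lp = 0 (so lp_mask = 0), literal_subcoder() reduces to
 * indexing by the top three bits of the previous byte:
 *
 *     literal_subcoder(probs, 3, 0, pos, 0xB7)
 *         == probs[((pos & 0) << 3) + (0xB7 >> 5)]
 *         == probs[5]
 *
 * Similarly, get_dist_state() maps match lengths 2, 3, and 4 to distance
 * states 0, 1, and 2, while every length of 5 or more shares the state
 * DIST_STATES - 1 == 3.
 */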
dandycheung/baulk
lib/archive/zlib/crc32.c
/* crc32.c -- compute the CRC-32 of a data stream
 * Copyright (C) 1995-2022 <NAME>
 * For conditions of distribution and use, see copyright notice in zlib.h
 *
 * This interleaved implementation of a CRC makes use of pipelined multiple
 * arithmetic-logic units, commonly found in modern CPU cores. It is due to
 * Kadatch and Jenkins (2010). See doc/crc-doc.1.0.pdf in this distribution.
 */

/* @(#) $Id$ */

/*
  Note on the use of DYNAMIC_CRC_TABLE: there is no mutex or semaphore
  protection on the static variables used to control the first-use generation
  of the crc tables. Therefore, if you #define DYNAMIC_CRC_TABLE, you should
  first call get_crc_table() to initialize the tables before allowing more than
  one thread to use crc32().

  MAKECRCH can be #defined to write out crc32.h. A main() routine is also
  produced, so that this one source file can be compiled to an executable.
 */

#ifdef MAKECRCH
#  include <stdio.h>
#  ifndef DYNAMIC_CRC_TABLE
#    define DYNAMIC_CRC_TABLE
#  endif /* !DYNAMIC_CRC_TABLE */
#endif /* MAKECRCH */

#include "deflate.h"
#include "cpu_features.h"
#include "zutil.h"      /* for Z_U4, Z_U8, z_crc_t, and FAR definitions */

#if defined(CRC32_SIMD_SSE42_PCLMUL) || defined(CRC32_ARMV8_CRC32)
#include "crc32_simd.h"
#endif

/*
  A CRC of a message is computed on N braids of words in the message, where
  each word consists of W bytes (4 or 8). If N is 3, for example, then three
  running sparse CRCs are calculated respectively on each braid, at these
  indices in the array of words: 0, 3, 6, ..., 1, 4, 7, ..., and 2, 5, 8, ...
  This is done starting at a word boundary, and continues until as many blocks
  of N * W bytes as are available have been processed. The results are combined
  into a single CRC at the end. For this code, N must be in the range 1..6 and
  W must be 4 or 8. The upper limit on N can be increased if desired by adding
  more #if blocks, extending the patterns apparent in the code. In addition,
  crc32.h would need to be regenerated, if the maximum N value is increased.

  N and W are chosen empirically by benchmarking the execution time on a given
  processor. The choices for N and W below were based on testing on Intel Kaby
  Lake i7, AMD Ryzen 7, ARM Cortex-A57, Sparc64-VII, PowerPC POWER9, and MIPS64
  Octeon II processors. The Intel, AMD, and ARM processors were all fastest
  with N=5, W=8. The Sparc, PowerPC, and MIPS64 were all fastest at N=5, W=4.
  They were all tested with either gcc or clang, all using the -O3 optimization
  level. Your mileage may vary.
 */

/* Define N */
#ifdef Z_TESTN
#  define N Z_TESTN
#else
#  define N 5
#endif
#if N < 1 || N > 6
#  error N must be in 1..6
#endif

/*
  z_crc_t must be at least 32 bits. z_word_t must be at least as long as
  z_crc_t. It is assumed here that z_word_t is either 32 bits or 64 bits, and
  that bytes are eight bits.
 */

/*
  Define W and the associated z_word_t type. If W is not defined, then a
  braided calculation is not used, and the associated tables and code are not
  compiled.
 */
#ifdef Z_TESTW
#  if Z_TESTW-1 != -1
#    define W Z_TESTW
#  endif
#else
#  ifdef MAKECRCH
#    define W 8         /* required for MAKECRCH */
#  else
#    if defined(__x86_64__) || defined(__aarch64__)
#      define W 8
#    else
#      define W 4
#    endif
#  endif
#endif
#ifdef W
#  if W == 8 && defined(Z_U8)
     typedef Z_U8 z_word_t;
#  elif defined(Z_U4)
#    undef W
#    define W 4
     typedef Z_U4 z_word_t;
#  else
#    undef W
#  endif
#endif

/* Local functions.
*/ local z_crc_t multmodp OF((z_crc_t a, z_crc_t b)); local z_crc_t x2nmodp OF((z_off64_t n, unsigned k)); /* If available, use the ARM processor CRC32 instruction. */ #if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) && W == 8 \ && defined(USE_CANONICAL_ARMV8_CRC32) # define ARMCRC32_CANONICAL_ZLIB #endif #if defined(W) && (!defined(ARMCRC32_CANONICAL_ZLIB) || defined(DYNAMIC_CRC_TABLE)) /* Swap the bytes in a z_word_t to convert between little and big endian. Any self-respecting compiler will optimize this to a single machine byte-swap instruction, if one is available. This assumes that word_t is either 32 bits or 64 bits. */ local z_word_t byte_swap(word) z_word_t word; { # if W == 8 return (word & 0xff00000000000000) >> 56 | (word & 0xff000000000000) >> 40 | (word & 0xff0000000000) >> 24 | (word & 0xff00000000) >> 8 | (word & 0xff000000) << 8 | (word & 0xff0000) << 24 | (word & 0xff00) << 40 | (word & 0xff) << 56; # else /* W == 4 */ return (word & 0xff000000) >> 24 | (word & 0xff0000) >> 8 | (word & 0xff00) << 8 | (word & 0xff) << 24; # endif } #endif /* CRC polynomial. */ #define POLY 0xedb88320 /* p(x) reflected, with x^32 implied */ #ifdef DYNAMIC_CRC_TABLE local z_crc_t FAR crc_table[256]; local z_crc_t FAR x2n_table[32]; local void make_crc_table OF((void)); #ifdef W local z_word_t FAR crc_big_table[256]; local z_crc_t FAR crc_braid_table[W][256]; local z_word_t FAR crc_braid_big_table[W][256]; local void braid OF((z_crc_t [][256], z_word_t [][256], int, int)); #endif #ifdef MAKECRCH local void write_table OF((FILE *, const z_crc_t FAR *, int)); local void write_table32hi OF((FILE *, const z_word_t FAR *, int)); local void write_table64 OF((FILE *, const z_word_t FAR *, int)); #endif /* MAKECRCH */ /* Define a once() function depending on the availability of atomics. If this is compiled with DYNAMIC_CRC_TABLE defined, and if CRCs will be computed in multiple threads, and if atomics are not available, then get_crc_table() must be called to initialize the tables and must return before any threads are allowed to compute or combine CRCs. */ /* Definition of once functionality. */ typedef struct once_s once_t; local void once OF((once_t *, void (*)(void))); /* Check for the availability of atomics. */ #if defined(__STDC__) && __STDC_VERSION__ >= 201112L && \ !defined(__STDC_NO_ATOMICS__) #include <stdatomic.h> /* Structure for once(), which must be initialized with ONCE_INIT. */ struct once_s { atomic_flag begun; atomic_int done; }; #define ONCE_INIT {ATOMIC_FLAG_INIT, 0} /* Run the provided init() function exactly once, even if multiple threads invoke once() at the same time. The state must be a once_t initialized with ONCE_INIT. */ local void once(state, init) once_t *state; void (*init)(void); { if (!atomic_load(&state->done)) { if (atomic_flag_test_and_set(&state->begun)) while (!atomic_load(&state->done)) ; else { init(); atomic_store(&state->done, 1); } } } #else /* no atomics */ /* Structure for once(), which must be initialized with ONCE_INIT. */ struct once_s { volatile int begun; volatile int done; }; #define ONCE_INIT {0, 0} /* Test and set. Alas, not atomic, but tries to minimize the period of vulnerability. */ local int test_and_set OF((int volatile *)); local int test_and_set(flag) int volatile *flag; { int was; was = *flag; *flag = 1; return was; } /* Run the provided init() function once. This is not thread-safe. 
*/ local void once(state, init) once_t *state; void (*init)(void); { if (!state->done) { if (test_and_set(&state->begun)) while (!state->done) ; else { init(); state->done = 1; } } } #endif /* State for once(). */ local once_t made = ONCE_INIT; /* Generate tables for a byte-wise 32-bit CRC calculation on the polynomial: x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1. Polynomials over GF(2) are represented in binary, one bit per coefficient, with the lowest powers in the most significant bit. Then adding polynomials is just exclusive-or, and multiplying a polynomial by x is a right shift by one. If we call the above polynomial p, and represent a byte as the polynomial q, also with the lowest power in the most significant bit (so the byte 0xb1 is the polynomial x^7+x^3+x^2+1), then the CRC is (q*x^32) mod p, where a mod b means the remainder after dividing a by b. This calculation is done using the shift-register method of multiplying and taking the remainder. The register is initialized to zero, and for each incoming bit, x^32 is added mod p to the register if the bit is a one (where x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by x (which is shifting right by one and adding x^32 mod p if the bit shifted out is a one). We start with the highest power (least significant bit) of q and repeat for all eight bits of q. The table is simply the CRC of all possible eight bit values. This is all the information needed to generate CRCs on data a byte at a time for all combinations of CRC register values and incoming bytes. */ local void make_crc_table() { unsigned i, j, n; z_crc_t p; /* initialize the CRC of bytes tables */ for (i = 0; i < 256; i++) { p = i; for (j = 0; j < 8; j++) p = p & 1 ? (p >> 1) ^ POLY : p >> 1; crc_table[i] = p; #ifdef W crc_big_table[i] = byte_swap(p); #endif } /* initialize the x^2^n mod p(x) table */ p = (z_crc_t)1 << 30; /* x^1 */ x2n_table[0] = p; for (n = 1; n < 32; n++) x2n_table[n] = p = multmodp(p, p); #ifdef W /* initialize the braiding tables -- needs x2n_table[] */ braid(crc_braid_table, crc_braid_big_table, N, W); #endif #ifdef MAKECRCH { /* The crc32.h header file contains tables for both 32-bit and 64-bit z_word_t's, and so requires a 64-bit type be available. In that case, z_word_t must be defined to be 64-bits. This code then also generates and writes out the tables for the case that z_word_t is 32 bits. */ #if !defined(W) || W != 8 # error Need a 64-bit integer type in order to generate crc32.h. 
#endif FILE *out; int k, n; z_crc_t ltl[8][256]; z_word_t big[8][256]; out = fopen("crc32.h", "w"); if (out == NULL) return; /* write out little-endian CRC table to crc32.h */ fprintf(out, "/* crc32.h -- tables for rapid CRC calculation\n" " * Generated automatically by crc32.c\n */\n" "\n" "local const z_crc_t FAR crc_table[] = {\n" " "); write_table(out, crc_table, 256); fprintf(out, "};\n"); /* write out big-endian CRC table for 64-bit z_word_t to crc32.h */ fprintf(out, "\n" "#ifdef W\n" "\n" "#if W == 8\n" "\n" "local const z_word_t FAR crc_big_table[] = {\n" " "); write_table64(out, crc_big_table, 256); fprintf(out, "};\n"); /* write out big-endian CRC table for 32-bit z_word_t to crc32.h */ fprintf(out, "\n" "#else /* W == 4 */\n" "\n" "local const z_word_t FAR crc_big_table[] = {\n" " "); write_table32hi(out, crc_big_table, 256); fprintf(out, "};\n" "\n" "#endif\n"); /* write out braid tables for each value of N */ for (n = 1; n <= 6; n++) { fprintf(out, "\n" "#if N == %d\n", n); /* compute braid tables for this N and 64-bit word_t */ braid(ltl, big, n, 8); /* write out braid tables for 64-bit z_word_t to crc32.h */ fprintf(out, "\n" "#if W == 8\n" "\n" "local const z_crc_t FAR crc_braid_table[][256] = {\n"); for (k = 0; k < 8; k++) { fprintf(out, " {"); write_table(out, ltl[k], 256); fprintf(out, "}%s", k < 7 ? ",\n" : ""); } fprintf(out, "};\n" "\n" "local const z_word_t FAR crc_braid_big_table[][256] = {\n"); for (k = 0; k < 8; k++) { fprintf(out, " {"); write_table64(out, big[k], 256); fprintf(out, "}%s", k < 7 ? ",\n" : ""); } fprintf(out, "};\n"); /* compute braid tables for this N and 32-bit word_t */ braid(ltl, big, n, 4); /* write out braid tables for 32-bit z_word_t to crc32.h */ fprintf(out, "\n" "#else /* W == 4 */\n" "\n" "local const z_crc_t FAR crc_braid_table[][256] = {\n"); for (k = 0; k < 4; k++) { fprintf(out, " {"); write_table(out, ltl[k], 256); fprintf(out, "}%s", k < 3 ? ",\n" : ""); } fprintf(out, "};\n" "\n" "local const z_word_t FAR crc_braid_big_table[][256] = {\n"); for (k = 0; k < 4; k++) { fprintf(out, " {"); write_table32hi(out, big[k], 256); fprintf(out, "}%s", k < 3 ? ",\n" : ""); } fprintf(out, "};\n" "\n" "#endif\n" "\n" "#endif\n"); } fprintf(out, "\n" "#endif\n"); /* write out zeros operator table to crc32.h */ fprintf(out, "\n" "local const z_crc_t FAR x2n_table[] = {\n" " "); write_table(out, x2n_table, 32); fprintf(out, "};\n"); fclose(out); } #endif /* MAKECRCH */ } #ifdef MAKECRCH /* Write the 32-bit values in table[0..k-1] to out, five per line in hexadecimal separated by commas. */ local void write_table(out, table, k) FILE *out; const z_crc_t FAR *table; int k; { int n; for (n = 0; n < k; n++) fprintf(out, "%s0x%08lx%s", n == 0 || n % 5 ? "" : " ", (unsigned long)(table[n]), n == k - 1 ? "" : (n % 5 == 4 ? ",\n" : ", ")); } /* Write the high 32-bits of each value in table[0..k-1] to out, five per line in hexadecimal separated by commas. */ local void write_table32hi(out, table, k) FILE *out; const z_word_t FAR *table; int k; { int n; for (n = 0; n < k; n++) fprintf(out, "%s0x%08lx%s", n == 0 || n % 5 ? "" : " ", (unsigned long)(table[n] >> 32), n == k - 1 ? "" : (n % 5 == 4 ? ",\n" : ", ")); } /* Write the 64-bit values in table[0..k-1] to out, three per line in hexadecimal separated by commas. This assumes that if there is a 64-bit type, then there is also a long long integer type, and it is at least 64 bits. If not, then the type cast and format string can be adjusted accordingly. 
*/ local void write_table64(out, table, k) FILE *out; const z_word_t FAR *table; int k; { int n; for (n = 0; n < k; n++) fprintf(out, "%s0x%016llx%s", n == 0 || n % 3 ? "" : " ", (unsigned long long)(table[n]), n == k - 1 ? "" : (n % 3 == 2 ? ",\n" : ", ")); } /* Actually do the deed. */ int main() { make_crc_table(); return 0; } #endif /* MAKECRCH */ #ifdef W /* Generate the little and big-endian braid tables for the given n and z_word_t size w. Each array must have room for w blocks of 256 elements. */ local void braid(ltl, big, n, w) z_crc_t ltl[][256]; z_word_t big[][256]; int n; int w; { int k; z_crc_t i, p, q; for (k = 0; k < w; k++) { p = x2nmodp((n * w + 3 - k) << 3, 0); ltl[k][0] = 0; big[w - 1 - k][0] = 0; for (i = 1; i < 256; i++) { ltl[k][i] = q = multmodp(i << 24, p); big[w - 1 - k][i] = byte_swap(q); } } } #endif #else /* !DYNAMIC_CRC_TABLE */ /* ======================================================================== * Tables for byte-wise and braided CRC-32 calculations, and a table of powers * of x for combining CRC-32s, all made by make_crc_table(). */ #include "crc32.h" #endif /* DYNAMIC_CRC_TABLE */ /* ======================================================================== * Routines used for CRC calculation. Some are also required for the table * generation above. */ /* Return a(x) multiplied by b(x) modulo p(x), where p(x) is the CRC polynomial, reflected. For speed, this requires that a not be zero. */ local z_crc_t multmodp(a, b) z_crc_t a; z_crc_t b; { z_crc_t m, p; m = (z_crc_t)1 << 31; p = 0; for (;;) { if (a & m) { p ^= b; if ((a & (m - 1)) == 0) break; } m >>= 1; b = b & 1 ? (b >> 1) ^ POLY : b >> 1; } return p; } /* Return x^(n * 2^k) modulo p(x). Requires that x2n_table[] has been initialized. */ local z_crc_t x2nmodp(n, k) z_off64_t n; unsigned k; { z_crc_t p; p = (z_crc_t)1 << 31; /* x^0 == 1 */ while (n) { if (n & 1) p = multmodp(x2n_table[k & 31], p); n >>= 1; k++; } return p; } /* ========================================================================= * This function can be used by asm versions of crc32(), and to force the * generation of the CRC tables in a threaded application. */ const z_crc_t FAR * ZEXPORT get_crc_table() { #ifdef DYNAMIC_CRC_TABLE once(&made, make_crc_table); #endif /* DYNAMIC_CRC_TABLE */ return (const z_crc_t FAR *)crc_table; } /* ========================================================================= * Use ARM machine instructions if available. This will compute the CRC about * ten times faster than the braided calculation. This code does not check for * the presence of the CRC instruction at run time. __ARM_FEATURE_CRC32 will * only be defined if the compilation specifies an ARM processor architecture * that has the instructions. For example, compiling with -march=armv8.1-a or * -march=armv8-a+crc, or -march=native if the compile machine has the crc32 * instructions. */ #if ARMCRC32_CANONICAL_ZLIB /* Constants empirically determined to maximize speed. These values are from measurements on a Cortex-A57. Your mileage may vary. */ #define Z_BATCH 3990 /* number of words in a batch */ #define Z_BATCH_ZEROS 0xa10d3d0c /* computed from Z_BATCH = 3990 */ #define Z_BATCH_MIN 800 /* fewest words in a final batch */ unsigned long ZEXPORT crc32_z(crc, buf, len) unsigned long crc; const unsigned char FAR *buf; z_size_t len; { z_crc_t val; z_word_t crc1, crc2; const z_word_t *word; z_word_t val0, val1, val2; z_size_t last, last2, i; z_size_t num; /* Return initial CRC, if requested. 
unsigned long ZEXPORT crc32_z(crc, buf, len)
    unsigned long crc;
    const unsigned char FAR *buf;
    z_size_t len;
{
    z_crc_t val;
    z_word_t crc1, crc2;
    const z_word_t *word;
    z_word_t val0, val1, val2;
    z_size_t last, last2, i;
    z_size_t num;

    /* Return initial CRC, if requested. */
    if (buf == Z_NULL) return 0;

#ifdef DYNAMIC_CRC_TABLE
    once(&made, make_crc_table);
#endif /* DYNAMIC_CRC_TABLE */

    /* Pre-condition the CRC */
    crc = (~crc) & 0xffffffff;

    /* Compute the CRC up to a word boundary. */
    while (len && ((z_size_t)buf & 7) != 0) {
        len--;
        val = *buf++;
        __asm__ volatile("crc32b %w0, %w0, %w1" : "+r"(crc) : "r"(val));
    }

    /* Prepare to compute the CRC on full 64-bit words word[0..num-1]. */
    word = (z_word_t const *)buf;
    num = len >> 3;
    len &= 7;

    /* Do three interleaved CRCs to realize the throughput of one crc32x
       instruction per cycle. Each CRC is calculated on Z_BATCH words. The
       three CRCs are combined into a single CRC after each set of batches. */
    while (num >= 3 * Z_BATCH) {
        crc1 = 0;
        crc2 = 0;
        for (i = 0; i < Z_BATCH; i++) {
            val0 = word[i];
            val1 = word[i + Z_BATCH];
            val2 = word[i + 2 * Z_BATCH];
            __asm__ volatile("crc32x %w0, %w0, %x1" : "+r"(crc) : "r"(val0));
            __asm__ volatile("crc32x %w0, %w0, %x1" : "+r"(crc1) : "r"(val1));
            __asm__ volatile("crc32x %w0, %w0, %x1" : "+r"(crc2) : "r"(val2));
        }
        word += 3 * Z_BATCH;
        num -= 3 * Z_BATCH;
        crc = multmodp(Z_BATCH_ZEROS, crc) ^ crc1;
        crc = multmodp(Z_BATCH_ZEROS, crc) ^ crc2;
    }

    /* Do one last smaller batch with the remaining words, if there are enough
       to pay for the combination of CRCs. */
    last = num / 3;
    if (last >= Z_BATCH_MIN) {
        last2 = last << 1;
        crc1 = 0;
        crc2 = 0;
        for (i = 0; i < last; i++) {
            val0 = word[i];
            val1 = word[i + last];
            val2 = word[i + last2];
            __asm__ volatile("crc32x %w0, %w0, %x1" : "+r"(crc) : "r"(val0));
            __asm__ volatile("crc32x %w0, %w0, %x1" : "+r"(crc1) : "r"(val1));
            __asm__ volatile("crc32x %w0, %w0, %x1" : "+r"(crc2) : "r"(val2));
        }
        word += 3 * last;
        num -= 3 * last;
        val = x2nmodp(last, 6);
        crc = multmodp(val, crc) ^ crc1;
        crc = multmodp(val, crc) ^ crc2;
    }

    /* Compute the CRC on any remaining words. */
    for (i = 0; i < num; i++) {
        val0 = word[i];
        __asm__ volatile("crc32x %w0, %w0, %x1" : "+r"(crc) : "r"(val0));
    }
    word += num;

    /* Complete the CRC on any remaining bytes. */
    buf = (const unsigned char FAR *)word;
    while (len) {
        len--;
        val = *buf++;
        __asm__ volatile("crc32b %w0, %w0, %w1" : "+r"(crc) : "r"(val));
    }

    /* Return the CRC, post-conditioned. */
    return crc ^ 0xffffffff;
}

#else

#ifdef W

/*
  Return the CRC of the W bytes in the word_t data, taking the
  least-significant byte of the word as the first byte of data, without any
  pre or post conditioning. This is used to combine the CRCs of each braid.
 */
local z_crc_t crc_word(data)
    z_word_t data;
{
    int k;
    for (k = 0; k < W; k++)
        data = (data >> 8) ^ crc_table[data & 0xff];
    return (z_crc_t)data;
}

local z_word_t crc_word_big(data)
    z_word_t data;
{
    int k;
    for (k = 0; k < W; k++)
        data = (data << 8) ^
               crc_big_table[(data >> ((W - 1) << 3)) & 0xff];
    return data;
}

#endif
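/*
   Orientation note for the braided code below: W is sizeof(z_word_t) and N is
   the number of interleaved CRCs ("braids") computed per block of N words.
   Inside the main loop each braid needs only one table lookup per byte lane
   (crc_braid_table on little-endian machines, crc_braid_big_table on
   big-endian ones); the N partial CRCs are merged just once, at the final
   block, using crc_word() or crc_word_big() above.
 */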
/* ========================================================================= */
unsigned long ZEXPORT crc32_z(crc, buf, len)
    unsigned long crc;
    const unsigned char FAR *buf;
    z_size_t len;
{
    /*
     * zlib convention is to call crc32(0, NULL, 0); before making
     * calls to crc32(). So this is a good, early (and infrequent)
     * place to cache CPU features if needed for those later, more
     * interesting crc32() calls.
     */
#if defined(CRC32_SIMD_SSE42_PCLMUL)
    /*
     * Use x86 sse4.2+pclmul SIMD to compute the crc32. Since this
     * routine can be freely used, check CPU features here.
     */
    if (buf == Z_NULL) {
        if (!len) /* Assume user is calling crc32(0, NULL, 0); */
            cpu_check_features();
        return 0UL;
    }

    if (x86_cpu_enable_simd && len >= Z_CRC32_SSE42_MINIMUM_LENGTH) {
        /* crc32 16-byte chunks */
        z_size_t chunk_size = len & ~Z_CRC32_SSE42_CHUNKSIZE_MASK;
        crc = ~crc32_sse42_simd_(buf, chunk_size, ~(uint32_t)crc);
        /* check remaining data */
        len -= chunk_size;
        if (!len)
            return crc;
        /* Fall into the default crc32 for the remaining data. */
        buf += chunk_size;
    }
#else
    if (buf == Z_NULL) {
        return 0UL;
    }
#endif /* CRC32_SIMD_SSE42_PCLMUL */

#ifdef DYNAMIC_CRC_TABLE
    once(&made, make_crc_table);
#endif /* DYNAMIC_CRC_TABLE */

    /* Pre-condition the CRC */
    crc = (~crc) & 0xffffffff;

#ifdef W

    /* If provided enough bytes, do a braided CRC calculation. */
    if (len >= N * W + W - 1) {
        z_size_t blks;
        z_word_t const *words;
        unsigned endian;
        int k;

        /* Compute the CRC up to a z_word_t boundary. */
        while (len && ((z_size_t)buf & (W - 1)) != 0) {
            len--;
            crc = (crc >> 8) ^ crc_table[(crc ^ *buf++) & 0xff];
        }

        /* Compute the CRC on as many N z_word_t blocks as are available. */
        blks = len / (N * W);
        len -= blks * N * W;
        words = (z_word_t const *)buf;

        /* Do the endian check at execution time instead of compile time,
           since ARM processors can change the endianness at execution time.
           If the compiler knows what the endianness will be, it can optimize
           out the check and the unused branch. */
        endian = 1;
        if (*(unsigned char *)&endian) {
            /* Little endian. */

            z_crc_t crc0;
            z_word_t word0;
#if N > 1
            z_crc_t crc1;
            z_word_t word1;
#if N > 2
            z_crc_t crc2;
            z_word_t word2;
#if N > 3
            z_crc_t crc3;
            z_word_t word3;
#if N > 4
            z_crc_t crc4;
            z_word_t word4;
#if N > 5
            z_crc_t crc5;
            z_word_t word5;
#endif
#endif
#endif
#endif
#endif

            /* Initialize the CRC for each braid. */
            crc0 = crc;
#if N > 1
            crc1 = 0;
#if N > 2
            crc2 = 0;
#if N > 3
            crc3 = 0;
#if N > 4
            crc4 = 0;
#if N > 5
            crc5 = 0;
#endif
#endif
#endif
#endif
#endif

            /* Process the first blks-1 blocks, computing the CRCs on each
               braid independently. */
            while (--blks) {
                /* Load the word for each braid into registers. */
                word0 = crc0 ^ words[0];
#if N > 1
                word1 = crc1 ^ words[1];
#if N > 2
                word2 = crc2 ^ words[2];
#if N > 3
                word3 = crc3 ^ words[3];
#if N > 4
                word4 = crc4 ^ words[4];
#if N > 5
                word5 = crc5 ^ words[5];
#endif
#endif
#endif
#endif
#endif
                words += N;

                /* Compute and update the CRC for each word. The loop should
                   get unrolled. */
                crc0 = crc_braid_table[0][word0 & 0xff];
#if N > 1
                crc1 = crc_braid_table[0][word1 & 0xff];
#if N > 2
                crc2 = crc_braid_table[0][word2 & 0xff];
#if N > 3
                crc3 = crc_braid_table[0][word3 & 0xff];
#if N > 4
                crc4 = crc_braid_table[0][word4 & 0xff];
#if N > 5
                crc5 = crc_braid_table[0][word5 & 0xff];
#endif
#endif
#endif
#endif
#endif
                for (k = 1; k < W; k++) {
                    crc0 ^= crc_braid_table[k][(word0 >> (k << 3)) & 0xff];
#if N > 1
                    crc1 ^= crc_braid_table[k][(word1 >> (k << 3)) & 0xff];
#if N > 2
                    crc2 ^= crc_braid_table[k][(word2 >> (k << 3)) & 0xff];
#if N > 3
                    crc3 ^= crc_braid_table[k][(word3 >> (k << 3)) & 0xff];
#if N > 4
                    crc4 ^= crc_braid_table[k][(word4 >> (k << 3)) & 0xff];
#if N > 5
                    crc5 ^= crc_braid_table[k][(word5 >> (k << 3)) & 0xff];
#endif
#endif
#endif
#endif
#endif
                }
            }
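            /*
               Why the loop above runs blks - 1 times (added note): each
               iteration XORs the running braid CRCs into the next N words
               before advancing, so one final block of N words must remain
               for that last XOR; it is consumed immediately below.
             */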
            /* Process the last block, combining the CRCs of the N braids at
               the same time. */
            crc = crc_word(crc0 ^ words[0]);
#if N > 1
            crc = crc_word(crc1 ^ words[1] ^ crc);
#if N > 2
            crc = crc_word(crc2 ^ words[2] ^ crc);
#if N > 3
            crc = crc_word(crc3 ^ words[3] ^ crc);
#if N > 4
            crc = crc_word(crc4 ^ words[4] ^ crc);
#if N > 5
            crc = crc_word(crc5 ^ words[5] ^ crc);
#endif
#endif
#endif
#endif
#endif
            words += N;
        }
        else {
            /* Big endian. */

            z_word_t crc0, word0, comb;
#if N > 1
            z_word_t crc1, word1;
#if N > 2
            z_word_t crc2, word2;
#if N > 3
            z_word_t crc3, word3;
#if N > 4
            z_word_t crc4, word4;
#if N > 5
            z_word_t crc5, word5;
#endif
#endif
#endif
#endif
#endif

            /* Initialize the CRC for each braid. */
            crc0 = byte_swap(crc);
#if N > 1
            crc1 = 0;
#if N > 2
            crc2 = 0;
#if N > 3
            crc3 = 0;
#if N > 4
            crc4 = 0;
#if N > 5
            crc5 = 0;
#endif
#endif
#endif
#endif
#endif

            /* Process the first blks-1 blocks, computing the CRCs on each
               braid independently. */
            while (--blks) {
                /* Load the word for each braid into registers. */
                word0 = crc0 ^ words[0];
#if N > 1
                word1 = crc1 ^ words[1];
#if N > 2
                word2 = crc2 ^ words[2];
#if N > 3
                word3 = crc3 ^ words[3];
#if N > 4
                word4 = crc4 ^ words[4];
#if N > 5
                word5 = crc5 ^ words[5];
#endif
#endif
#endif
#endif
#endif
                words += N;

                /* Compute and update the CRC for each word. The loop should
                   get unrolled. */
                crc0 = crc_braid_big_table[0][word0 & 0xff];
#if N > 1
                crc1 = crc_braid_big_table[0][word1 & 0xff];
#if N > 2
                crc2 = crc_braid_big_table[0][word2 & 0xff];
#if N > 3
                crc3 = crc_braid_big_table[0][word3 & 0xff];
#if N > 4
                crc4 = crc_braid_big_table[0][word4 & 0xff];
#if N > 5
                crc5 = crc_braid_big_table[0][word5 & 0xff];
#endif
#endif
#endif
#endif
#endif
                for (k = 1; k < W; k++) {
                    crc0 ^= crc_braid_big_table[k][(word0 >> (k << 3)) & 0xff];
#if N > 1
                    crc1 ^= crc_braid_big_table[k][(word1 >> (k << 3)) & 0xff];
#if N > 2
                    crc2 ^= crc_braid_big_table[k][(word2 >> (k << 3)) & 0xff];
#if N > 3
                    crc3 ^= crc_braid_big_table[k][(word3 >> (k << 3)) & 0xff];
#if N > 4
                    crc4 ^= crc_braid_big_table[k][(word4 >> (k << 3)) & 0xff];
#if N > 5
                    crc5 ^= crc_braid_big_table[k][(word5 >> (k << 3)) & 0xff];
#endif
#endif
#endif
#endif
#endif
                }
            }

            /* Process the last block, combining the CRCs of the N braids at
               the same time. */
            comb = crc_word_big(crc0 ^ words[0]);
#if N > 1
            comb = crc_word_big(crc1 ^ words[1] ^ comb);
#if N > 2
            comb = crc_word_big(crc2 ^ words[2] ^ comb);
#if N > 3
            comb = crc_word_big(crc3 ^ words[3] ^ comb);
#if N > 4
            comb = crc_word_big(crc4 ^ words[4] ^ comb);
#if N > 5
            comb = crc_word_big(crc5 ^ words[5] ^ comb);
#endif
#endif
#endif
#endif
#endif
            words += N;
            crc = byte_swap(comb);
        }

        /* Update the pointer to the remaining bytes to process. */
        buf = (unsigned char const *)words;
    }

#endif /* W */

    /* Complete the computation of the CRC on any remaining bytes. */
    while (len >= 8) {
        len -= 8;
        crc = (crc >> 8) ^ crc_table[(crc ^ *buf++) & 0xff];
        crc = (crc >> 8) ^ crc_table[(crc ^ *buf++) & 0xff];
        crc = (crc >> 8) ^ crc_table[(crc ^ *buf++) & 0xff];
        crc = (crc >> 8) ^ crc_table[(crc ^ *buf++) & 0xff];
        crc = (crc >> 8) ^ crc_table[(crc ^ *buf++) & 0xff];
        crc = (crc >> 8) ^ crc_table[(crc ^ *buf++) & 0xff];
        crc = (crc >> 8) ^ crc_table[(crc ^ *buf++) & 0xff];
        crc = (crc >> 8) ^ crc_table[(crc ^ *buf++) & 0xff];
    }
    while (len) {
        len--;
        crc = (crc >> 8) ^ crc_table[(crc ^ *buf++) & 0xff];
    }
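    /*
       Added note: the two loops above are the plain byte-at-a-time, reflected
       table-driven update, crc = (crc >> 8) ^ crc_table[(crc ^ *buf++) & 0xff],
       with the first loop unrolled eight bytes per iteration to cut loop
       overhead on whatever tail the word-oriented paths left behind.
     */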
    /* Return the CRC, post-conditioned. */
    return crc ^ 0xffffffff;
}
#endif

/* ========================================================================= */
unsigned long ZEXPORT crc32(crc, buf, len)
    unsigned long crc;
    const unsigned char FAR *buf;
    uInt len;
{
#if defined(CRC32_ARMV8_CRC32)
    /* We need to verify the ARM CPU features, so exploit the common usage
     * pattern of calling this function with Z_NULL for an initial valid crc
     * value. This allows us to cache the result of the feature check and
     * avoid extraneous function calls.
     * TODO: try to move this to crc32_z if we don't lose performance on ARM.
     */
    if (buf == Z_NULL) {
        if (!len) /* Assume user is calling crc32(0, NULL, 0); */
            cpu_check_features();
        return 0UL;
    }

    if (arm_cpu_enable_crc32)
        return armv8_crc32_little(crc, buf, len);
#endif
    return crc32_z(crc, buf, len);
}

/* ========================================================================= */
uLong ZEXPORT crc32_combine64(crc1, crc2, len2)
    uLong crc1;
    uLong crc2;
    z_off64_t len2;
{
#ifdef DYNAMIC_CRC_TABLE
    once(&made, make_crc_table);
#endif /* DYNAMIC_CRC_TABLE */
    return multmodp(x2nmodp(len2, 3), crc1) ^ (crc2 & 0xffffffff);
}

/* ========================================================================= */
uLong ZEXPORT crc32_combine(crc1, crc2, len2)
    uLong crc1;
    uLong crc2;
    z_off_t len2;
{
    return crc32_combine64(crc1, crc2, len2);
}

/* ========================================================================= */
uLong ZEXPORT crc32_combine_gen64(len2)
    z_off64_t len2;
{
#ifdef DYNAMIC_CRC_TABLE
    once(&made, make_crc_table);
#endif /* DYNAMIC_CRC_TABLE */
    return x2nmodp(len2, 3);
}

/* ========================================================================= */
uLong ZEXPORT crc32_combine_gen(len2)
    z_off_t len2;
{
    return crc32_combine_gen64(len2);
}

/* ========================================================================= */
uLong ZEXPORT crc32_combine_op(crc1, crc2, op)
    uLong crc1;
    uLong crc2;
    uLong op;
{
    return multmodp(op, crc1) ^ (crc2 & 0xffffffff);
}

ZLIB_INTERNAL void crc_reset(deflate_state *const s)
{
#ifdef CRC32_SIMD_SSE42_PCLMUL
    if (x86_cpu_enable_simd) {
        crc_fold_init(s);
        return;
    }
#endif
    s->strm->adler = crc32(0L, Z_NULL, 0);
}

ZLIB_INTERNAL void crc_finalize(deflate_state *const s)
{
#ifdef CRC32_SIMD_SSE42_PCLMUL
    if (x86_cpu_enable_simd)
        s->strm->adler = crc_fold_512to32(s);
#endif
}

ZLIB_INTERNAL void copy_with_crc(z_streamp strm, Bytef *dst, long size)
{
#ifdef CRC32_SIMD_SSE42_PCLMUL
    if (x86_cpu_enable_simd) {
        crc_fold_copy(strm->state, dst, strm->next_in, size);
        return;
    }
#endif
    zmemcpy(dst, strm->next_in, size);
    strm->adler = crc32(strm->adler, dst, size);
}
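/*
   Illustrative usage sketch (added; not part of the original source). The
   CRC32_USAGE_EXAMPLE guard is hypothetical -- define it only to build this
   demonstration of crc32() and crc32_combine(): the combine call returns the
   CRC of two concatenated pieces from their individual CRCs plus the length
   of the second piece, without re-reading any data.
 */
#ifdef CRC32_USAGE_EXAMPLE
#include <assert.h>
#include <stdio.h>

int main(void)
{
    static const unsigned char a[] = "hello ";   /* first piece, 6 bytes */
    static const unsigned char b[] = "world";    /* second piece, 5 bytes */
    uLong ca, cb, whole, combined;

    /* CRC of each piece, seeded with the canonical crc32(0, Z_NULL, 0). */
    ca = crc32(crc32(0L, Z_NULL, 0), a, 6);
    cb = crc32(crc32(0L, Z_NULL, 0), b, 5);

    /* CRC of the concatenation, computed directly over both pieces... */
    whole = crc32(ca, b, 5);

    /* ...and reconstructed from the piecewise CRCs (len2 = 5, the length of
       the second piece). The two must agree. */
    combined = crc32_combine(ca, cb, 5);
    assert(whole == combined);

    printf("crc32(\"hello world\") = 0x%08lx\n", (unsigned long)combined);
    return 0;
}
#endif /* CRC32_USAGE_EXAMPLE */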