| patch (stringlengths 17-31.2k) | y (int64 1-1) | oldf (stringlengths 0-2.21M) | idx (int64 1-1) | id (int64 4.29k-68.4k) | msg (stringlengths 8-843) | proj (stringclasses, 212 values) | lang (stringclasses, 9 values) |
---|---|---|---|---|---|---|---|
@@ -36,7 +36,17 @@ const env = envFromArgs(process.argv);
const profilePath = profileFromArgs(process.argv);
const isDebugMode = !!process.argv && process.argv.indexOf('--debug') >= 0;
-const wrapper = new ElectronAppWrapper(electronApp, env, profilePath, isDebugMode);
+if (env === 'dev' && process.platform === 'win32') {
+ electronApp.setAsDefaultProtocolClient('joplin', process.execPath, [
+ resolve(process.argv[1])
+ ]);
+} else {
+ electronApp.setAsDefaultProtocolClient('joplin');
+}
+
+const initialUrl = process.argv.find((arg) => arg.startsWith('joplin://'));
+
+const wrapper = new ElectronAppWrapper(electronApp, env, profilePath, isDebugMode, initialUrl);
initBridge(wrapper);
| 1 | // This is the basic initialization for the Electron MAIN process
const electronApp = require('electron').app;
const ElectronAppWrapper = require('./ElectronAppWrapper').default;
const { initBridge } = require('./bridge');
const Logger = require('@joplin/lib/Logger').default;
const FsDriverNode = require('@joplin/lib/fs-driver-node').default;
const envFromArgs = require('@joplin/lib/envFromArgs');
const packageInfo = require('./packageInfo.js');
// Electron takes the application name from package.json `name` and
// displays this in the tray icon toolip and message box titles, however in
// our case it's a string like "@joplin/app-desktop". It's also supposed to
// check the productName key but is not doing it, so here set the
// application name to the right string.
electronApp.name = packageInfo.name;
process.on('unhandledRejection', (reason, p) => {
console.error('Unhandled promise rejection', p, 'reason:', reason);
process.exit(1);
});
// Likewise, we want to know if a profile is specified early, in particular
// to save the window state data.
function profileFromArgs(args) {
if (!args) return null;
const profileIndex = args.indexOf('--profile');
if (profileIndex <= 0 || profileIndex >= args.length - 1) return null;
const profileValue = args[profileIndex + 1];
return profileValue ? profileValue : null;
}
Logger.fsDriver_ = new FsDriverNode();
const env = envFromArgs(process.argv);
const profilePath = profileFromArgs(process.argv);
const isDebugMode = !!process.argv && process.argv.indexOf('--debug') >= 0;
const wrapper = new ElectronAppWrapper(electronApp, env, profilePath, isDebugMode);
initBridge(wrapper);
wrapper.start().catch((error) => {
console.error('Electron App fatal error:');
console.error(error);
});
| 1 | 17,712 | Should start with joplin://x-callback-url/. Also maybe create a helper function to check if a url is valid? | laurent22-joplin | js |
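The reviewer's suggestion above is easy to sketch. Assuming a helper named `isCallbackUrl` (a name invented here for illustration; it is not part of the actual PR), the argument scan in `main.js` could look like this:

```js
// Illustrative sketch only: accept a command-line argument as the initial URL
// only when it uses the joplin://x-callback-url/ prefix the reviewer asks for.
function isCallbackUrl(s) {
	return typeof s === 'string' && s.startsWith('joplin://x-callback-url/');
}

const initialCallbackUrl = process.argv.find(arg => isCallbackUrl(arg));
```

Keeping the check in a named helper also makes it straightforward to unit-test the URL validation separately from the Electron startup code.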
@@ -3,7 +3,7 @@ export default [
{
created_at: '2015-09-11T09:44:30.805Z',
created_by: 1,
- id: 1,
+ id: 7,
key: 'title',
type: 'blog',
updated_at: '2015-10-04T16:26:05.195Z', | 1 | /* jscs:disable requireCamelCaseOrUpperCaseIdentifiers */
export default [
{
created_at: '2015-09-11T09:44:30.805Z',
created_by: 1,
id: 1,
key: 'title',
type: 'blog',
updated_at: '2015-10-04T16:26:05.195Z',
updated_by: 1,
uuid: '39e16daf-43fa-4bf0-87d4-44948ba8bf4c',
value: 'Test Blog'
},
{
created_at: '2015-09-11T09:44:30.806Z',
created_by: 1,
id: 2,
key: 'description',
type: 'blog',
updated_at: '2015-10-04T16:26:05.198Z',
updated_by: 1,
uuid: 'e6c8b636-6925-4c4a-a5d9-1dc0870fb8ea',
value: 'Thoughts, stories and ideas.'
},
{
id: 3,
uuid: '4339ce48-b485-418a-acc2-1d34cf17a5e3',
key: 'logo',
value: '/content/images/2013/Nov/logo.png',
type: 'blog',
created_at: '2013-11-25T14:48:11.000Z',
created_by: 1,
updated_at: '2015-10-27T17:39:58.273Z',
updated_by: 1
},
{
id: 4,
uuid: 'e41b6c2a-7f72-45ea-96d8-ee016f06d78b',
key: 'cover',
value: '/content/images/2014/Feb/cover.jpg',
type: 'blog',
created_at: '2013-11-25T14:48:11.000Z',
created_by: 1,
updated_at: '2015-10-27T17:39:58.276Z',
updated_by: 1
},
{
id: 5,
uuid: '4558457e-9f61-47a5-9d45-8b83829bf1cf',
key: 'defaultLang',
value: 'en_US',
type: 'blog',
created_at: '2013-11-25T14:48:11.000Z',
created_by: 1,
updated_at: '2015-10-27T17:39:58.278Z',
updated_by: 1
},
{
created_at: '2015-09-11T09:44:30.809Z',
created_by: 1,
id: 6,
key: 'postsPerPage',
type: 'blog',
updated_at: '2015-10-04T16:26:05.211Z',
updated_by: 1,
uuid: '775e6ca1-bcc3-4347-a53d-15d5d76c04a4',
value: '5'
},
{
id: 7,
uuid: '3c93b240-d22b-473f-9063-537023e06c2d',
key: 'forceI18n',
value: 'true',
type: 'blog',
created_at: '2013-11-25T14:48:11.000Z',
created_by: 1,
updated_at: '2015-10-27T17:39:58.280Z',
updated_by: 1
},
{
id: 8,
uuid: '4e58389f-f173-4387-b28c-0435623882ad',
key: 'activeTheme',
value: 'casper',
type: 'theme',
created_at: '2013-11-25T14:48:11.000Z',
created_by: 1,
updated_at: '2015-10-27T17:39:58.284Z',
updated_by: 1
},
{
id: 9,
uuid: '8052c2bf-9c19-4d6c-8944-7465321d00be',
key: 'permalinks',
value: '/:slug/',
type: 'blog',
created_at: '2014-01-14T12:01:51.000Z',
created_by: 1,
updated_at: '2015-10-27T17:39:58.282Z',
updated_by: 1
},
{
created_at: '2015-09-11T09:44:30.809Z',
created_by: 1,
id: 10,
key: 'ghost_head',
type: 'blog',
updated_at: '2015-09-23T13:32:49.858Z',
updated_by: 1,
uuid: 'df7f3151-bc08-4a77-be9d-dd315b630d51',
value: ''
},
{
created_at: '2015-09-11T09:44:30.809Z',
created_by: 1,
id: 11,
key: 'ghost_foot',
type: 'blog',
updated_at: '2015-09-23T13:32:49.858Z',
updated_by: 1,
uuid: '0649d45e-828b-4dd0-8381-3dff6d1d5ddb',
value: ''
},
{
id: 12,
uuid: 'd806f358-7996-4c74-b153-8876959c4b70',
key: 'labs',
value: '{"subscribers":true,"internalTags":true}',
type: 'blog',
created_at: '2015-01-12T18:29:01.000Z',
created_by: 1,
updated_at: '2015-10-27T17:39:58.288Z',
updated_by: 1
},
{
created_at: '2015-09-11T09:44:30.810Z',
created_by: 1,
id: 13,
key: 'navigation',
type: 'blog',
updated_at: '2015-09-23T13:32:49.868Z',
updated_by: 1,
uuid: '4cc51d1c-fcbd-47e6-a71b-fdd1abb223fc',
value: JSON.stringify([
{label: 'Home', url: '/'},
{label: 'About', url: '/about'}
])
},
{
created_at: '2015-09-11T09:44:30.810Z',
created_by: 1,
id: 14,
key: 'isPrivate',
type: 'blog',
updated_at: '2015-09-23T13:32:49.868Z',
updated_by: 1,
uuid: 'e306ec3e-d079-11e5-ab30-625662870761',
value: false
},
{
created_at: '2015-09-11T09:44:30.810Z',
created_by: 1,
id: 15,
key: 'password',
type: 'blog',
updated_at: '2015-09-23T13:32:49.868Z',
updated_by: 1,
uuid: 'f8e8cbda-d079-11e5-ab30-625662870761',
value: ''
},
{
created_at: '2016-05-05T15:04:03.115Z',
created_by: 1,
id: 17,
key: 'slack',
type: 'blog',
updated_at: '2016-05-05T18:33:09.168Z',
updated_by: 1,
uuid: 'dd4ebaa8-dedb-40ff-a663-ec64a92d4111',
value: '[{"url":""}]'
},
{
created_at: '2016-05-05T15:40:12.133Z',
created_by: 1,
id: 23,
key: 'facebook',
type: 'blog',
updated_at: '2016-05-08T15:20:25.953Z',
updated_by: 1,
uuid: 'd4387e5c-3230-46dd-a89b-0d8a40365c35',
value: 'test'
},
{
created_at: '2016-05-05T15:40:12.134Z',
created_by: 1,
id: 24,
key: 'twitter',
type: 'blog',
updated_at: '2016-05-08T15:20:25.954Z',
updated_by: 1,
uuid: '5130441f-e4c7-4750-9692-a22d841ab049',
value: '@test'
},
{
created_at: '2015-09-11T09:44:30.810Z',
created_by: 1,
id: 16,
key: 'activeTimezone',
type: 'blog',
updated_at: '2015-09-23T13:32:49.868Z',
updated_by: 1,
uuid: '310c9169-9613-48b0-8bc4-d1e1c9be85b8',
value: 'Etc/UTC'
},
{
key: 'availableThemes',
id: 18,
value: [
{
name: 'casper',
package: {
name: 'Blog',
version: '1.0'
},
active: true
},
{
name: 'foo',
package: {
name: 'Foo',
version: '0.1'
}
},
{
name: 'bar'
}
],
type: 'theme'
}
];
| 1 | 7,745 | PR works great. Just out of curiosity: why all the id changes? | TryGhost-Admin | js |
@@ -56,6 +56,14 @@ struct flb_service_config service_configs[] = {
FLB_CONF_TYPE_INT,
offsetof(struct flb_config, grace)},
+ {FLB_CONF_STR_BACKOFF_BASE,
+ FLB_CONF_TYPE_INT,
+ offsetof(struct flb_config, backoff_base)},
+
+ {FLB_CONF_STR_BACKOFF_CAP,
+ FLB_CONF_TYPE_INT,
+ offsetof(struct flb_config, backoff_cap)},
+
{FLB_CONF_STR_DAEMON,
FLB_CONF_TYPE_BOOL,
offsetof(struct flb_config, daemon)}, | 1 | /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* Fluent Bit
* ==========
* Copyright (C) 2019-2020 The Fluent Bit Authors
* Copyright (C) 2015-2018 Treasure Data Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <stddef.h>
#include <monkey/mk_core.h>
#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_mem.h>
#include <fluent-bit/flb_str.h>
#include <fluent-bit/flb_kv.h>
#include <fluent-bit/flb_env.h>
#include <fluent-bit/flb_macros.h>
#include <fluent-bit/flb_config.h>
#include <fluent-bit/flb_parser.h>
#include <fluent-bit/flb_plugin.h>
#include <fluent-bit/flb_plugins.h>
#include <fluent-bit/flb_slist.h>
#include <fluent-bit/flb_io_tls.h>
#include <fluent-bit/flb_kernel.h>
#include <fluent-bit/flb_worker.h>
#include <fluent-bit/flb_scheduler.h>
#include <fluent-bit/flb_http_server.h>
#include <fluent-bit/flb_plugin.h>
#include <fluent-bit/flb_utils.h>
const char *FLB_CONF_ENV_LOGLEVEL = "FLB_LOG_LEVEL";
int flb_regex_init();
struct flb_service_config service_configs[] = {
{FLB_CONF_STR_FLUSH,
FLB_CONF_TYPE_DOUBLE,
offsetof(struct flb_config, flush)},
{FLB_CONF_STR_GRACE,
FLB_CONF_TYPE_INT,
offsetof(struct flb_config, grace)},
{FLB_CONF_STR_DAEMON,
FLB_CONF_TYPE_BOOL,
offsetof(struct flb_config, daemon)},
{FLB_CONF_STR_LOGFILE,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, log_file)},
{FLB_CONF_STR_PARSERS_FILE,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, parsers_file)},
{FLB_CONF_STR_PLUGINS_FILE,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, plugins_file)},
{FLB_CONF_STR_LOGLEVEL,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, log)},
#ifdef FLB_HAVE_HTTP_SERVER
{FLB_CONF_STR_HTTP_SERVER,
FLB_CONF_TYPE_BOOL,
offsetof(struct flb_config, http_server)},
{FLB_CONF_STR_HTTP_LISTEN,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, http_listen)},
{FLB_CONF_STR_HTTP_PORT,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, http_port)},
#endif
/* Storage */
{FLB_CONF_STORAGE_PATH,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, storage_path)},
{FLB_CONF_STORAGE_SYNC,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, storage_sync)},
{FLB_CONF_STORAGE_METRICS,
FLB_CONF_TYPE_BOOL,
offsetof(struct flb_config, storage_metrics)},
{FLB_CONF_STORAGE_CHECKSUM,
FLB_CONF_TYPE_BOOL,
offsetof(struct flb_config, storage_checksum)},
{FLB_CONF_STORAGE_BL_MEM_LIMIT,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, storage_bl_mem_limit)},
{FLB_CONF_STORAGE_MAX_CHUNKS_UP,
FLB_CONF_TYPE_INT,
offsetof(struct flb_config, storage_max_chunks_up)},
/* Coroutines */
{FLB_CONF_STR_CORO_STACK_SIZE,
FLB_CONF_TYPE_INT,
offsetof(struct flb_config, coro_stack_size)},
#ifdef FLB_HAVE_STREAM_PROCESSOR
{FLB_CONF_STR_STREAMS_FILE,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, stream_processor_file)},
#endif
{NULL, FLB_CONF_TYPE_OTHER, 0} /* end of array */
};
struct flb_config *flb_config_init()
{
int ret;
struct flb_config *config;
config = flb_calloc(1, sizeof(struct flb_config));
if (!config) {
flb_errno();
return NULL;
}
MK_EVENT_ZERO(&config->ch_event);
MK_EVENT_ZERO(&config->event_flush);
MK_EVENT_ZERO(&config->event_shutdown);
/* is data ingestion active ? */
config->is_ingestion_active = FLB_TRUE;
/* Is the engine (event loop) actively running ? */
config->is_running = FLB_TRUE;
/* Flush */
config->flush = FLB_CONFIG_FLUSH_SECS;
config->daemon = FLB_FALSE;
config->init_time = time(NULL);
config->kernel = flb_kernel_info();
config->verbose = 3;
config->grace = 5;
config->exit_status_code = 0;
#ifdef FLB_HAVE_HTTP_SERVER
config->http_ctx = NULL;
config->http_server = FLB_FALSE;
config->http_listen = flb_strdup(FLB_CONFIG_HTTP_LISTEN);
config->http_port = flb_strdup(FLB_CONFIG_HTTP_PORT);
#endif
config->http_proxy = getenv("HTTP_PROXY");
if (config->http_proxy != NULL && strcmp(config->http_proxy, "") == 0) {
/* Proxy should not be set when the `HTTP_PROXY` is set to "" */
config->http_proxy = NULL;
}
config->cio = NULL;
config->storage_path = NULL;
config->storage_input_plugin = NULL;
#ifdef FLB_HAVE_SQLDB
mk_list_init(&config->sqldb_list);
#endif
#ifdef FLB_HAVE_LUAJIT
mk_list_init(&config->luajit_list);
#endif
#ifdef FLB_HAVE_STREAM_PROCESSOR
flb_slist_create(&config->stream_processor_tasks);
#endif
/* Set default coroutines stack size */
config->coro_stack_size = FLB_THREAD_STACK_SIZE;
/* Initialize linked lists */
mk_list_init(&config->collectors);
mk_list_init(&config->in_plugins);
mk_list_init(&config->parser_plugins);
mk_list_init(&config->filter_plugins);
mk_list_init(&config->out_plugins);
mk_list_init(&config->inputs);
mk_list_init(&config->parsers);
mk_list_init(&config->filters);
mk_list_init(&config->outputs);
mk_list_init(&config->proxies);
mk_list_init(&config->workers);
mk_list_init(&config->upstreams);
memset(&config->tasks_map, '\0', sizeof(config->tasks_map));
/* Environment */
config->env = flb_env_create();
/* Register static plugins */
ret = flb_plugins_register(config);
if (ret == -1) {
flb_error("[config] plugins registration failed");
flb_config_exit(config);
return NULL;
}
/* Create environment for dynamic plugins */
config->dso_plugins = flb_plugin_create();
/* Ignoring SIGPIPE on Windows (scary) */
#ifndef _WIN32
/* Ignore SIGPIPE */
signal(SIGPIPE, SIG_IGN);
#endif
/* Prepare worker interface */
flb_worker_init(config);
#ifdef FLB_HAVE_REGEX
/* Regex support */
flb_regex_init();
#endif
return config;
}
void flb_config_exit(struct flb_config *config)
{
struct mk_list *tmp;
struct mk_list *head;
struct flb_input_collector *collector;
if (config->log_file) {
flb_free(config->log_file);
}
if (config->log) {
flb_log_stop(config->log, config);
}
if (config->parsers_file) {
flb_free(config->parsers_file);
}
if (config->plugins_file) {
flb_free(config->plugins_file);
}
if (config->kernel) {
flb_free(config->kernel->s_version.data);
flb_free(config->kernel);
}
/* release resources */
if (config->ch_event.fd) {
mk_event_closesocket(config->ch_event.fd);
}
/* Pipe */
if (config->ch_data[0]) {
mk_event_closesocket(config->ch_data[0]);
mk_event_closesocket(config->ch_data[1]);
}
/* Channel manager */
if (config->ch_manager[0] > 0) {
mk_event_closesocket(config->ch_manager[0]);
if (config->ch_manager[0] != config->ch_manager[1]) {
mk_event_closesocket(config->ch_manager[1]);
}
}
/* Channel notifications */
if (config->ch_notif[0] > 0) {
mk_event_closesocket(config->ch_notif[0]);
if (config->ch_notif[0] != config->ch_notif[1]) {
mk_event_closesocket(config->ch_notif[1]);
}
}
/* Collectors */
mk_list_foreach_safe(head, tmp, &config->collectors) {
collector = mk_list_entry(head, struct flb_input_collector, _head);
if (collector->type == FLB_COLLECT_TIME) {
if (collector->fd_timer > 0) {
mk_event_timeout_destroy(config->evl, &collector->event);
mk_event_closesocket(collector->fd_timer);
}
} else {
mk_event_del(config->evl, &collector->event);
}
mk_list_del(&collector->_head);
flb_free(collector);
}
flb_env_destroy(config->env);
/* Program name */
if (config->program_name) {
flb_sds_destroy(config->program_name);
}
/* Conf path */
if (config->conf_path) {
flb_free(config->conf_path);
}
/* Destroy any DSO context */
flb_plugin_destroy(config->dso_plugins);
/* Workers */
flb_worker_exit(config);
/* Event flush */
if (config->evl) {
mk_event_del(config->evl, &config->event_flush);
}
mk_event_closesocket(config->flush_fd);
/* Release scheduler */
flb_sched_exit(config);
#ifdef FLB_HAVE_HTTP_SERVER
if (config->http_listen) {
flb_free(config->http_listen);
}
if (config->http_port) {
flb_free(config->http_port);
}
#endif
if (config->storage_path) {
flb_free(config->storage_path);
}
if (config->storage_sync) {
flb_free(config->storage_sync);
}
if (config->storage_bl_mem_limit) {
flb_free(config->storage_bl_mem_limit);
}
#ifdef FLB_HAVE_STREAM_PROCESSOR
if (config->stream_processor_file) {
flb_free(config->stream_processor_file);
}
flb_slist_destroy(&config->stream_processor_tasks);
#endif
if (config->evl) {
mk_event_loop_destroy(config->evl);
}
flb_plugins_unregister(config);
flb_free(config);
}
const char *flb_config_prop_get(const char *key, struct mk_list *list)
{
return flb_kv_get_key_value(key, list);
}
static inline int prop_key_check(const char *key, const char *kv, int k_len)
{
size_t len;
len = strnlen(key,256);
if (strncasecmp(key, kv, k_len) == 0 && len == k_len) {
return 0;
}
return -1;
}
static int set_log_level(struct flb_config *config, const char *v_str)
{
if (v_str != NULL) {
if (strcasecmp(v_str, "error") == 0) {
config->verbose = 1;
}
else if (strcasecmp(v_str, "warn") == 0 ||
strcasecmp(v_str, "warning") == 0) {
config->verbose = 2;
}
else if (strcasecmp(v_str, "info") == 0) {
config->verbose = 3;
}
else if (strcasecmp(v_str, "debug") == 0) {
config->verbose = 4;
}
else if (strcasecmp(v_str, "trace") == 0) {
config->verbose = 5;
}
else {
return -1;
}
}
else if (config->log) {
config->verbose = 3;
}
return 0;
}
int set_log_level_from_env(struct flb_config *config)
{
const char *val = NULL;
val = flb_env_get(config->env, FLB_CONF_ENV_LOGLEVEL);
if (val) {
return set_log_level(config, val);
}
return -1;
}
int flb_config_set_property(struct flb_config *config,
const char *k, const char *v)
{
int i=0;
int ret = -1;
int *i_val;
double *d_val;
char **s_val;
size_t len = strnlen(k, 256);
char *key = service_configs[0].key;
flb_sds_t tmp = NULL;
while (key != NULL) {
if (prop_key_check(key, k,len) == 0) {
if (!strncasecmp(key, FLB_CONF_STR_LOGLEVEL, 256)) {
#ifndef FLB_HAVE_STATIC_CONF
if (set_log_level_from_env(config) < 0) {
#endif
tmp = flb_env_var_translate(config->env, v);
if (tmp) {
ret = set_log_level(config, tmp);
flb_sds_destroy(tmp);
tmp = NULL;
}
else {
ret = set_log_level(config, v);
}
#ifndef FLB_HAVE_STATIC_CONF
}
#endif
}
else if (!strncasecmp(key, FLB_CONF_STR_PARSERS_FILE, 32)) {
#ifdef FLB_HAVE_PARSER
tmp = flb_env_var_translate(config->env, v);
ret = flb_parser_conf_file(tmp, config);
flb_sds_destroy(tmp);
tmp = NULL;
#endif
}
else if (!strncasecmp(key, FLB_CONF_STR_PLUGINS_FILE, 32)) {
tmp = flb_env_var_translate(config->env, v);
ret = flb_plugin_load_config_file(tmp, config);
flb_sds_destroy(tmp);
tmp = NULL;
}
else {
ret = 0;
tmp = flb_env_var_translate(config->env, v);
switch(service_configs[i].type) {
case FLB_CONF_TYPE_INT:
i_val = (int*)((char*)config + service_configs[i].offset);
*i_val = atoi(tmp);
flb_sds_destroy(tmp);
break;
case FLB_CONF_TYPE_DOUBLE:
d_val = (double*)((char*)config + service_configs[i].offset);
*d_val = atof(tmp);
flb_sds_destroy(tmp);
break;
case FLB_CONF_TYPE_BOOL:
i_val = (int*)((char*)config+service_configs[i].offset);
*i_val = flb_utils_bool(tmp);
flb_sds_destroy(tmp);
break;
case FLB_CONF_TYPE_STR:
s_val = (char**)((char*)config+service_configs[i].offset);
if ( *s_val != NULL ) {
flb_free(*s_val); /* release before overwriting */
}
*s_val = flb_strdup(tmp);
flb_sds_destroy(tmp);
break;
default:
ret = -1;
}
}
if (ret < 0) {
if (tmp) {
flb_sds_destroy(tmp);
}
return -1;
}
return 0;
}
key = service_configs[++i].key;
}
return 0;
}
int flb_config_set_program_name(struct flb_config *config, char *name)
{
config->program_name = flb_sds_create(name);
if (!config->program_name) {
return -1;
}
return 0;
}
| 1 | 13,316 | The config map's first value is wrong; it is meant to define the "value type", and that value will lead to undefined behavior. | fluent-fluent-bit | c |
@@ -13,7 +13,7 @@
// limitations under the License.
// Package fileblob provides a blob implementation that uses the filesystem.
-// Use OpenBucket to construct a blob.Bucket.
+// Use OpenBucket to construct a *blob.Bucket.
//
// Blob keys are escaped before being used as filenames, and filenames are
// unescaped when they are passed back as blob keys during List. The escape | 1 | // Copyright 2018 The Go Cloud Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package fileblob provides a blob implementation that uses the filesystem.
// Use OpenBucket to construct a blob.Bucket.
//
// Blob keys are escaped before being used as filenames, and filenames are
// unescaped when they are passed back as blob keys during List. The escape
// algorithm is:
// - Alphanumeric characters (A-Z a-z 0-9) are not escaped.
// - Space (' '), dash ('-'), underscore ('_'), and period ('.') are not escaped.
// - Slash ('/') is always escaped to the OS-specific path separator character
// (os.PathSeparator).
// - All other characters are escaped similar to url.PathEscape:
// "%<hex UTF-8 byte>", with capital letters ABCDEF in the hex code.
//
// Filenames that can't be unescaped due to invalid escape sequences
// (e.g., "%%"), or whose unescaped key doesn't escape back to the filename
// (e.g., "~", which unescapes to "~", which escapes back to "%7E" != "~"),
// aren't visible using fileblob.
//
// Open URLs
//
// For blob.Open URLs, fileblob registers for the scheme "file"; URLs start
// with "file://".
//
// The URL's Path is used as the root directory; the URL's Host is ignored.
// If os.PathSeparator != "/", any leading "/" from the Path is dropped.
// No query options are supported. Examples:
// - file:///a/directory
// -> Passes "/a/directory" to OpenBucket.
// - file://localhost/a/directory
// -> Also passes "/a/directory".
// - file:///c:/foo/bar
// -> Passes "c:/foo/bar".
//
// As
//
// fileblob does not support any types for As.
package fileblob // import "gocloud.dev/blob/fileblob"
import (
"context"
"crypto/md5"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"strings"
"gocloud.dev/blob"
"gocloud.dev/blob/driver"
)
const defaultPageSize = 1000
func init() {
blob.Register("file", func(_ context.Context, u *url.URL) (driver.Bucket, error) {
path := u.Path
if os.PathSeparator != '/' && strings.HasPrefix(path, "/") {
path = path[1:]
}
return openBucket(path, nil)
})
}
// Options sets options for constructing a *blob.Bucket backed by fileblob.
type Options struct{}
type bucket struct {
dir string
}
// openBucket creates a driver.Bucket that reads and writes to dir.
// dir must exist.
func openBucket(dir string, _ *Options) (driver.Bucket, error) {
dir = filepath.Clean(dir)
info, err := os.Stat(dir)
if err != nil {
return nil, err
}
if !info.IsDir() {
return nil, fmt.Errorf("%s is not a directory", dir)
}
return &bucket{dir}, nil
}
// OpenBucket creates a *blob.Bucket backed by the filesystem and rooted at
// dir, which must exist. See the package documentation for an example.
func OpenBucket(dir string, opts *Options) (*blob.Bucket, error) {
drv, err := openBucket(dir, opts)
if err != nil {
return nil, err
}
return blob.NewBucket(drv), nil
}
// shouldEscape returns true if c should be escaped.
func shouldEscape(c byte) bool {
switch {
case 'A' <= c && c <= 'Z':
return false
case 'a' <= c && c <= 'z':
return false
case '0' <= c && c <= '9':
return false
case c == ' ' || c == '-' || c == '_' || c == '.':
return false
case c == '/':
return false
}
return true
}
// escape returns s escaped per the rules described in the package docstring.
// The code is modified from https://golang.org/src/net/url/url.go.
func escape(s string) string {
hexCount := 0
replaceSlash := false
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c) {
hexCount++
} else if c == '/' && os.PathSeparator != '/' {
replaceSlash = true
}
}
if hexCount == 0 && !replaceSlash {
return s
}
t := make([]byte, len(s)+2*hexCount)
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case c == '/':
t[j] = os.PathSeparator
j++
case shouldEscape(c):
t[j] = '%'
t[j+1] = "0123456789ABCDEF"[c>>4]
t[j+2] = "0123456789ABCDEF"[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
// ishex returns true if c is a valid part of a hexadecimal number.
func ishex(c byte) bool {
switch {
case '0' <= c && c <= '9':
return true
case 'a' <= c && c <= 'f':
return true
case 'A' <= c && c <= 'F':
return true
}
return false
}
// unhex returns the hexadecimal value of the hexadecimal character c.
// For example, unhex('A') returns 10.
func unhex(c byte) byte {
switch {
case '0' <= c && c <= '9':
return c - '0'
case 'a' <= c && c <= 'f':
return c - 'a' + 10
case 'A' <= c && c <= 'F':
return c - 'A' + 10
}
return 0
}
// unescape unescapes s per the rules described in the package docstring.
// It returns an error if s has invalid escape sequences, or if
// escape(unescape(s)) != s.
// The code is modified from https://golang.org/src/net/url/url.go.
func unescape(s string) (string, error) {
// Count %, check that they're well-formed.
n := 0
replacePathSeparator := false
for i := 0; i < len(s); {
switch s[i] {
case '%':
n++
if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
bad := s[i:]
if len(bad) > 3 {
bad = bad[:3]
}
return "", fmt.Errorf("couldn't unescape %q near %q", s, bad)
}
i += 3
case os.PathSeparator:
replacePathSeparator = os.PathSeparator != '/'
i++
default:
i++
}
}
unescaped := s
if n > 0 || replacePathSeparator {
t := make([]byte, len(s)-2*n)
j := 0
for i := 0; i < len(s); {
switch s[i] {
case '%':
t[j] = unhex(s[i+1])<<4 | unhex(s[i+2])
j++
i += 3
case os.PathSeparator:
t[j] = '/'
j++
i++
default:
t[j] = s[i]
j++
i++
}
}
unescaped = string(t)
}
escaped := escape(unescaped)
if escaped != s {
return "", fmt.Errorf("%q unescaped to %q but escaped back to %q instead of itself", s, unescaped, escaped)
}
return unescaped, nil
}
// IsNotExist implements driver.IsNotExist.
func (b *bucket) IsNotExist(err error) bool {
return os.IsNotExist(err)
}
var errNotImplemented = errors.New("not implemented")
// IsNotImplemented implements driver.IsNotImplemented.
func (b *bucket) IsNotImplemented(err error) bool {
return err == errNotImplemented
}
// path returns the full path for a key
func (b *bucket) path(key string) (string, error) {
path := filepath.Join(b.dir, escape(key))
if strings.HasSuffix(path, attrsExt) {
return "", errAttrsExt
}
return path, nil
}
// forKey returns the full path, os.FileInfo, and attributes for key.
func (b *bucket) forKey(key string) (string, os.FileInfo, *xattrs, error) {
path, err := b.path(key)
if err != nil {
return "", nil, nil, err
}
info, err := os.Stat(path)
if err != nil {
return "", nil, nil, err
}
xa, err := getAttrs(path)
if err != nil {
return "", nil, nil, err
}
return path, info, &xa, nil
}
// ListPaged implements driver.ListPaged.
func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error) {
var pageToken string
if len(opts.PageToken) > 0 {
pageToken = string(opts.PageToken)
}
pageSize := opts.PageSize
if pageSize == 0 {
pageSize = defaultPageSize
}
// If opts.Delimiter != "", lastPrefix contains the last "directory" key we
// added. It is used to avoid adding it again; all files in this "directory"
// are collapsed to the single directory entry.
var lastPrefix string
// Do a full recursive scan of the root directory.
var result driver.ListPage
err := filepath.Walk(b.dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
// Couldn't read this file/directory for some reason; just skip it.
return nil
}
// Skip the self-generated attribute files.
if strings.HasSuffix(path, attrsExt) {
return nil
}
// os.Walk returns the root directory; skip it.
if path == b.dir {
return nil
}
// Strip the <b.dir> prefix from path; +1 is to include the separator.
path = path[len(b.dir)+1:]
// Unescape the path to get the key; if this fails, skip.
key, err := unescape(path)
if err != nil {
return nil
}
// Skip all directories. If opts.Delimiter is set, we'll create
// pseudo-directories later.
// Note that returning nil means that we'll still recurse into it;
// we're just not adding a result for the directory itself.
if info.IsDir() {
key += "/"
// Avoid recursing into subdirectories if the directory name already
// doesn't match the prefix; any files in it are guaranteed not to match.
if len(key) > len(opts.Prefix) && !strings.HasPrefix(key, opts.Prefix) {
return filepath.SkipDir
}
// Similarly, avoid recursing into subdirectories if we're making
// "directories" and all of the files in this subdirectory are guaranteed
// to collapse to a "directory" that we've already added.
if lastPrefix != "" && strings.HasPrefix(key, lastPrefix) {
return filepath.SkipDir
}
return nil
}
// Skip files/directories that don't match the Prefix.
if !strings.HasPrefix(key, opts.Prefix) {
return nil
}
var md5 []byte
if xa, err := getAttrs(path); err == nil {
// Note: we only have the MD5 hash for blobs that we wrote.
// For other blobs, md5 will remain nil.
md5 = xa.MD5
}
obj := &driver.ListObject{
Key: key,
ModTime: info.ModTime(),
Size: info.Size(),
MD5: md5,
}
// If using Delimiter, collapse "directories".
if opts.Delimiter != "" {
// Strip the prefix, which may contain Delimiter.
keyWithoutPrefix := key[len(opts.Prefix):]
// See if the key still contains Delimiter.
// If no, it's a file and we just include it.
// If yes, it's a file in a "sub-directory" and we want to collapse
// all files in that "sub-directory" into a single "directory" result.
if idx := strings.Index(keyWithoutPrefix, opts.Delimiter); idx != -1 {
prefix := opts.Prefix + keyWithoutPrefix[0:idx+len(opts.Delimiter)]
// We've already included this "directory"; don't add it.
if prefix == lastPrefix {
return nil
}
// Update the object to be a "directory".
obj = &driver.ListObject{
Key: prefix,
IsDir: true,
}
lastPrefix = prefix
}
}
// If there's a pageToken, skip anything before it.
if pageToken != "" && obj.Key <= pageToken {
return nil
}
// If we've already got a full page of results, set NextPageToken and stop.
if len(result.Objects) == pageSize {
result.NextPageToken = []byte(result.Objects[pageSize-1].Key)
return io.EOF
}
result.Objects = append(result.Objects, obj)
return nil
})
if err != nil && err != io.EOF {
return nil, err
}
return &result, nil
}
// As implements driver.As.
func (b *bucket) As(i interface{}) bool { return false }
// As implements driver.ErrorAs.
func (b *bucket) ErrorAs(err error, i interface{}) bool { return false }
// Attributes implements driver.Attributes.
func (b *bucket) Attributes(ctx context.Context, key string) (driver.Attributes, error) {
_, info, xa, err := b.forKey(key)
if err != nil {
return driver.Attributes{}, err
}
return driver.Attributes{
CacheControl: xa.CacheControl,
ContentDisposition: xa.ContentDisposition,
ContentEncoding: xa.ContentEncoding,
ContentLanguage: xa.ContentLanguage,
ContentType: xa.ContentType,
Metadata: xa.Metadata,
ModTime: info.ModTime(),
Size: info.Size(),
MD5: xa.MD5,
}, nil
}
// NewRangeReader implements driver.NewRangeReader.
func (b *bucket) NewRangeReader(ctx context.Context, key string, offset, length int64, opts *driver.ReaderOptions) (driver.Reader, error) {
path, info, xa, err := b.forKey(key)
if err != nil {
return nil, err
}
f, err := os.Open(path)
if err != nil {
return nil, err
}
if offset > 0 {
if _, err := f.Seek(offset, io.SeekStart); err != nil {
return nil, err
}
}
r := io.Reader(f)
if length >= 0 {
r = io.LimitReader(r, length)
}
return &reader{
r: r,
c: f,
attrs: driver.ReaderAttributes{
ContentType: xa.ContentType,
ModTime: info.ModTime(),
Size: info.Size(),
},
}, nil
}
type reader struct {
r io.Reader
c io.Closer
attrs driver.ReaderAttributes
}
func (r *reader) Read(p []byte) (int, error) {
if r.r == nil {
return 0, io.EOF
}
return r.r.Read(p)
}
func (r *reader) Close() error {
if r.c == nil {
return nil
}
return r.c.Close()
}
func (r *reader) Attributes() driver.ReaderAttributes {
return r.attrs
}
func (r *reader) As(i interface{}) bool { return false }
// NewTypedWriter implements driver.NewTypedWriter.
func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType string, opts *driver.WriterOptions) (driver.Writer, error) {
path, err := b.path(key)
if err != nil {
return nil, err
}
if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
return nil, err
}
f, err := ioutil.TempFile("", "fileblob")
if err != nil {
return nil, err
}
if opts.BeforeWrite != nil {
if err := opts.BeforeWrite(func(interface{}) bool { return false }); err != nil {
return nil, err
}
}
var metadata map[string]string
if len(opts.Metadata) > 0 {
metadata = opts.Metadata
}
attrs := xattrs{
CacheControl: opts.CacheControl,
ContentDisposition: opts.ContentDisposition,
ContentEncoding: opts.ContentEncoding,
ContentLanguage: opts.ContentLanguage,
ContentType: contentType,
Metadata: metadata,
}
w := &writer{
ctx: ctx,
f: f,
path: path,
attrs: attrs,
contentMD5: opts.ContentMD5,
md5hash: md5.New(),
}
return w, nil
}
type writer struct {
ctx context.Context
f *os.File
path string
attrs xattrs
contentMD5 []byte
// We compute the MD5 hash so that we can store it with the file attributes,
// not for verification.
md5hash hash.Hash
}
func (w *writer) Write(p []byte) (n int, err error) {
if _, err := w.md5hash.Write(p); err != nil {
return 0, err
}
return w.f.Write(p)
}
func (w *writer) Close() error {
err := w.f.Close()
if err != nil {
return err
}
// Always delete the temp file. On success, it will have been renamed so
// the Remove will fail.
defer func() {
_ = os.Remove(w.f.Name())
}()
// Check if the write was cancelled.
if err := w.ctx.Err(); err != nil {
return err
}
md5sum := w.md5hash.Sum(nil)
w.attrs.MD5 = md5sum
// Write the attributes file.
if err := setAttrs(w.path, w.attrs); err != nil {
return err
}
// Rename the temp file to path.
if err := os.Rename(w.f.Name(), w.path); err != nil {
_ = os.Remove(w.path + attrsExt)
return err
}
return nil
}
// Delete implements driver.Delete.
func (b *bucket) Delete(ctx context.Context, key string) error {
path, err := b.path(key)
if err != nil {
return err
}
err = os.Remove(path)
if err != nil {
return err
}
if err = os.Remove(path + attrsExt); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
func (b *bucket) SignedURL(ctx context.Context, key string, opts *driver.SignedURLOptions) (string, error) {
// TODO(Issue #546): Implemented SignedURL for fileblob.
return "", errNotImplemented
}
| 1 | 13,638 | This doesn't make sense. The thing being constructed is a blob.Bucket. "Constructing" the pointer is trivial. | google-go-cloud | go |
@@ -2,7 +2,9 @@
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
import functools
+import optparse # pylint: disable=deprecated-module
+from pylint.lint import PyLinter
from pylint.testutils.checker_test_case import CheckerTestCase
| 1 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
import functools
from pylint.testutils.checker_test_case import CheckerTestCase
def set_config(**kwargs):
"""Decorator for setting config values on a checker.
Passing the args and kwargs back to the test function itself
allows this decorator to be used on parametrized test cases.
"""
def _wrapper(fun):
@functools.wraps(fun)
def _forward(self, *args, **test_function_kwargs):
for key, value in kwargs.items():
setattr(self.checker.config, key, value)
if isinstance(self, CheckerTestCase):
# reopen checker in case, it may be interested in configuration change
self.checker.open()
fun(self, *args, **test_function_kwargs)
return _forward
return _wrapper
| 1 | 16,274 | Ouch. A configuration/parsing refactor is on the radar but this seems more pressing. | PyCQA-pylint | py |
@@ -18,7 +18,9 @@ export default Component.extend({
stats: null,
chartData: null,
chartOptions: null,
- startDateLabel: '',
+ startDateLabel: computed('membersStats.days', function () {
+ return moment(new Date()).add(-this.membersStats.days + 1, 'days').format(DATE_FORMAT);
+ }),
selectedRange: computed('membersStats.days', function () {
const availableRanges = this.availableRanges; | 1 | /* global Chart */
import Component from '@ember/component';
import moment from 'moment';
import {action} from '@ember/object';
import {computed, get} from '@ember/object';
import {inject as service} from '@ember/service';
import {task} from 'ember-concurrency';
const DATE_FORMAT = 'D MMM YYYY';
export default Component.extend({
ajax: service(),
membersStats: service(),
// public attrs
nightShift: false,
stats: null,
chartData: null,
chartOptions: null,
startDateLabel: '',
selectedRange: computed('membersStats.days', function () {
const availableRanges = this.availableRanges;
return availableRanges.findBy('days', this.membersStats.days);
}),
availableRanges: computed(function () {
return [{
name: '30 days',
days: '30'
}, {
name: '90 days',
days: '90'
}, {
name: '365 days',
days: '365'
}, {
name: 'All time',
days: 'all-time'
}];
}),
// Lifecycle ---------------------------------------------------------------
init() {
this._super(...arguments);
this.setChartJSDefaults();
},
didReceiveAttrs() {
if (this._lastNightShift !== undefined && this.nightShift !== this._lastNightShift) {
this.setChartOptions();
}
this._lastNightShift = this.nightShift;
},
// Actions -----------------------------------------------------------------
changeDateRange: action(function (range) {
this.membersStats.days = get(range, 'days');
this.fetchStatsTask.perform();
}),
// Tasks -------------------------------------------------------------------
fetchStatsTask: task(function* () {
this.set('stats', null);
let stats = yield this.membersStats.fetch();
if (stats) {
this.set('stats', stats);
this.setChartOptions({
rangeInDays: Object.keys(stats.total_on_date).length
});
this.setChartData({
dateLabels: Object.keys(stats.total_on_date),
dateValues: Object.values(stats.total_on_date)
});
}
}),
// Internal ----------------------------------------------------------------
setChartData({dateLabels, dateValues}) {
this.set('chartData', {
labels: dateLabels,
datasets: [{
label: 'Total members',
cubicInterpolationMode: 'monotone',
data: dateValues,
fill: false,
backgroundColor: 'rgba(62,176,239,.9)',
pointRadius: 0,
pointHitRadius: 10,
borderColor: 'rgba(62,176,239,.9)',
borderJoinStyle: 'round'
}]
});
},
setChartOptions({rangeInDays}) {
let maxTicksAllowed = this.getTicksForRange(rangeInDays);
this.setChartJSDefaults();
this.set('chartOptions', {
responsive: true,
maintainAspectRatio: false,
layout: {
padding: {
top: 5, // Needed otherwise the top dot is cut
right: 10,
bottom: 5,
left: 10
}
},
title: {
display: false
},
tooltips: {
intersect: false,
mode: 'index',
displayColors: false,
backgroundColor: '#343f44',
xPadding: 7,
yPadding: 7,
cornerRadius: 5,
caretSize: 7,
caretPadding: 5,
bodyFontSize: 13,
titleFontStyle: 'normal',
titleFontColor: 'rgba(255, 255, 255, 0.7)',
titleMarginBottom: 4,
callbacks: {
label: function (tooltipItems, data) {
return data.datasets[0].label + `: ` + data.datasets[0].data[tooltipItems.index].toString().replace(/\B(?=(\d{3})+(?!\d))/g, ',');
},
title: function (tooltipItems) {
return moment(tooltipItems[0].xLabel).format(DATE_FORMAT);
}
}
},
hover: {
mode: 'index',
intersect: false,
animationDuration: 120
},
legend: {
display: false
},
scales: {
xAxes: [{
labelString: 'Date',
gridLines: {
drawTicks: false,
color: (this.nightShift ? '#333F44' : '#E5EFF5'),
zeroLineColor: (this.nightShift ? '#333F44' : '#E5EFF5')
},
ticks: {
display: false,
maxRotation: 0,
minRotation: 0,
padding: 6,
autoSkip: false,
maxTicksLimit: 10,
callback: function (value, index, values) {
let step = (values.length - 1) / (maxTicksAllowed);
let steps = [];
for (let i = 0; i < maxTicksAllowed; i++) {
steps.push(Math.round(i * step));
}
if (index === 0) {
return value;
}
if (index === (values.length - 1)) {
return 'Today';
}
if (steps.includes(index)) {
return '';
}
}
}
}],
yAxes: [{
gridLines: {
drawTicks: false,
display: false,
drawBorder: false
},
ticks: {
maxTicksLimit: 5,
fontColor: '#9baeb8',
padding: 8,
precision: 0,
callback: function (value) {
return value.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ',');
}
}
}]
}
});
},
getTicksForRange(rangeInDays) {
if (rangeInDays <= 30) {
return 6;
} else if (rangeInDays <= 90) {
return 18;
} else {
return 24;
}
},
setChartJSDefaults() {
Chart.defaults.LineWithLine = Chart.defaults.line;
Chart.controllers.LineWithLine = Chart.controllers.line.extend({
draw: function (ease) {
Chart.controllers.line.prototype.draw.call(this, ease);
if (this.chart.tooltip._active && this.chart.tooltip._active.length) {
let activePoint = this.chart.tooltip._active[0];
let ctx = this.chart.ctx;
let x = activePoint.tooltipPosition().x;
let topY = this.chart.scales['y-axis-0'].top;
let bottomY = this.chart.scales['y-axis-0'].bottom;
// draw line
ctx.save();
ctx.beginPath();
ctx.moveTo(x, topY);
ctx.lineTo(x, bottomY);
ctx.lineWidth = 1;
ctx.strokeStyle = (this.nightShift ? 'rgba(62, 176, 239, 0.65)' : 'rgba(62, 176, 239, 0.8)');
ctx.stroke();
ctx.restore();
}
}
});
}
});
| 1 | 9,449 | Unfortunately this won't work when the "All time" range is chosen because `memberStats.days` will be `"all-time"` and not a number. What you could do instead is to use `memberStats.stats` as the dependent key and use `Object.keys(memberStats.stats.total_on_date)[0]` to grab the first date in the range and build the label from there. | TryGhost-Admin | js |
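A minimal sketch of what the reviewer describes, assuming the `membersStats` service exposes the fetched stats object (as the comment suggests) and reusing the component's existing `DATE_FORMAT` constant; this is illustrative, not the actual follow-up change:

```js
// Illustration only: derive the label from the first date in the fetched stats,
// which also works when the "All time" range is selected.
startDateLabel: computed('membersStats.stats', function () {
    let stats = this.membersStats.stats;
    if (!stats || !stats.total_on_date) {
        return '';
    }
    let firstDate = Object.keys(stats.total_on_date)[0];
    return moment(firstDate).format(DATE_FORMAT);
}),
```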
@@ -1,10 +1,11 @@
require "rails_helper"
-feature "subscriber views video trail" do
+feature "user views video trail" do
scenario "and marks a video as complete" do
trail = create_video_trail
- sign_in_as_user_with_subscription
+ sign_in
+ click_on I18n.t("pages.landing.hero_call_to_action")
click_on trail.name
within trail_steps_list do | 1 | require "rails_helper"
feature "subscriber views video trail" do
scenario "and marks a video as complete" do
trail = create_video_trail
sign_in_as_user_with_subscription
click_on trail.name
within trail_steps_list do
expect(first_video).to have_status("next-up")
expect(second_video).to have_status("unstarted")
end
click_on first_video_name(trail)
click_mark_as_complete_button
within trail_steps_list do
expect(first_video).to have_status("complete")
expect(second_video).to have_status("next-up")
end
end
scenario "and can seek to a specific point in the video", js: true do
video = create(:video, wistia_id: "hello", notes: "# Hello\n\n## Topic")
marker = create(:marker, video: video, anchor: "topic")
visit video_path(video, as: create(:subscriber))
within "#topic" do
click_jump_to_topic_in_video_button
end
expect(current_url).to include("#" + marker.anchor)
end
def first_video
find(".exercise:first-of-type")
end
def second_video
find(".exercise:nth-of-type(2)")
end
def first_video_name(trail)
trail.videos.first.name
end
def click_mark_as_complete_button
click_on I18n.t("videos.show.mark-as-complete")
end
def trail_steps_list
".exercises-container"
end
def click_jump_to_topic_in_video_button
click_on I18n.t("videos.seek_buttons.jump-to-topic-in-video")
end
def create_video_trail
create(:trail, :published, :with_topic, name: "Video Trail").tap do |trail|
2.times do
video = create(:video, watchable: trail)
create(:step, trail: trail, completeable: video)
end
end
end
matcher :have_status do |status|
match do |node|
node[:class].include? "#{status}-exercise"
end
end
end
| 1 | 18,764 | Style/StringLiterals: Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping. | thoughtbot-upcase | rb |
@@ -206,7 +206,6 @@ describe('Configure Options', function() {
iframe.src = '/test/mock/frames/context.html';
iframe.onload = function() {
axe.configure(config);
- iframe.contentWindow.axe.configure(config);
axe.run(
'#target', | 1 | describe('Configure Options', function() {
'use strict';
var target = document.querySelector('#target');
afterEach(function() {
axe.reset();
target.innerHTML = '';
});
describe('Check', function() {
describe('aria-allowed-attr', function() {
it('should allow an attribute supplied in options', function(done) {
target.setAttribute('role', 'separator');
target.setAttribute('aria-valuenow', '0');
axe.configure({
checks: [
{
id: 'aria-allowed-attr',
options: { separator: ['aria-valuenow'] }
}
]
});
axe.run(
target,
{
runOnly: {
type: 'rule',
values: ['aria-allowed-attr']
}
},
function(error, results) {
assert.lengthOf(results.violations, 0, 'violations');
done();
}
);
});
it('should not normalize external check options', function(done) {
target.setAttribute('lang', 'en');
axe.configure({
checks: [
{
id: 'dylang',
options: ['dylan'],
evaluate:
'function (node, options) {\n var lang = (node.getAttribute("lang") || "").trim().toLowerCase();\n var xmlLang = (node.getAttribute("xml:lang") || "").trim().toLowerCase();\n var invalid = [];\n (options || []).forEach(function(cc) {\n cc = cc.toLowerCase();\n if (lang && (lang === cc || lang.indexOf(cc.toLowerCase() + "-") === 0)) {\n lang = null;\n }\n if (xmlLang && (xmlLang === cc || xmlLang.indexOf(cc.toLowerCase() + "-") === 0)) {\n xmlLang = null;\n }\n });\n if (xmlLang) {\n invalid.push(\'xml:lang="\' + xmlLang + \'"\');\n }\n if (lang) {\n invalid.push(\'lang="\' + lang + \'"\');\n }\n if (invalid.length) {\n this.data(invalid);\n return true;\n }\n return false;\n }',
messages: {
pass: 'Good language',
fail: 'You mst use the DYLAN language'
}
}
],
rules: [
{
id: 'dylang',
metadata: {
description:
"Ensures lang attributes have the value of 'dylan'",
help: "lang attribute must have the value of 'dylan'"
},
selector: '#target',
any: [],
all: [],
none: ['dylang'],
tags: ['wcag2aa']
}
],
data: {
rules: {
dylang: {
description:
"Ensures lang attributes have the value of 'dylan'",
help: "lang attribute must have the value of 'dylan'"
}
}
}
});
axe.run(
'#target',
{
runOnly: {
type: 'rule',
values: ['dylang']
}
},
function(err, results) {
try {
assert.isNull(err);
assert.lengthOf(results.violations, 1, 'violations');
done();
} catch (e) {
done(e);
}
}
);
});
});
describe('aria-required-attr', function() {
it('should report unique attributes when supplied from options', function(done) {
target.setAttribute('role', 'slider');
axe.configure({
checks: [
{
id: 'aria-required-attr',
options: { slider: ['aria-snuggles'] }
}
]
});
axe.run(
'#target',
{
runOnly: {
type: 'rule',
values: ['aria-required-attr']
}
},
function(error, results) {
assert.lengthOf(results.violations, 1, 'violations');
assert.sameMembers(results.violations[0].nodes[0].any[0].data, [
'aria-snuggles'
]);
done();
}
);
});
});
});
describe('disableOtherRules', function() {
it('disables rules that are not in the `rules` array', function(done) {
axe.configure({
disableOtherRules: true,
rules: [
{
id: 'html-has-lang',
enabled: true
},
{
id: 'html-lang-valid',
enabled: false
}
]
});
axe.run(function(error, results) {
assert.isNull(error);
assert.lengthOf(results.passes, 1, 'passes');
assert.equal(results.passes[0].id, 'html-has-lang');
assert.lengthOf(results.violations, 0, 'violations');
assert.lengthOf(results.incomplete, 0, 'incomplete');
assert.lengthOf(results.inapplicable, 0, 'inapplicable');
done();
});
});
});
describe('noHtml', function() {
var captureError = axe.testUtils.captureError;
it('prevents html property on nodes', function(done) {
target.setAttribute('role', 'slider');
axe.configure({
noHtml: true,
checks: [
{
id: 'aria-required-attr',
options: { slider: ['aria-snuggles'] }
}
]
});
axe.run(
'#target',
{
runOnly: {
type: 'rule',
values: ['aria-required-attr']
}
},
captureError(function(error, results) {
assert.isNull(error);
assert.isNull(results.violations[0].nodes[0].html);
done();
}, done)
);
});
it('prevents html property on nodes from iframes', function(done) {
var config = {
noHtml: true,
rules: [
{
id: 'div#target',
// purposefully don't match so the first result is from
// the iframe
selector: 'foo'
}
]
};
var iframe = document.createElement('iframe');
iframe.src = '/test/mock/frames/context.html';
iframe.onload = function() {
axe.configure(config);
iframe.contentWindow.axe.configure(config);
axe.run(
'#target',
{
runOnly: {
type: 'rule',
values: ['div#target']
}
},
captureError(function(error, results) {
assert.isNull(error);
assert.deepEqual(results.passes[0].nodes[0].target, [
'iframe',
'#target'
]);
assert.isNull(results.passes[0].nodes[0].html);
done();
}, done)
);
};
target.appendChild(iframe);
});
it('prevents html property in postMesage', function(done) {
var config = {
noHtml: true,
rules: [
{
id: 'div#target',
// purposefully don't match so the first result is from
// the iframe
selector: 'foo'
}
]
};
var iframe = document.createElement('iframe');
iframe.src = '/test/mock/frames/noHtml-config.html';
iframe.onload = function() {
axe.configure(config);
iframe.contentWindow.axe.configure(config);
axe.run('#target', {
runOnly: {
type: 'rule',
values: ['div#target']
}
});
};
target.appendChild(iframe);
window.addEventListener('message', function(e) {
var data = JSON.parse(e.data);
if (Array.isArray(data.payload)) {
try {
assert.isNull(data.payload[0].nodes[0].node.source);
done();
} catch (e) {
done(e);
}
}
});
});
});
});
| 1 | 17,220 | We need this left in as `axe.configure` is for the top-level page `axe` and we still need to configure the axe script inside the iframe as well (so both have to be configured). | dequelabs-axe-core | js |
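To make the comment concrete, this is the shape the iframe setup keeps once the removed line is restored; it mirrors the existing test above, with comments added here purely for illustration:

```js
iframe.onload = function() {
  // Configure the axe instance running in the top-level test page...
  axe.configure(config);
  // ...and the separate axe instance loaded inside the iframe; both copies
  // need the same configuration for the cross-frame run to behave consistently.
  iframe.contentWindow.axe.configure(config);
  // axe.run(...) then proceeds exactly as in the test above.
};
```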
@@ -26,6 +26,11 @@ namespace AutoRest.Swagger.Validation
/// </summary>
public override ValidationCategory ValidationCategory => ValidationCategory.RPCViolation;
+ /// <summary>
+ /// What kind of change implementing this rule can cause.
+ /// </summary>
+ public override ValidationChangesImpact ValidationChangesImpact => ValidationChangesImpact.ServiceImpactingChanges;
+
public override IEnumerable<ValidationMessage> GetValidationMessages(Dictionary<string, Dictionary<string, Operation>> entity, RuleContext context)
{
// get all operation objects that are either of get or post type | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System.Collections.Generic;
using System.Text.RegularExpressions;
using AutoRest.Core.Logging;
using AutoRest.Core.Properties;
using AutoRest.Swagger.Validation.Core;
using AutoRest.Swagger.Model;
using AutoRest.Swagger.Model.Utilities;
using System.Linq;
namespace AutoRest.Swagger.Validation
{
public class CollectionObjectPropertiesNamingValidation : TypedRule<Dictionary<string, Dictionary<string, Operation>>>
{
private readonly Regex ListRegex = new Regex(@".+_List([^_]*)$", RegexOptions.IgnoreCase);
/// <summary>
/// Id of the Rule.
/// </summary>
public override string Id => "M3008";
/// <summary>
/// Violation category of the Rule.
/// </summary>
public override ValidationCategory ValidationCategory => ValidationCategory.RPCViolation;
public override IEnumerable<ValidationMessage> GetValidationMessages(Dictionary<string, Dictionary<string, Operation>> entity, RuleContext context)
{
// get all operation objects that are either of get or post type
ServiceDefinition serviceDefinition = context.Root;
var listOperations = entity.Values.SelectMany(opDict => opDict.Where(pair => pair.Key.ToLower().Equals("get") || pair.Key.ToLower().Equals("post")));
foreach (var opPair in listOperations)
{
// if the operation id is not of type _list* or does not return an array type, skip
if (!ListRegex.IsMatch(opPair.Value.OperationId) || !ValidationUtilities.IsXmsPageableResponseOperation(opPair.Value))
{
continue;
}
string collType = opPair.Value.Responses.GetValueOrNull("200")?.Schema?.Reference?.StripDefinitionPath();
// if no response type defined skip
if (collType == null)
{
continue;
}
var collTypeDef = serviceDefinition.Definitions[collType];
// if collection object has 2 properties or less (x-ms-pageable objects can have the nextlink prop)
// and the object does not have a property named "value", show the warning
if ((collTypeDef.Properties?.Count <= 2) && collTypeDef.Properties.All(prop => !(prop.Key.ToLower().Equals("value") && prop.Value.Type == DataType.Array)))
{
yield return new ValidationMessage(new FileObjectPath(context.File, context.Path), this, collType, opPair.Value.OperationId);
}
}
}
/// <summary>
/// The template message for this Rule.
/// </summary>
/// <remarks>
/// This may contain placeholders '{0}' for parameterized messages.
/// </remarks>
public override string MessageTemplate => Resources.CollectionObjectPropertiesNamingMessage;
/// <summary>
/// The severity of this message (ie, debug/info/warning/error/fatal, etc)
/// </summary>
public override Category Severity => Category.Error;
}
} | 1 | 24,908 | Is this rule service-impacting? I'm thinking it's mostly around the modeling of pageable in the swagger; thoughts? | Azure-autorest | java |
@@ -141,7 +141,7 @@ func (ts *TriggerStatus) MarkDependencyNotConfigured() {
"DependencyNotConfigured", "Dependency has not yet been reconciled.")
}
-func (ts *TriggerStatus) PropagateDependencyStatus(ks *duckv1.KResource) {
+func (ts *TriggerStatus) PropagateDependencyStatus(ks *duckv1.Source) {
kc := ks.Status.GetCondition(apis.ConditionReady)
if kc == nil {
ts.MarkDependencyNotConfigured() | 1 | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
eventingv1beta1 "knative.dev/eventing/pkg/apis/eventing/v1beta1"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
var triggerCondSet = apis.NewLivingConditionSet(
eventingv1beta1.TriggerConditionBroker,
eventingv1beta1.TriggerConditionDependency,
eventingv1beta1.TriggerConditionSubscriberResolved,
TriggerConditionTopic,
TriggerConditionSubscription,
)
const (
TriggerConditionTopic apis.ConditionType = "TopicReady"
TriggerConditionSubscription apis.ConditionType = "SubscriptionReady"
)
// GetCondition returns the condition currently associated with the given type, or nil.
func (ts *TriggerStatus) GetCondition(t apis.ConditionType) *apis.Condition {
return triggerCondSet.Manage(ts).GetCondition(t)
}
// GetTopLevelCondition returns the top level Condition.
func (ts *TriggerStatus) GetTopLevelCondition() *apis.Condition {
return triggerCondSet.Manage(ts).GetTopLevelCondition()
}
// IsReady returns true if the resource is ready overall.
func (ts *TriggerStatus) IsReady() bool {
return triggerCondSet.Manage(ts).IsHappy()
}
// InitializeConditions sets relevant unset conditions to Unknown state.
func (ts *TriggerStatus) InitializeConditions() {
triggerCondSet.Manage(ts).InitializeConditions()
}
func (ts *TriggerStatus) PropagateBrokerStatus(bs *BrokerStatus) {
bc := bs.GetTopLevelCondition()
if bc == nil {
ts.MarkBrokerNotConfigured()
return
}
switch {
case bc.Status == corev1.ConditionUnknown:
ts.MarkBrokerUnknown("Broker/"+bc.Reason, bc.Message)
case bc.Status == corev1.ConditionTrue:
triggerCondSet.Manage(ts).MarkTrue(eventingv1beta1.TriggerConditionBroker)
case bc.Status == corev1.ConditionFalse:
ts.MarkBrokerFailed("Broker/"+bc.Reason, bc.Message)
default:
ts.MarkBrokerUnknown("BrokerUnknown", "The status of Broker is invalid: %v", bc.Status)
}
}
func (ts *TriggerStatus) MarkBrokerFailed(reason, messageFormat string, messageA ...interface{}) {
triggerCondSet.Manage(ts).MarkFalse(eventingv1beta1.TriggerConditionBroker, reason, messageFormat, messageA...)
}
func (ts *TriggerStatus) MarkBrokerUnknown(reason, messageFormat string, messageA ...interface{}) {
triggerCondSet.Manage(ts).MarkUnknown(eventingv1beta1.TriggerConditionBroker, reason, messageFormat, messageA...)
}
func (ts *TriggerStatus) MarkBrokerNotConfigured() {
triggerCondSet.Manage(ts).MarkUnknown(eventingv1beta1.TriggerConditionBroker,
"BrokerNotConfigured", "Broker has not yet been reconciled.")
}
func (bs *TriggerStatus) MarkTopicFailed(reason, format string, args ...interface{}) {
triggerCondSet.Manage(bs).MarkFalse(TriggerConditionTopic, reason, format, args...)
}
func (bs *TriggerStatus) MarkTopicUnknown(reason, format string, args ...interface{}) {
triggerCondSet.Manage(bs).MarkUnknown(TriggerConditionTopic, reason, format, args...)
}
func (bs *TriggerStatus) MarkTopicReady() {
triggerCondSet.Manage(bs).MarkTrue(TriggerConditionTopic)
}
func (bs *TriggerStatus) MarkSubscriptionFailed(reason, format string, args ...interface{}) {
triggerCondSet.Manage(bs).MarkFalse(TriggerConditionSubscription, reason, format, args...)
}
func (bs *TriggerStatus) MarkSubscriptionUnknown(reason, format string, args ...interface{}) {
triggerCondSet.Manage(bs).MarkUnknown(TriggerConditionSubscription, reason, format, args...)
}
func (bs *TriggerStatus) MarkSubscriptionReady() {
triggerCondSet.Manage(bs).MarkTrue(TriggerConditionSubscription)
}
func (ts *TriggerStatus) MarkSubscriberResolvedSucceeded() {
triggerCondSet.Manage(ts).MarkTrue(eventingv1beta1.TriggerConditionSubscriberResolved)
}
func (ts *TriggerStatus) MarkSubscriberResolvedFailed(reason, messageFormat string, messageA ...interface{}) {
triggerCondSet.Manage(ts).MarkFalse(eventingv1beta1.TriggerConditionSubscriberResolved, reason, messageFormat, messageA...)
}
func (ts *TriggerStatus) MarkSubscriberResolvedUnknown(reason, messageFormat string, messageA ...interface{}) {
triggerCondSet.Manage(ts).MarkUnknown(eventingv1beta1.TriggerConditionSubscriberResolved, reason, messageFormat, messageA...)
}
func (ts *TriggerStatus) MarkDependencySucceeded() {
triggerCondSet.Manage(ts).MarkTrue(eventingv1beta1.TriggerConditionDependency)
}
func (ts *TriggerStatus) MarkDependencyFailed(reason, messageFormat string, messageA ...interface{}) {
triggerCondSet.Manage(ts).MarkFalse(eventingv1beta1.TriggerConditionDependency, reason, messageFormat, messageA...)
}
func (ts *TriggerStatus) MarkDependencyUnknown(reason, messageFormat string, messageA ...interface{}) {
triggerCondSet.Manage(ts).MarkUnknown(eventingv1beta1.TriggerConditionDependency, reason, messageFormat, messageA...)
}
func (ts *TriggerStatus) MarkDependencyNotConfigured() {
triggerCondSet.Manage(ts).MarkUnknown(eventingv1beta1.TriggerConditionDependency,
"DependencyNotConfigured", "Dependency has not yet been reconciled.")
}
func (ts *TriggerStatus) PropagateDependencyStatus(ks *duckv1.KResource) {
kc := ks.Status.GetCondition(apis.ConditionReady)
if kc == nil {
ts.MarkDependencyNotConfigured()
return
}
switch {
case kc.Status == corev1.ConditionUnknown:
ts.MarkDependencyUnknown(kc.Reason, kc.Message)
case kc.Status == corev1.ConditionTrue:
ts.MarkDependencySucceeded()
case kc.Status == corev1.ConditionFalse:
ts.MarkDependencyFailed(kc.Reason, kc.Message)
default:
ts.MarkDependencyUnknown("DependencyUnknown", "The status of Dependency is invalid: %v", kc.Status)
}
}
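The review note on this record asks for names tied to the new duckv1.Source parameter rather than the old KResource-flavored ks/kc. A minimal sketch of that rename, reusing the logic of the function above (src and ready are illustrative names only, not names taken from the project):

// Sketch only: PropagateDependencyStatus with Source-oriented variable names.
func (ts *TriggerStatus) PropagateDependencyStatus(src *duckv1.Source) {
	ready := src.Status.GetCondition(apis.ConditionReady)
	if ready == nil {
		ts.MarkDependencyNotConfigured()
		return
	}
	switch {
	case ready.Status == corev1.ConditionUnknown:
		ts.MarkDependencyUnknown(ready.Reason, ready.Message)
	case ready.Status == corev1.ConditionTrue:
		ts.MarkDependencySucceeded()
	case ready.Status == corev1.ConditionFalse:
		ts.MarkDependencyFailed(ready.Reason, ready.Message)
	default:
		ts.MarkDependencyUnknown("DependencyUnknown", "The status of Dependency is invalid: %v", ready.Status)
	}
}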
| 1 | 18,742 | same here, replace `ks` and `kc` to something more related to Source. | google-knative-gcp | go |
@@ -0,0 +1,19 @@
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+
+namespace MvvmCross.Platform.Converters
+{
+ public class MvxDictionaryValueConverter<TKey, TValue> : MvxValueConverter<TKey, TValue>
+ {
+ protected override TValue Convert(TKey value, Type targetType, object parameter, CultureInfo culture)
+ {
+ if (parameter is Dictionary<TKey, TValue> dict)
+ {
+ TValue x = dict[value];
+ return x;
+ }
+ throw new ArgumentException($"Could not cast {parameter.GetType().Name} to {typeof(Dictionary<TKey, TValue>).Name}");
+ }
+ }
+} | 1 | 1 | 13,518 | We should probably ask here if `dict` contains a key with the appropiate value, and leave a trace in case it doesn't, to make it easier to debug for developers. | MvvmCross-MvvmCross | .cs |
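The converter in this hunk indexes the dictionary directly, so a missing key surfaces only as a bare KeyNotFoundException. A sketch of the kind of defensive lookup the review asks for — standalone code, with System.Diagnostics tracing standing in for whatever tracing mechanism the project actually uses:

using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;

// Sketch only: checks for the key before indexing and leaves a trace when it is missing.
public class DictionaryLookupSketch<TKey, TValue>
{
    public TValue Convert(TKey value, Type targetType, object parameter, CultureInfo culture)
    {
        if (parameter is Dictionary<TKey, TValue> dict)
        {
            if (dict.TryGetValue(value, out var mapped))
            {
                return mapped;
            }
            // Trace instead of throwing KeyNotFoundException, so a bad binding is easy to spot while debugging.
            Debug.WriteLine($"MvxDictionaryValueConverter: no entry for key '{value}'");
            return default;
        }
        throw new ArgumentException($"Could not cast {parameter?.GetType().Name} to {typeof(Dictionary<TKey, TValue>).Name}");
    }
}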
|
@@ -147,6 +147,11 @@ module Beaker
}
} }
let(:subcommand_file) {@subcommand_file || {:level => 'fifth'}}
+ let(:homedir_file) {@homedir_file || {:level => 'sixth',
+ :ssh => {
+ :auth_methods => 'auth_home_123'
+ }
+ }}
let(:presets) { {:level => 'lowest',
:ssh => {
:config => 'config123', | 1 | require "spec_helper"
module Beaker
module Options
describe Parser do
let(:parser) { Parser.new }
let(:opts_path) { File.join(File.expand_path(File.dirname(__FILE__)), "data", "opts.txt") }
let(:hosts_path) { File.join(File.expand_path(File.dirname(__FILE__)), "data", "hosts.cfg") }
it "supports usage function" do
expect { parser.usage }.to_not raise_error
end
describe 'parse_git_repos' do
it "transforms arguments of <PROJECT_NAME>/<REF> to <GIT_BASE_URL>/<lowercased_project_name>#<REF>" do
opts = ["PUPPET/3.1"]
expect(parser.parse_git_repos(opts)).to be === ["#{parser.repo}/puppet.git#3.1"]
end
it "recognizes PROJECT_NAMEs of PUPPET, FACTER, HIERA, and HIERA-PUPPET" do
projects = [['puppet', 'my_branch', 'PUPPET/my_branch'],
['facter', 'my_branch', 'FACTER/my_branch'],
['hiera', 'my_branch', 'HIERA/my_branch'],
['hiera-puppet', 'my_branch', 'HIERA-PUPPET/my_branch']]
projects.each do |project, ref, input|
expect(parser.parse_git_repos([input])).to be === ["#{parser.repo}/#{project}.git##{ref}"]
end
end
end
describe 'split_arg' do
it "can split comma separated list into an array" do
arg = "file1,file2,file3"
expect(parser.split_arg(arg)).to be === ["file1", "file2", "file3"]
end
it "can use an existing Array as an acceptable argument" do
arg = ["file1", "file2", "file3"]
expect(parser.split_arg(arg)).to be === ["file1", "file2", "file3"]
end
it "can generate an array from a single value" do
arg = "i'mjustastring"
expect(parser.split_arg(arg)).to be === ["i'mjustastring"]
end
end
context 'testing path traversing' do
let(:test_dir) { 'tmp/tests' }
let(:rb_test) { File.expand_path(test_dir + '/my_ruby_file.rb') }
let(:pl_test) { File.expand_path(test_dir + '/my_perl_file.pl') }
let(:sh_test) { File.expand_path(test_dir + '/my_shell_file.sh') }
let(:rb_other) { File.expand_path(test_dir + '/other/my_other_ruby_file.rb') }
it 'only collects ruby files as test files' do
files = [rb_test, pl_test, sh_test, rb_other]
create_files(files)
expect(parser.file_list([File.expand_path(test_dir)])).to be === [rb_test, rb_other]
end
it 'raises an error when no ruby files are found' do
files = [pl_test, sh_test]
create_files(files)
expect { parser.file_list([File.expand_path(test_dir)]) }.to raise_error(ArgumentError)
end
it 'raises an error when no paths are specified for searching' do
@files = ''
expect { parser.file_list('') }.to raise_error(ArgumentError)
end
end
context 'combining split_arg and file_list maintain test file ordering' do
let(:test_dir) { 'tmp/tests' }
let(:other_test_dir) { 'tmp/tests2' }
before :each do
files = [
'00_EnvSetup.rb', '035_StopFirewall.rb', '05_HieraSetup.rb',
'01_TestSetup.rb', '03_PuppetMasterSanity.rb',
'06_InstallModules.rb', '02_PuppetUserAndGroup.rb',
'04_ValidateSignCert.rb', '07_InstallCACerts.rb']
@lone_file = '08_foss.rb'
@fileset1 = files.shuffle.map { |file| test_dir + '/' + file }
@fileset2 = files.shuffle.map { |file| other_test_dir + '/' + file }
@sorted_expanded_fileset1 = @fileset1.map { |f| File.expand_path(f) }.sort
@sorted_expanded_fileset2 = @fileset2.map { |f| File.expand_path(f) }.sort
create_files(@fileset1)
create_files(@fileset2)
create_files([@lone_file])
end
it "when provided a file followed by dir, runs the file first" do
arg = "#{@lone_file},#{test_dir}"
output = parser.file_list(parser.split_arg(arg))
expect(output).to be === [@lone_file, @sorted_expanded_fileset1].flatten
end
it "when provided a dir followed by a file, runs the file last" do
arg = "#{test_dir},#{@lone_file}"
output = parser.file_list(parser.split_arg(arg))
expect(output).to be === [@sorted_expanded_fileset1, @lone_file].flatten
end
it "correctly orders files in a directory" do
arg = "#{test_dir}"
output = parser.file_list(parser.split_arg(arg))
expect(output).to be === @sorted_expanded_fileset1
end
it "when provided two directories orders each directory separately" do
arg = "#{test_dir}/,#{other_test_dir}/"
output = parser.file_list(parser.split_arg(arg))
expect(output).to be === @sorted_expanded_fileset1 + @sorted_expanded_fileset2
end
end
describe '#parse_args' do
before { FakeFS.deactivate! }
it 'pulls the args into key called :command_line' do
my_args = ['--log-level', 'debug', '-h', hosts_path]
expect(parser.parse_args(my_args)[:command_line]).to include(my_args.join(' '))
expect(parser.attribution[:command_line]).to be == 'cmd'
expect(parser.attribution[:hosts_file]).to be == 'cmd'
expect(parser.attribution[:log_level]).to be == 'cmd'
expect(parser.attribution[:pe_dir]).to be == 'preset'
end
describe 'does prioritization correctly' do
let(:env) { @env || {:level => 'highest'} }
let(:argv) { @argv || {:level => 'second'} }
let(:host_file) { @host_file || {:level => 'third'} }
let(:opt_file) { @opt_file || {:level => 'fourth',
:ssh => {
:auth_methods => 'auth123',
:user_known_hosts_file => 'hosts123'
}
} }
let(:subcommand_file) {@subcommand_file || {:level => 'fifth'}}
let(:presets) { {:level => 'lowest',
:ssh => {
:config => 'config123',
:paranoid => 'paranoid123',
:port => 'port123',
:forward_agent => 'forwardagent123',
:keys => 'keys123',
:keepalive => 'keepalive123'
}
} }
before :each do
expect(parser).to receive(:normalize_args).and_return(true)
end
def mock_out_parsing
presets_obj = double()
allow(presets_obj).to receive(:presets).and_return(presets)
allow(presets_obj).to receive(:env_vars).and_return(env)
parser.instance_variable_set(:@presets, presets_obj)
command_line_parser_obj = double()
allow(command_line_parser_obj).to receive(:parse).and_return(argv)
parser.instance_variable_set(:@command_line_parser, command_line_parser_obj)
allow(OptionsFileParser).to receive(:parse_options_file).and_return(opt_file)
allow(parser).to receive(:parse_hosts_options).and_return(host_file)
allow(SubcommandOptionsParser).to receive(:parse_subcommand_options).and_return(subcommand_file)
end
it 'presets have the lowest priority' do
@env = @argv = @host_file = @opt_file = @subcommand_file = {}
mock_out_parsing
opts = parser.parse_args([])
attribution = parser.attribution
expect(opts[:level]).to be == 'lowest'
expect(attribution[:level]).to be == 'preset'
end
it 'subcommand_options should have fifth priority' do
@env = @argv = @host_file = @opt_file = {}
mock_out_parsing
opts = parser.parse_args([])
attribution = parser.attribution
expect(opts[:level]).to be == 'fifth'
expect(attribution[:level]).to be == 'subcommand'
end
it 'options file has fourth priority' do
@env = @argv = @host_file = {}
mock_out_parsing
opts = parser.parse_args([])
attribution = parser.attribution
expect(attribution[:ssh]).to be_a(Hash)
expect(attribution[:ssh][:auth_methods]).to be == 'options_file'
expect(attribution[:ssh][:user_known_hosts_file]).to be == 'options_file'
expect(attribution[:ssh][:config]).to be == 'preset'
expect(attribution[:ssh][:paranoid]).to be == 'preset'
expect(attribution[:ssh][:port]).to be == 'preset'
expect(attribution[:ssh][:forward_agent]).to be == 'preset'
expect(attribution[:ssh][:keys]).to be == 'preset'
expect(attribution[:ssh][:keepalive]).to be == 'preset'
expect(opts[:level]).to be == 'fourth'
expect(attribution[:level]).to be == 'options_file'
end
it 'host file CONFIG section has third priority' do
@env = @argv = {}
mock_out_parsing
opts = parser.parse_args([])
attribution = parser.attribution
expect(opts[:level]).to be == 'third'
expect(attribution[:level]).to be == 'host_file'
end
it 'command line arguments have second priority' do
@env = {}
mock_out_parsing
opts = parser.parse_args([])
attribution = parser.attribution
expect(opts[:level]).to be == 'second'
expect(attribution[:level]).to be == 'cmd'
end
it 'env vars have highest priority' do
mock_out_parsing
opts = parser.parse_args([])
attribution = parser.attribution
expect(opts[:level]).to be == 'highest'
expect(attribution[:level]).to be == 'env'
end
end
it "can correctly combine arguments from different sources" do
build_url = 'http://my.build.url/'
type = 'git'
log_level = 'debug'
old_build_url = ENV["BUILD_URL"]
ENV["BUILD_URL"] = build_url
args = ["-h", hosts_path, "--log-level", log_level, "--type", type, "--install", "PUPPET/1.0,HIERA/hello"]
output = parser.parse_args(args)
attribution = parser.attribution
expect(output[:hosts_file]).to be == hosts_path
expect(attribution[:hosts_file]).to be == 'cmd'
expect(output[:jenkins_build_url]).to be == build_url
expect(attribution[:jenkins_build_url]).to be == 'env'
expect(output[:install]).to include('git://github.com/puppetlabs/hiera.git#hello')
expect(attribution[:install]).to be == 'runtime'
ENV["BUILD_URL"] = old_build_url
end
it "ensures that fail-mode is one of fast/slow" do
args = ["-h", hosts_path, "--log-level", "debug", "--fail-mode", "nope"]
expect { parser.parse_args(args) }.to raise_error(ArgumentError)
end
end
describe '#parse_hosts_options' do
context 'Hosts file exists' do
before :each do
allow(File).to receive(:exists?).and_return(true)
end
it 'returns the parser\'s output' do
parser.instance_variable_set( :@options, {} )
test_value = 'blaqwetjijl,emikfuj1235'
allow( Beaker::Options::HostsFileParser ).to receive(
:parse_hosts_file
).and_return( test_value )
val1, _ = parser.parse_hosts_options
expect( val1 ).to be === test_value
end
end
context 'Hosts file does not exist' do
require 'beaker-hostgenerator'
before :each do
allow(File).to receive(:exists?).and_return(false)
end
it 'calls beaker-hostgenerator to get hosts information' do
parser.instance_variable_set( :@options, {
:hosts_file => 'notafile.yml'
} )
allow( Beaker::Options::HostsFileParser ).to receive(
:parse_hosts_file
).and_raise( Errno::ENOENT )
mock_beaker_hostgenerator_cli = Object.new
cli_execute_return = 'job150865'
expect( mock_beaker_hostgenerator_cli ).to receive(
:execute
).and_return( cli_execute_return )
expect( BeakerHostGenerator::CLI ).to receive(
:new
).and_return( mock_beaker_hostgenerator_cli )
allow( Beaker::Options::HostsFileParser ).to receive(
:parse_hosts_string
).with( cli_execute_return )
parser.parse_hosts_options
end
it 'sets the :hosts_file_generated flag to signal others when needed' do
options_test = {
:hosts_file => 'not_a_file.yml'
}
parser.instance_variable_set( :@options, options_test )
allow( Beaker::Options::HostsFileParser ).to receive(
:parse_hosts_file
).and_raise( Errno::ENOENT )
mock_beaker_hostgenerator_cli = Object.new
allow( mock_beaker_hostgenerator_cli ).to receive( :execute )
allow( BeakerHostGenerator::CLI ).to receive(
:new
).and_return( mock_beaker_hostgenerator_cli )
allow( Beaker::Options::HostsFileParser ).to receive( :parse_hosts_string )
parser.parse_hosts_options
expect( options_test[:hosts_file_generated] ).to be true
end
it 'beaker-hostgenerator failures trigger nice prints & a rethrow' do
options_test = {
:hosts_file => 'not_a_file.yml'
}
parser.instance_variable_set( :@options, options_test )
allow( Beaker::Options::HostsFileParser ).to receive(
:parse_hosts_file
).and_raise( Errno::ENOENT )
mock_beaker_hostgenerator_cli = Object.new
expect( BeakerHostGenerator::CLI ).to receive(
:new
).and_return( mock_beaker_hostgenerator_cli )
expect( mock_beaker_hostgenerator_cli ).to receive(
:execute
).and_raise( BeakerHostGenerator::Exceptions::InvalidNodeSpecError )
expect( Beaker::Options::HostsFileParser ).not_to receive( :parse_hosts_string )
expect( $stdout ).to receive( :puts ).with(
/does not exist/
).ordered
expect( $stderr ).to receive( :puts ).with(
/Exiting with an Error/
).ordered
expect {
parser.parse_hosts_options
}.to raise_error( BeakerHostGenerator::Exceptions::InvalidNodeSpecError )
end
it 'can be passed a nil hosts file and get the default hash back' do
parser.instance_variable_set( :@options, {} )
host_options = parser.parse_hosts_options
expect(host_options[:HOSTS]).to be === {}
end
end
end
context "set_default_host!" do
let(:roles) { @roles || [["master", "agent", "database"], ["agent"]] }
let(:node1) { {:node1 => {:roles => roles[0]}} }
let(:node2) { {:node2 => {:roles => roles[1]}} }
let(:hosts) { node1.merge(node2) }
it "does nothing if the default host is already set" do
@roles = [["master"], ["agent", "default"]]
parser.set_default_host!(hosts)
expect(hosts[:node1][:roles].include?('default')).to be === false
expect(hosts[:node2][:roles].include?('default')).to be === true
end
it "makes the master default" do
@roles = [["master"], ["agent"]]
parser.set_default_host!(hosts)
expect(hosts[:node1][:roles].include?('default')).to be === true
expect(hosts[:node2][:roles].include?('default')).to be === false
end
it "makes a single node default" do
@roles = [["master", "database", "dashboard", "agent"]]
parser.set_default_host!(node1)
expect(hosts[:node1][:roles].include?('default')).to be === true
end
it "makes a single non-master node default" do
@roles = [["database", "dashboard", "agent"]]
parser.set_default_host!(node1)
expect(hosts[:node1][:roles].include?('default')).to be === true
end
it "raises an error if two nodes are defined as default" do
@roles = [["master", "default"], ["default"]]
expect { parser.set_default_host!(hosts) }.to raise_error(ArgumentError)
end
end
describe "normalize_args" do
let(:hosts) do
Beaker::Options::OptionsHash.new.merge({
'HOSTS' => {
:master => {
:roles => ["master", "agent", "arbitrary_role"],
:platform => 'el-7-x86_64',
:user => 'root',
},
:agent => {
:roles => ["agent", "default", "other_abitrary_role"],
:platform => 'el-7-x86_64',
:user => 'root',
},
},
'fail_mode' => 'slow',
'preserve_hosts' => 'always',
'host_tags' => {}
})
end
def fake_hosts_file_for_platform(hosts, platform)
hosts['HOSTS'].values.each { |h| h[:platform] = platform }
filename = "hosts_file_#{platform}"
File.open(filename, "w") do |file|
YAML.dump(hosts, file)
end
filename
end
shared_examples_for(:a_platform_supporting_only_agents) do |platform, _type|
it "restricts #{platform} hosts to agent" do
args = []
args << '--hosts' << fake_hosts_file_for_platform(hosts, platform)
expect { parser.parse_args(args) }.to raise_error(ArgumentError, /#{platform}.*may not have roles: master, database, dashboard/)
end
end
context "restricts agents" do
it_should_behave_like(:a_platform_supporting_only_agents, 'windows-version-arch')
it_should_behave_like(:a_platform_supporting_only_agents, 'el-4-arch')
end
context "ssh user" do
it 'uses the ssh[:user] if it is provided' do
hosts['HOSTS'][:master][:ssh] = {:user => 'hello'}
parser.instance_variable_set(:@options, hosts)
parser.normalize_args
expect(hosts['HOSTS'][:master][:user]).to be == 'hello'
end
it 'uses default user if there is an ssh hash, but no ssh[:user]' do
hosts['HOSTS'][:master][:ssh] = {:hello => 'hello'}
parser.instance_variable_set(:@options, hosts)
parser.normalize_args
expect(hosts['HOSTS'][:master][:user]).to be == 'root'
end
it 'uses default user if no ssh hash' do
parser.instance_variable_set(:@options, hosts)
parser.normalize_args
expect(hosts['HOSTS'][:master][:user]).to be == 'root'
end
end
end
describe '#normalize_tags!' do
let (:test_tag_and ) { @test_tag_and || [] }
let (:test_tag_or ) { @test_tag_or || [] }
let (:test_tag_exclude ) { @test_tag_exclude || [] }
let (:options ) {
opts = Beaker::Options::OptionsHash.new
opts[:test_tag_and] = test_tag_and
opts[:test_tag_or] = test_tag_or
opts[:test_tag_exclude] = test_tag_exclude
opts
}
it 'does not error if no tags overlap' do
@test_tag_and = 'can,tommies,potatoes,plant'
@test_tag_or = 'juicy,zoomba,plantation'
@test_tag_exclude = 'joey,long_running,pants'
parser.instance_variable_set(:@options, options)
expect { parser.normalize_test_tags! }.not_to raise_error
end
it 'splits the basic case correctly' do
@test_tag_and = 'can,tommies,potatoes,plant'
@test_tag_or = 'johnny,wordsmith,zebra'
@test_tag_exclude = 'joey,long_running,pants'
parser.instance_variable_set(:@options, options)
parser.normalize_test_tags!
expect(options[:test_tag_and] ).to be === ['can', 'tommies', 'potatoes', 'plant']
expect(options[:test_tag_or] ).to be === ['johnny', 'wordsmith', 'zebra']
expect(options[:test_tag_exclude]).to be === ['joey', 'long_running', 'pants']
end
it 'returns empty arrays for empty strings' do
@test_tag_and = ''
@test_tag_or = ''
@test_tag_exclude = ''
parser.instance_variable_set(:@options, options)
parser.normalize_test_tags!
expect(options[:test_tag_and] ).to be === []
expect(options[:test_tag_or] ).to be === []
expect(options[:test_tag_exclude]).to be === []
end
it 'lowercases all tags correctly for later use' do
@test_tag_and = 'jeRRy_And_tOM,PARka'
@test_tag_or = 'clearLy_They,Neva'
@test_tag_exclude = 'lEet_spEAK,pOland'
parser.instance_variable_set(:@options, options)
parser.normalize_test_tags!
expect(options[:test_tag_and] ).to be === ['jerry_and_tom', 'parka']
expect(options[:test_tag_or] ).to be === ['clearly_they', 'neva']
expect(options[:test_tag_exclude]).to be === ['leet_speak', 'poland']
end
end
describe '#resolve_symlinks' do
let (:options) { Beaker::Options::OptionsHash.new }
it 'calls File.realpath if hosts_file is set' do
options[:hosts_file] = opts_path
parser.instance_variable_set(:@options, options)
parser.resolve_symlinks!
expect(parser.instance_variable_get(:@options)[:hosts_file]).to be === opts_path
end
it 'does not throw an error if hosts_file is not set' do
options[:hosts_file] = nil
parser.instance_variable_set(:@options, options)
expect { parser.resolve_symlinks! }.to_not raise_error
end
end
describe '#get_hypervisors' do
it 'returns a unique list' do
hosts_dupe = {
'vm1' => {hypervisor: 'hi'},
'vm2' => {hypervisor: 'hi'},
'vm3' => {hypervisor: 'bye'}
}
hosts_single = {'vm1' => {hypervisor: 'hi'}}
expect(parser.get_hypervisors(hosts_dupe)).to eq(%w(hi bye))
expect(parser.get_hypervisors(hosts_single)).to eq(%w(hi))
end
end
describe '#get_roles' do
it 'returns a unique list' do
roles_dupe = {
'vm1' => {roles: ['master']},
'vm2' => {roles: %w(database dashboard)},
'vm3' => {roles: ['bye']}
}
roles_single = {'vm1' => {roles: ['hi']}}
expect(parser.get_roles(roles_dupe)).to eq([['master'], %w(database dashboard), ['bye']])
expect(parser.get_roles(roles_single)).to eq([['hi']])
end
end
describe '#check_hypervisor_config' do
let (:options) { Beaker::Options::OptionsHash.new }
let (:invalid_file) { '/tmp/doesnotexist_visor.yml' }
before :each do
FakeFS.deactivate!
end
it 'checks ec2_yaml when blimpy' do
options[:ec2_yaml] = hosts_path
options[:dot_fog] = invalid_file
parser.instance_variable_set(:@options, options)
expect { parser.check_hypervisor_config('blimpy') }.to_not raise_error
end
it 'throws an error if ec2_yaml for blimpy is invalid' do
options[:ec2_yaml] = invalid_file
options[:dot_fog] = hosts_path
parser.instance_variable_set(:@options, options)
expect { parser.check_hypervisor_config('blimpy') }.to raise_error(ArgumentError, /required by blimpy/)
end
%w(aix solaris vcloud).each do |visor|
it "checks dot_fog when #{visor}" do
options[:ec2_yaml] = invalid_file
options[:dot_fog] = hosts_path
parser.instance_variable_set(:@options, options)
expect { parser.check_hypervisor_config(visor) }.to_not raise_error
end
it "throws an error if dot_fog for #{visor} is invalid" do
options[:ec2_yaml] = hosts_path
options[:dot_fog] = invalid_file
parser.instance_variable_set(:@options, options)
expect { parser.check_hypervisor_config(visor) }.to raise_error(ArgumentError, /required by #{visor}/)
end
end
it 'does not throw error on unknown visor' do
expect { parser.check_hypervisor_config('unknown_visor') }.to_not raise_error
end
end
end
end
end
| 1 | 15,092 | this should only be indented 2 spaces | voxpupuli-beaker | rb |
@@ -84,7 +84,10 @@ func TestLoadEnvConfig_Creds(t *testing.T) {
os.Setenv(k, v)
}
- cfg := loadEnvConfig()
+ cfg, err := loadEnvConfig()
+ if err != nil {
+ t.Errorf("failed to load env config, %v", err)
+ }
if !reflect.DeepEqual(c.Val, cfg.Creds) {
t.Errorf("expect credentials to match.\n%s",
awstesting.SprintExpectActual(c.Val, cfg.Creds)) | 1 | // +build go1.7
package session
import (
"os"
"reflect"
"strconv"
"testing"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/awstesting"
"github.com/aws/aws-sdk-go/internal/sdktesting"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
func TestLoadEnvConfig_Creds(t *testing.T) {
cases := []struct {
Env map[string]string
Val credentials.Value
}{
{
Env: map[string]string{
"AWS_ACCESS_KEY": "AKID",
},
Val: credentials.Value{},
},
{
Env: map[string]string{
"AWS_ACCESS_KEY_ID": "AKID",
},
Val: credentials.Value{},
},
{
Env: map[string]string{
"AWS_SECRET_KEY": "SECRET",
},
Val: credentials.Value{},
},
{
Env: map[string]string{
"AWS_SECRET_ACCESS_KEY": "SECRET",
},
Val: credentials.Value{},
},
{
Env: map[string]string{
"AWS_ACCESS_KEY_ID": "AKID",
"AWS_SECRET_ACCESS_KEY": "SECRET",
},
Val: credentials.Value{
AccessKeyID: "AKID", SecretAccessKey: "SECRET",
ProviderName: "EnvConfigCredentials",
},
},
{
Env: map[string]string{
"AWS_ACCESS_KEY": "AKID",
"AWS_SECRET_KEY": "SECRET",
},
Val: credentials.Value{
AccessKeyID: "AKID", SecretAccessKey: "SECRET",
ProviderName: "EnvConfigCredentials",
},
},
{
Env: map[string]string{
"AWS_ACCESS_KEY": "AKID",
"AWS_SECRET_KEY": "SECRET",
"AWS_SESSION_TOKEN": "TOKEN",
},
Val: credentials.Value{
AccessKeyID: "AKID", SecretAccessKey: "SECRET", SessionToken: "TOKEN",
ProviderName: "EnvConfigCredentials",
},
},
}
for i, c := range cases {
t.Run(strconv.Itoa(i), func(t *testing.T) {
restoreEnvFn := sdktesting.StashEnv()
defer restoreEnvFn()
for k, v := range c.Env {
os.Setenv(k, v)
}
cfg := loadEnvConfig()
if !reflect.DeepEqual(c.Val, cfg.Creds) {
t.Errorf("expect credentials to match.\n%s",
awstesting.SprintExpectActual(c.Val, cfg.Creds))
}
})
}
}
func TestLoadEnvConfig(t *testing.T) {
restoreEnvFn := sdktesting.StashEnv()
defer restoreEnvFn()
cases := []struct {
Env map[string]string
UseSharedConfigCall bool
Config envConfig
}{
{
Env: map[string]string{
"AWS_REGION": "region",
"AWS_PROFILE": "profile",
},
Config: envConfig{
Region: "region", Profile: "profile",
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
},
{
Env: map[string]string{
"AWS_REGION": "region",
"AWS_DEFAULT_REGION": "default_region",
"AWS_PROFILE": "profile",
"AWS_DEFAULT_PROFILE": "default_profile",
},
Config: envConfig{
Region: "region", Profile: "profile",
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
},
{
Env: map[string]string{
"AWS_REGION": "region",
"AWS_DEFAULT_REGION": "default_region",
"AWS_PROFILE": "profile",
"AWS_DEFAULT_PROFILE": "default_profile",
"AWS_SDK_LOAD_CONFIG": "1",
},
Config: envConfig{
Region: "region", Profile: "profile",
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
},
{
Env: map[string]string{
"AWS_DEFAULT_REGION": "default_region",
"AWS_DEFAULT_PROFILE": "default_profile",
},
Config: envConfig{
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
},
{
Env: map[string]string{
"AWS_DEFAULT_REGION": "default_region",
"AWS_DEFAULT_PROFILE": "default_profile",
"AWS_SDK_LOAD_CONFIG": "1",
},
Config: envConfig{
Region: "default_region", Profile: "default_profile",
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
},
{
Env: map[string]string{
"AWS_REGION": "region",
"AWS_PROFILE": "profile",
},
Config: envConfig{
Region: "region", Profile: "profile",
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
UseSharedConfigCall: true,
},
{
Env: map[string]string{
"AWS_REGION": "region",
"AWS_DEFAULT_REGION": "default_region",
"AWS_PROFILE": "profile",
"AWS_DEFAULT_PROFILE": "default_profile",
},
Config: envConfig{
Region: "region", Profile: "profile",
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
UseSharedConfigCall: true,
},
{
Env: map[string]string{
"AWS_REGION": "region",
"AWS_DEFAULT_REGION": "default_region",
"AWS_PROFILE": "profile",
"AWS_DEFAULT_PROFILE": "default_profile",
"AWS_SDK_LOAD_CONFIG": "1",
},
Config: envConfig{
Region: "region", Profile: "profile",
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
UseSharedConfigCall: true,
},
{
Env: map[string]string{
"AWS_DEFAULT_REGION": "default_region",
"AWS_DEFAULT_PROFILE": "default_profile",
},
Config: envConfig{
Region: "default_region", Profile: "default_profile",
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
UseSharedConfigCall: true,
},
{
Env: map[string]string{
"AWS_DEFAULT_REGION": "default_region",
"AWS_DEFAULT_PROFILE": "default_profile",
"AWS_SDK_LOAD_CONFIG": "1",
},
Config: envConfig{
Region: "default_region", Profile: "default_profile",
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
UseSharedConfigCall: true,
},
{
Env: map[string]string{
"AWS_CA_BUNDLE": "custom_ca_bundle",
},
Config: envConfig{
CustomCABundle: "custom_ca_bundle",
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
},
{
Env: map[string]string{
"AWS_CA_BUNDLE": "custom_ca_bundle",
},
Config: envConfig{
CustomCABundle: "custom_ca_bundle",
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
UseSharedConfigCall: true,
},
{
Env: map[string]string{
"AWS_SHARED_CREDENTIALS_FILE": "/path/to/credentials/file",
"AWS_CONFIG_FILE": "/path/to/config/file",
},
Config: envConfig{
SharedCredentialsFile: "/path/to/credentials/file",
SharedConfigFile: "/path/to/config/file",
},
},
}
for i, c := range cases {
t.Run(strconv.Itoa(i), func(t *testing.T) {
restoreEnvFn = sdktesting.StashEnv()
defer restoreEnvFn()
for k, v := range c.Env {
os.Setenv(k, v)
}
var cfg envConfig
if c.UseSharedConfigCall {
cfg = loadSharedEnvConfig()
} else {
cfg = loadEnvConfig()
}
if !reflect.DeepEqual(c.Config, cfg) {
t.Errorf("expect config to match.\n%s",
awstesting.SprintExpectActual(c.Config, cfg))
}
})
}
}
func TestSetEnvValue(t *testing.T) {
restoreEnvFn := sdktesting.StashEnv()
defer restoreEnvFn()
os.Setenv("empty_key", "")
os.Setenv("second_key", "2")
os.Setenv("third_key", "3")
var dst string
setFromEnvVal(&dst, []string{
"empty_key", "first_key", "second_key", "third_key",
})
if e, a := "2", dst; e != a {
t.Errorf("expect %s value from environment, got %s", e, a)
}
}
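The hunk for this record reports a loadEnvConfig failure with t.Errorf and then keeps going with cfg. A sketch of the stricter pattern the review suggests — t.Fatalf stops the test at the failure point (the test name below is illustrative, not from the SDK):

package session

import "testing"

// Sketch only: mirrors the error handling suggested in review for TestLoadEnvConfig_Creds.
func TestLoadEnvConfigStopsOnError(t *testing.T) {
	cfg, err := loadEnvConfig()
	if err != nil {
		// Fatalf fails the test and returns immediately, so cfg is never inspected when loading failed.
		t.Fatalf("failed to load env config, %v", err)
	}
	_ = cfg // the real test would go on to compare cfg.Creds against the expected credentials
}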
| 1 | 9,844 | Nit, these Err not nils should be `t.Fatalf` not error. We want to the test to stop if this error occurs. | aws-aws-sdk-go | go |
@@ -262,6 +262,10 @@ public class DatasetPage implements java.io.Serializable {
private Boolean hasRsyncScript = false;
+ private Boolean hasTabular = false;
+
+ private Boolean downloadOriginalTracker = false;
+
List<ExternalTool> configureTools = new ArrayList<>();
List<ExternalTool> exploreTools = new ArrayList<>();
Map<Long, List<ExternalTool>> configureToolsByFileId = new HashMap<>(); | 1 | package edu.harvard.iq.dataverse;
import edu.harvard.iq.dataverse.provenance.ProvPopupFragmentBean;
import edu.harvard.iq.dataverse.api.AbstractApiBean;
import edu.harvard.iq.dataverse.authorization.AuthenticationServiceBean;
import edu.harvard.iq.dataverse.authorization.Permission;
import edu.harvard.iq.dataverse.authorization.providers.builtin.BuiltinUserServiceBean;
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import edu.harvard.iq.dataverse.authorization.users.PrivateUrlUser;
import edu.harvard.iq.dataverse.dataaccess.StorageIO;
import edu.harvard.iq.dataverse.dataaccess.ImageThumbConverter;
import edu.harvard.iq.dataverse.dataaccess.SwiftAccessIO;
import edu.harvard.iq.dataverse.datacapturemodule.DataCaptureModuleUtil;
import edu.harvard.iq.dataverse.datacapturemodule.ScriptRequestResponse;
import edu.harvard.iq.dataverse.dataset.DatasetThumbnail;
import edu.harvard.iq.dataverse.dataset.DatasetUtil;
import edu.harvard.iq.dataverse.datavariable.VariableServiceBean;
import edu.harvard.iq.dataverse.engine.command.Command;
import edu.harvard.iq.dataverse.engine.command.exception.CommandException;
import edu.harvard.iq.dataverse.engine.command.impl.CreatePrivateUrlCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DeaccessionDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DeleteDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DeletePrivateUrlCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DestroyDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.GetPrivateUrlCommand;
import edu.harvard.iq.dataverse.engine.command.impl.LinkDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.PublishDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.PublishDataverseCommand;
import edu.harvard.iq.dataverse.engine.command.impl.UpdateDatasetVersionCommand;
import edu.harvard.iq.dataverse.export.ExportException;
import edu.harvard.iq.dataverse.export.ExportService;
import edu.harvard.iq.dataverse.export.spi.Exporter;
import edu.harvard.iq.dataverse.ingest.IngestRequest;
import edu.harvard.iq.dataverse.ingest.IngestServiceBean;
import edu.harvard.iq.dataverse.metadataimport.ForeignMetadataImportServiceBean;
import edu.harvard.iq.dataverse.privateurl.PrivateUrl;
import edu.harvard.iq.dataverse.privateurl.PrivateUrlServiceBean;
import edu.harvard.iq.dataverse.privateurl.PrivateUrlUtil;
import edu.harvard.iq.dataverse.search.SearchFilesServiceBean;
import edu.harvard.iq.dataverse.search.SortBy;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.util.BundleUtil;
import edu.harvard.iq.dataverse.util.FileSortFieldAndOrder;
import edu.harvard.iq.dataverse.util.FileUtil;
import edu.harvard.iq.dataverse.util.JsfHelper;
import static edu.harvard.iq.dataverse.util.JsfHelper.JH;
import edu.harvard.iq.dataverse.util.StringUtil;
import edu.harvard.iq.dataverse.util.SystemConfig;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.sql.Timestamp;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.logging.Logger;
import javax.ejb.EJB;
import javax.ejb.EJBException;
import javax.faces.application.FacesMessage;
import javax.faces.context.FacesContext;
import javax.faces.event.ActionEvent;
import javax.faces.event.ValueChangeEvent;
import javax.faces.view.ViewScoped;
import javax.inject.Inject;
import javax.inject.Named;
import org.primefaces.event.FileUploadEvent;
import org.primefaces.model.UploadedFile;
import javax.validation.ConstraintViolation;
import org.apache.commons.httpclient.HttpClient;
import org.primefaces.context.RequestContext;
import java.util.Arrays;
import java.util.HashSet;
import javax.faces.model.SelectItem;
import java.util.logging.Level;
import edu.harvard.iq.dataverse.datasetutility.WorldMapPermissionHelper;
import edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException;
import edu.harvard.iq.dataverse.engine.command.impl.CreateNewDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DeleteDataFileCommand;
import edu.harvard.iq.dataverse.engine.command.impl.GetLatestPublishedDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.RequestRsyncScriptCommand;
import edu.harvard.iq.dataverse.engine.command.impl.PublishDatasetResult;
import edu.harvard.iq.dataverse.engine.command.impl.RestrictFileCommand;
import edu.harvard.iq.dataverse.engine.command.impl.ReturnDatasetToAuthorCommand;
import edu.harvard.iq.dataverse.engine.command.impl.SubmitDatasetForReviewCommand;
import edu.harvard.iq.dataverse.externaltools.ExternalTool;
import edu.harvard.iq.dataverse.externaltools.ExternalToolServiceBean;
import edu.harvard.iq.dataverse.export.SchemaDotOrgExporter;
import java.util.Collections;
import javax.faces.event.AjaxBehaviorEvent;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang.StringEscapeUtils;
import org.primefaces.component.tabview.TabView;
import org.primefaces.event.CloseEvent;
import org.primefaces.event.TabChangeEvent;
import org.primefaces.event.data.PageEvent;
/**
*
* @author gdurand
*/
@ViewScoped
@Named("DatasetPage")
public class DatasetPage implements java.io.Serializable {
private static final Logger logger = Logger.getLogger(DatasetPage.class.getCanonicalName());
public enum EditMode {
CREATE, INFO, FILE, METADATA, LICENSE
};
public enum DisplayMode {
INIT, SAVE
};
@EJB
DatasetServiceBean datasetService;
@EJB
DatasetVersionServiceBean datasetVersionService;
@EJB
DataFileServiceBean datafileService;
@EJB
PermissionServiceBean permissionService;
@EJB
DataverseServiceBean dataverseService;
@EJB
DatasetFieldServiceBean fieldService;
@EJB
VariableServiceBean variableService;
@EJB
IngestServiceBean ingestService;
@EJB
ForeignMetadataImportServiceBean metadataImportService;
@EJB
EjbDataverseEngine commandEngine;
@Inject
DataverseSession session;
@EJB
UserNotificationServiceBean userNotificationService;
@EJB
MapLayerMetadataServiceBean mapLayerMetadataService;
@EJB
BuiltinUserServiceBean builtinUserService;
@EJB
DataverseFieldTypeInputLevelServiceBean dataverseFieldTypeInputLevelService;
@EJB
SettingsServiceBean settingsService;
@EJB
AuthenticationServiceBean authService;
@EJB
SystemConfig systemConfig;
@EJB
GuestbookResponseServiceBean guestbookResponseService;
@EJB
FileDownloadServiceBean fileDownloadService;
@EJB
DataverseLinkingServiceBean dvLinkingService;
@EJB
DatasetLinkingServiceBean dsLinkingService;
@EJB
SearchFilesServiceBean searchFilesService;
@EJB
DataverseRoleServiceBean dataverseRoleService;
@EJB
PrivateUrlServiceBean privateUrlService;
@EJB
ExternalToolServiceBean externalToolService;
@Inject
DataverseRequestServiceBean dvRequestService;
@Inject
DatasetVersionUI datasetVersionUI;
@Inject
PermissionsWrapper permissionsWrapper;
@Inject
FileDownloadHelper fileDownloadHelper;
@Inject
WorldMapPermissionHelper worldMapPermissionHelper;
@Inject
ThumbnailServiceWrapper thumbnailServiceWrapper;
@Inject
SettingsWrapper settingsWrapper;
@Inject
ProvPopupFragmentBean provPopupFragmentBean;
private Dataset dataset = new Dataset();
private EditMode editMode;
private boolean bulkFileDeleteInProgress = false;
private Long ownerId;
private Long versionId;
private int selectedTabIndex;
private List<DataFile> newFiles = new ArrayList<>();
private DatasetVersion workingVersion;
private int releaseRadio = 1;
private int deaccessionRadio = 0;
private int deaccessionReasonRadio = 0;
private String datasetNextMajorVersion = "1.0";
private String datasetNextMinorVersion = "";
private String dropBoxSelection = "";
private String deaccessionReasonText = "";
private String displayCitation;
private String deaccessionForwardURLFor = "";
private String showVersionList = "false";
private List<Template> dataverseTemplates = new ArrayList<>();
private Template defaultTemplate;
private Template selectedTemplate;
/**
* In the file listing, the page the user is on. This is zero-indexed so if
* the user clicks page 2 in the UI, this will be 1.
*/
private int filePaginatorPage;
private int rowsPerPage;
private String persistentId;
private String version;
private String protocol = "";
private String authority = "";
private String customFields="";
private boolean noDVsAtAll = false;
private boolean noDVsRemaining = false;
private boolean stateChanged = false;
private List<Dataverse> dataversesForLinking = new ArrayList<>();
private Long linkingDataverseId;
private List<SelectItem> linkingDVSelectItems;
private Dataverse linkingDataverse;
// Version tab lists
private List<DatasetVersion> versionTabList = new ArrayList<>();
private List<DatasetVersion> versionTabListForPostLoad = new ArrayList<>();
// Used to store results of permissions checks
private final Map<String, Boolean> datasetPermissionMap = new HashMap<>(); // { Permission human_name : Boolean }
private DataFile selectedDownloadFile;
private Long maxFileUploadSizeInBytes = null;
private String dataverseSiteUrl = "";
private boolean removeUnusedTags;
private Boolean hasRsyncScript = false;
List<ExternalTool> configureTools = new ArrayList<>();
List<ExternalTool> exploreTools = new ArrayList<>();
Map<Long, List<ExternalTool>> configureToolsByFileId = new HashMap<>();
Map<Long, List<ExternalTool>> exploreToolsByFileId = new HashMap<>();
public Boolean isHasRsyncScript() {
return hasRsyncScript;
}
public void setHasRsyncScript(Boolean hasRsyncScript) {
this.hasRsyncScript = hasRsyncScript;
}
/**
* The contents of the script.
*/
private String rsyncScript = "";
public String getRsyncScript() {
return rsyncScript;
}
public void setRsyncScript(String rsyncScript) {
this.rsyncScript = rsyncScript;
}
private String rsyncScriptFilename;
public String getRsyncScriptFilename() {
return rsyncScriptFilename;
}
private String thumbnailString = null;
// This is the Dataset-level thumbnail;
// it's either the thumbnail of the designated datafile,
// or scaled down uploaded "logo" file, or randomly selected
// image datafile from this dataset.
public String getThumbnailString() {
// This method gets called 30 (!) times, just to load the page!
// - so let's cache that string the first time it's called.
if (thumbnailString != null) {
if ("".equals(thumbnailString)) {
return null;
}
return thumbnailString;
}
if (!readOnly) {
DatasetThumbnail datasetThumbnail = dataset.getDatasetThumbnail();
if (datasetThumbnail == null) {
thumbnailString = "";
return null;
}
if (datasetThumbnail.isFromDataFile()) {
if (!datasetThumbnail.getDataFile().equals(dataset.getThumbnailFile())) {
datasetService.assignDatasetThumbnailByNativeQuery(dataset, datasetThumbnail.getDataFile());
// refresh the dataset:
dataset = datasetService.find(dataset.getId());
}
}
thumbnailString = datasetThumbnail.getBase64image();
} else {
thumbnailString = thumbnailServiceWrapper.getDatasetCardImageAsBase64Url(dataset, workingVersion.getId(),!workingVersion.isDraft());
if (thumbnailString == null) {
thumbnailString = "";
return null;
}
}
return thumbnailString;
}
public void setThumbnailString(String thumbnailString) {
//Dummy method
}
public boolean isRemoveUnusedTags() {
return removeUnusedTags;
}
public void setRemoveUnusedTags(boolean removeUnusedTags) {
this.removeUnusedTags = removeUnusedTags;
}
private List<FileMetadata> fileMetadatas;
private String fileSortField;
private String fileSortOrder;
private LazyFileMetadataDataModel lazyModel;
public LazyFileMetadataDataModel getLazyModel() {
return lazyModel;
}
public void setLazyModel(LazyFileMetadataDataModel lazyModel) {
this.lazyModel = lazyModel;
}
public List<Entry<String,String>> getCartList() {
if (session.getUser() instanceof AuthenticatedUser) {
return ((AuthenticatedUser) session.getUser()).getCart().getContents();
}
return null;
}
public boolean checkCartForItem(String title, String persistentId) {
if (session.getUser() instanceof AuthenticatedUser) {
return ((AuthenticatedUser) session.getUser()).getCart().checkCartForItem(title, persistentId);
}
return false;
}
public void addItemtoCart(String title, String persistentId) throws Exception{
if (canComputeAllFiles(true)) {
if (session.getUser() instanceof AuthenticatedUser) {
AuthenticatedUser authUser = (AuthenticatedUser) session.getUser();
try {
authUser.getCart().addItem(title, persistentId);
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.compute.computeBatch.success"));
} catch (Exception ex){
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataset.compute.computeBatch.failure"));
}
}
}
}
public void removeCartItem(String title, String persistentId) throws Exception {
if (session.getUser() instanceof AuthenticatedUser) {
AuthenticatedUser authUser = (AuthenticatedUser) session.getUser();
try {
authUser.getCart().removeItem(title, persistentId);
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.compute.computeBatch.success"));
} catch (Exception ex){
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataset.compute.computeBatch.failure"));
}
}
}
public void clearCart() throws Exception {
if (session.getUser() instanceof AuthenticatedUser) {
AuthenticatedUser authUser = (AuthenticatedUser) session.getUser();
try {
authUser.getCart().clear();
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.compute.computeBatch.success"));
} catch (Exception ex){
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataset.compute.computeBatch.failure"));
}
}
}
public boolean isCartEmpty() {
if (session.getUser() instanceof AuthenticatedUser) {
AuthenticatedUser authUser = (AuthenticatedUser) session.getUser();
return authUser.getCart().getContents().isEmpty();
}
return true;
}
public String getCartComputeUrl() {
if (session.getUser() instanceof AuthenticatedUser) {
AuthenticatedUser authUser = (AuthenticatedUser) session.getUser();
String url = settingsWrapper.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl);
if (url == null) {
return "";
}
// url indicates that you are computing with multiple datasets
url += "/multiparty?";
List<Entry<String,String>> contents = authUser.getCart().getContents();
for (Entry<String,String> entry : contents) {
String persistentIdUrl = entry.getValue();
url += persistentIdUrl + "&";
}
return url.substring(0, url.length() - 1);
}
return "";
}
private String fileLabelSearchTerm;
public String getFileLabelSearchTerm() {
return fileLabelSearchTerm;
}
public void setFileLabelSearchTerm(String fileLabelSearchTerm) {
if (fileLabelSearchTerm != null) {
this.fileLabelSearchTerm = fileLabelSearchTerm.trim();
}
}
private List<FileMetadata> fileMetadatasSearch;
public List<FileMetadata> getFileMetadatasSearch() {
return fileMetadatasSearch;
}
public void setFileMetadatasSearch(List<FileMetadata> fileMetadatasSearch) {
this.fileMetadatasSearch = fileMetadatasSearch;
}
public void updateFileSearch(){
logger.info("updating file search list");
if (readOnly) {
this.fileMetadatasSearch = selectFileMetadatasForDisplay(this.fileLabelSearchTerm);
} else {
this.fileMetadatasSearch = datafileService.findFileMetadataByDatasetVersionIdLabelSearchTerm(workingVersion.getId(), this.fileLabelSearchTerm, "", "");
}
}
private Long numberOfFilesToShow = (long) 25;
public Long getNumberOfFilesToShow() {
return numberOfFilesToShow;
}
public void setNumberOfFilesToShow(Long numberOfFilesToShow) {
this.numberOfFilesToShow = numberOfFilesToShow;
}
public void showAll(){
setNumberOfFilesToShow(new Long(fileMetadatasSearch.size()));
}
private List<FileMetadata> selectFileMetadatasForDisplay(String searchTerm) {
Set<Long> searchResultsIdSet = null;
if (searchTerm != null && !searchTerm.equals("")) {
List<Integer> searchResultsIdList = datafileService.findFileMetadataIdsByDatasetVersionIdLabelSearchTerm(workingVersion.getId(), searchTerm, "", "");
searchResultsIdSet = new HashSet<>();
for (Integer id : searchResultsIdList) {
searchResultsIdSet.add(id.longValue());
}
}
List<FileMetadata> retList = new ArrayList<>();
for (FileMetadata fileMetadata : workingVersion.getFileMetadatasSorted()) {
if (searchResultsIdSet == null || searchResultsIdSet.contains(fileMetadata.getId())) {
retList.add(fileMetadata);
}
}
return retList;
}
/*
Save the setting locally so db isn't hit repeatedly
This may be "null", signifying unlimited download size
*/
public Long getMaxFileUploadSizeInBytes(){
return this.maxFileUploadSizeInBytes;
}
public boolean isUnlimitedUploadFileSize(){
if (this.maxFileUploadSizeInBytes == null){
return true;
}
return false;
}
public String getDataverseSiteUrl() {
return this.dataverseSiteUrl;
}
public void setDataverseSiteUrl(String dataverseSiteUrl) {
this.dataverseSiteUrl = dataverseSiteUrl;
}
public DataFile getInitialDataFile() {
if (workingVersion.getFileMetadatas() != null && workingVersion.getFileMetadatas().size() > 0) {
return workingVersion.getFileMetadatas().get(0).getDataFile();
}
return null;
}
public SwiftAccessIO getSwiftObject() {
try {
StorageIO<DataFile> storageIO = getInitialDataFile() == null ? null : getInitialDataFile().getStorageIO();
if (storageIO != null && storageIO instanceof SwiftAccessIO) {
return (SwiftAccessIO)storageIO;
} else {
logger.fine("DatasetPage: Failed to cast storageIO as SwiftAccessIO (most likely because storageIO is a FileAccessIO)");
}
} catch (IOException e) {
logger.fine("DatasetPage: Failed to get storageIO");
}
return null;
}
public String getSwiftContainerName() throws IOException {
SwiftAccessIO swiftObject = getSwiftObject();
try {
swiftObject.open();
return swiftObject.getSwiftContainerName();
} catch (Exception e){
logger.info("DatasetPage: Failed to open swift object");
}
return "";
}
public void setSwiftContainerName(String name){
}
//This function applies to an entire dataset
private boolean isSwiftStorage() {
//containers without datafiles will not be stored in swift storage
if (getInitialDataFile() != null){
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
//if any of the datafiles are stored in swift
if (fmd.getDataFile().getStorageIdentifier().startsWith("swift://")) {
return true;
}
}
}
return false;
}
//This function applies to a single datafile
private boolean isSwiftStorage(FileMetadata metadata){
if (metadata.getDataFile().getStorageIdentifier().startsWith("swift://")) {
return true;
}
return false;
}
private Boolean showComputeButtonForDataset = null;
//This function applies to an entire dataset
public boolean showComputeButton() {
if (showComputeButtonForDataset != null) {
return showComputeButtonForDataset;
}
if (isSwiftStorage() && (settingsService.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) != null)) {
showComputeButtonForDataset = true;
} else {
showComputeButtonForDataset = false;
}
return showComputeButtonForDataset;
}
private Map<Long, Boolean> showComputeButtonForFile = new HashMap<>();
//this function applies to a single datafile
public boolean showComputeButton(FileMetadata metadata) {
Long fileId = metadata.getDataFile().getId();
if (fileId == null) {
return false;
}
if (showComputeButtonForFile.containsKey(fileId)) {
return showComputeButtonForFile.get(fileId);
}
boolean result = isSwiftStorage(metadata)
&& settingsService.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) != null;
showComputeButtonForFile.put(fileId, result);
return result;
}
public boolean canComputeAllFiles(boolean isCartCompute){
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
if (!fileDownloadHelper.canDownloadFile(fmd)) {
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('computeInvalid').show()");
return false;
}
}
if (!isCartCompute) {
try {
FacesContext.getCurrentInstance().getExternalContext().redirect(getComputeUrl());
} catch (IOException ioex) {
logger.warning("Failed to issue a redirect.");
}
}
return true;
}
/*
    in getComputeUrl(), we are sending the container/dataset name and the expiry and signature
for the temporary url of only ONE datafile within the dataset. This is because in the
ceph version of swift, we are only able to generate the temporary url for a single object
within a container.
Ideally, we want a temporary url for an entire container/dataset, so perhaps this could instead
be handled on the compute environment end.
Additionally, we have to think about the implications this could have with dataset versioning,
since we currently store all files (even from old versions) in the same container.
--SF
*/
public String getComputeUrl() throws IOException {
return settingsWrapper.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) + "?" + this.getPersistentId();
//WHEN we are able to get a temp url for a dataset
//return settingsWrapper.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) + "?containerName=" + swiftObject.getSwiftContainerName() + "&temp_url_sig=" + swiftObject.getTempUrlSignature() + "&temp_url_expires=" + swiftObject.getTempUrlExpiry();
}
//For a single file
public String getComputeUrl(FileMetadata metadata) {
SwiftAccessIO swiftObject = null;
try {
StorageIO<DataFile> storageIO = metadata.getDataFile().getStorageIO();
if (storageIO != null && storageIO instanceof SwiftAccessIO) {
swiftObject = (SwiftAccessIO)storageIO;
swiftObject.open();
}
} catch (IOException e) {
logger.info("DatasetPage: Failed to get storageIO");
}
if (settingsWrapper.isTrueForKey(SettingsServiceBean.Key.PublicInstall, false)) {
return settingsWrapper.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) + "?" + this.getPersistentId() + "=" + swiftObject.getSwiftFileName();
}
return settingsWrapper.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) + "?" + this.getPersistentId() + "=" + swiftObject.getSwiftFileName() + "&temp_url_sig=" + swiftObject.getTempUrlSignature() + "&temp_url_expires=" + swiftObject.getTempUrlExpiry();
}
public String getCloudEnvironmentName() {
return settingsWrapper.getValueForKey(SettingsServiceBean.Key.CloudEnvironmentName);
}
public DataFile getSelectedDownloadFile() {
return selectedDownloadFile;
}
public void setSelectedDownloadFile(DataFile selectedDownloadFile) {
this.selectedDownloadFile = selectedDownloadFile;
}
public List<DataFile> getNewFiles() {
return newFiles;
}
public void setNewFiles(List<DataFile> newFiles) {
this.newFiles = newFiles;
}
public Dataverse getLinkingDataverse() {
return linkingDataverse;
}
public void setLinkingDataverse(Dataverse linkingDataverse) {
this.linkingDataverse = linkingDataverse;
}
public List<SelectItem> getLinkingDVSelectItems() {
return linkingDVSelectItems;
}
public void setLinkingDVSelectItems(List<SelectItem> linkingDVSelectItems) {
this.linkingDVSelectItems = linkingDVSelectItems;
}
public Long getLinkingDataverseId() {
return linkingDataverseId;
}
public void setLinkingDataverseId(Long linkingDataverseId) {
this.linkingDataverseId = linkingDataverseId;
}
public List<Dataverse> getDataversesForLinking() {
return dataversesForLinking;
}
public void setDataversesForLinking(List<Dataverse> dataversesForLinking) {
this.dataversesForLinking = dataversesForLinking;
}
public void updateReleasedVersions(){
setReleasedVersionTabList(resetReleasedVersionTabList());
}
public void updateLinkableDataverses() {
dataversesForLinking = new ArrayList<>();
linkingDVSelectItems = new ArrayList<>();
//Since this is a super user we are getting all dataverses
dataversesForLinking = dataverseService.findAll();
if (dataversesForLinking.isEmpty()) {
setNoDVsAtAll(true);
return;
}
dataversesForLinking.remove(dataset.getOwner());
Dataverse testDV = dataset.getOwner();
while(testDV.getOwner() != null){
dataversesForLinking.remove(testDV.getOwner());
testDV = testDV.getOwner();
}
for (Dataverse removeLinked : dsLinkingService.findLinkingDataverses(dataset.getId())) {
dataversesForLinking.remove(removeLinked);
}
for (Dataverse removeLinked : dvLinkingService.findLinkingDataverses(dataset.getOwner().getId())) {
dataversesForLinking.remove(removeLinked);
}
if (dataversesForLinking.isEmpty()) {
setNoDVsRemaining(true);
return;
}
for (Dataverse selectDV : dataversesForLinking) {
linkingDVSelectItems.add(new SelectItem(selectDV.getId(), selectDV.getDisplayName()));
}
if (!dataversesForLinking.isEmpty() && dataversesForLinking.size() == 1 && dataversesForLinking.get(0) != null) {
linkingDataverse = dataversesForLinking.get(0);
linkingDataverseId = linkingDataverse.getId();
}
}
public void updateSelectedLinkingDV(ValueChangeEvent event) {
linkingDataverseId = (Long) event.getNewValue();
}
public boolean isNoDVsAtAll() {
return noDVsAtAll;
}
public void setNoDVsAtAll(boolean noDVsAtAll) {
this.noDVsAtAll = noDVsAtAll;
}
public boolean isNoDVsRemaining() {
return noDVsRemaining;
}
private Map<Long, String> datafileThumbnailsMap = new HashMap<>();
public boolean isThumbnailAvailable(FileMetadata fileMetadata) {
// new and optimized logic:
// - check download permission here (should be cached - so it's free!)
// - only then check if the thumbnail is available/exists.
// then cache the results!
Long dataFileId = fileMetadata.getDataFile().getId();
if (datafileThumbnailsMap.containsKey(dataFileId)) {
return !"".equals(datafileThumbnailsMap.get(dataFileId));
}
if (!FileUtil.isThumbnailSupported(fileMetadata.getDataFile())) {
datafileThumbnailsMap.put(dataFileId, "");
return false;
}
if (!this.fileDownloadHelper.canDownloadFile(fileMetadata)) {
datafileThumbnailsMap.put(dataFileId, "");
return false;
}
String thumbnailAsBase64 = ImageThumbConverter.getImageThumbnailAsBase64(fileMetadata.getDataFile(), ImageThumbConverter.DEFAULT_THUMBNAIL_SIZE);
//if (datafileService.isThumbnailAvailable(fileMetadata.getDataFile())) {
if (!StringUtil.isEmpty(thumbnailAsBase64)) {
datafileThumbnailsMap.put(dataFileId, thumbnailAsBase64);
return true;
}
datafileThumbnailsMap.put(dataFileId, "");
return false;
}
public String getDataFileThumbnailAsBase64(FileMetadata fileMetadata) {
return datafileThumbnailsMap.get(fileMetadata.getDataFile().getId());
}
// Another convenience method - to cache Update Permission on the dataset:
public boolean canUpdateDataset() {
return permissionsWrapper.canUpdateDataset(dvRequestService.getDataverseRequest(), this.dataset);
}
public boolean canPublishDataverse() {
return permissionsWrapper.canIssuePublishDataverseCommand(dataset.getOwner());
}
public boolean canViewUnpublishedDataset() {
return permissionsWrapper.canViewUnpublishedDataset( dvRequestService.getDataverseRequest(), dataset);
}
/*
* 4.2.1 optimization.
* HOWEVER, this doesn't appear to be saving us anything!
* i.e., it's just as cheap to use session.getUser().isAuthenticated()
* every time; it doesn't do any new db lookups.
*/
public boolean isSessionUserAuthenticated() {
return session.getUser().isAuthenticated();
}
/**
* For use in the Dataset page
* @return
*/
public boolean isSuperUser(){
if (!this.isSessionUserAuthenticated()){
return false;
}
if (this.session.getUser().isSuperuser()){
return true;
}
return false;
}
/*
TODO/OPTIMIZATION: This is still costing us N SELECT FROM GuestbookResponse queries,
where N is the number of files. This could of course be replaced by a query that'll
look up all N at once... Not sure if it's worth it; especially now that N
will always be 10, for the initial page load. -- L.A. 4.2.1
*/
public Long getGuestbookResponseCount(FileMetadata fileMetadata) {
return guestbookResponseService.getCountGuestbookResponsesByDataFileId(fileMetadata.getDataFile().getId());
}
/**
* Check Dataset related permissions
*
* @param permissionToCheck
* @return
*/
public boolean doesSessionUserHaveDataSetPermission(Permission permissionToCheck){
if (permissionToCheck == null){
return false;
}
String permName = permissionToCheck.getHumanName();
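// The human-readable permission name doubles as the cache key, so each permission
// (e.g. Permission.EditDataset) only gets checked once per page instance.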
// Has this check already been done?
//
if (this.datasetPermissionMap.containsKey(permName)){
// Yes, return previous answer
return this.datasetPermissionMap.get(permName);
}
// Check the permission
//
boolean hasPermission = this.permissionService.userOn(this.session.getUser(), this.dataset).has(permissionToCheck);
// Save the permission
this.datasetPermissionMap.put(permName, hasPermission);
// return true/false
return hasPermission;
}
public void setNoDVsRemaining(boolean noDVsRemaining) {
this.noDVsRemaining = noDVsRemaining;
}
private final Map<Long, MapLayerMetadata> mapLayerMetadataLookup = new HashMap<>();
private GuestbookResponse guestbookResponse;
private Guestbook selectedGuestbook;
public GuestbookResponse getGuestbookResponse() {
return guestbookResponse;
}
public void setGuestbookResponse(GuestbookResponse guestbookResponse) {
this.guestbookResponse = guestbookResponse;
}
public Guestbook getSelectedGuestbook() {
return selectedGuestbook;
}
public void setSelectedGuestbook(Guestbook selectedGuestbook) {
this.selectedGuestbook = selectedGuestbook;
}
public void viewSelectedGuestbook(Guestbook selectedGuestbook) {
this.selectedGuestbook = selectedGuestbook;
}
public void reset() {
dataset.setGuestbook(null);
}
public int getFilePaginatorPage() {
return filePaginatorPage;
}
public void setFilePaginatorPage(int filePaginatorPage) {
this.filePaginatorPage = filePaginatorPage;
}
public int getRowsPerPage() {
return rowsPerPage;
}
public void setRowsPerPage(int rowsPerPage) {
this.rowsPerPage = rowsPerPage;
}
public String getGlobalId() {
return persistentId;
}
public String getPersistentId() {
return persistentId;
}
public void setPersistentId(String persistentId) {
this.persistentId = persistentId;
}
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public String getShowVersionList() {
return showVersionList;
}
public void setShowVersionList(String showVersionList) {
this.showVersionList = showVersionList;
}
public String getShowOtherText() {
return showOtherText;
}
public void setShowOtherText(String showOtherText) {
this.showOtherText = showOtherText;
}
private String showOtherText = "false";
public String getDeaccessionForwardURLFor() {
return deaccessionForwardURLFor;
}
public void setDeaccessionForwardURLFor(String deaccessionForwardURLFor) {
this.deaccessionForwardURLFor = deaccessionForwardURLFor;
}
private DatasetVersionDifference datasetVersionDifference;
public String getDeaccessionReasonText() {
return deaccessionReasonText;
}
public void setDeaccessionReasonText(String deaccessionReasonText) {
this.deaccessionReasonText = deaccessionReasonText;
}
public String getDisplayCitation() {
//displayCitation = dataset.getCitation(false, workingVersion);
return displayCitation;
}
public void setDisplayCitation(String displayCitation) {
this.displayCitation = displayCitation;
}
public String getDropBoxSelection() {
return dropBoxSelection;
}
public String getDropBoxKey() {
// Site-specific DropBox application registration key is configured
// via a JVM option under glassfish.
//if (true)return "some-test-key"; // for debugging
String configuredDropBoxKey = System.getProperty("dataverse.dropbox.key");
if (configuredDropBoxKey != null) {
return configuredDropBoxKey;
}
return "";
}
public void setDropBoxSelection(String dropBoxSelection) {
this.dropBoxSelection = dropBoxSelection;
}
public Dataset getDataset() {
return dataset;
}
public void setDataset(Dataset dataset) {
this.dataset = dataset;
}
public DatasetVersion getWorkingVersion() {
return workingVersion;
}
public EditMode getEditMode() {
return editMode;
}
public void setEditMode(EditMode editMode) {
this.editMode = editMode;
}
public Long getOwnerId() {
return ownerId;
}
public void setOwnerId(Long ownerId) {
this.ownerId = ownerId;
}
public Long getVersionId() {
return versionId;
}
public void setVersionId(Long versionId) {
this.versionId = versionId;
}
public int getSelectedTabIndex() {
return selectedTabIndex;
}
public void setSelectedTabIndex(int selectedTabIndex) {
this.selectedTabIndex = selectedTabIndex;
}
public int getReleaseRadio() {
return releaseRadio;
}
public void setReleaseRadio(int releaseRadio) {
this.releaseRadio = releaseRadio;
}
public String getDatasetNextMajorVersion() {
return datasetNextMajorVersion;
}
public void setDatasetNextMajorVersion(String datasetNextMajorVersion) {
this.datasetNextMajorVersion = datasetNextMajorVersion;
}
public String getDatasetNextMinorVersion() {
return datasetNextMinorVersion;
}
public void setDatasetNextMinorVersion(String datasetNextMinorVersion) {
this.datasetNextMinorVersion = datasetNextMinorVersion;
}
public int getDeaccessionReasonRadio() {
return deaccessionReasonRadio;
}
public void setDeaccessionReasonRadio(int deaccessionReasonRadio) {
this.deaccessionReasonRadio = deaccessionReasonRadio;
}
public int getDeaccessionRadio() {
return deaccessionRadio;
}
public void setDeaccessionRadio(int deaccessionRadio) {
this.deaccessionRadio = deaccessionRadio;
}
public List<Template> getDataverseTemplates() {
return dataverseTemplates;
}
public void setDataverseTemplates(List<Template> dataverseTemplates) {
this.dataverseTemplates = dataverseTemplates;
}
public Template getDefaultTemplate() {
return defaultTemplate;
}
public void setDefaultTemplate(Template defaultTemplate) {
this.defaultTemplate = defaultTemplate;
}
public Template getSelectedTemplate() {
return selectedTemplate;
}
public void setSelectedTemplate(Template selectedTemplate) {
this.selectedTemplate = selectedTemplate;
}
public void updateSelectedTemplate(ValueChangeEvent event) {
selectedTemplate = (Template) event.getNewValue();
if (selectedTemplate != null) {
//then create new working version from the selected template
workingVersion.updateDefaultValuesFromTemplate(selectedTemplate);
updateDatasetFieldInputLevels();
} else {
workingVersion.initDefaultValues();
updateDatasetFieldInputLevels();
}
resetVersionUI();
}
/*
// Original
private void updateDatasetFieldInputLevels() {
Long dvIdForInputLevel = ownerId;
if (!dataverseService.find(ownerId).isMetadataBlockRoot()) {
dvIdForInputLevel = dataverseService.find(ownerId).getMetadataRootId();
}
for (DatasetField dsf : workingVersion.getFlatDatasetFields()) {
DataverseFieldTypeInputLevel dsfIl = dataverseFieldTypeInputLevelService.findByDataverseIdDatasetFieldTypeId(dvIdForInputLevel, dsf.getDatasetFieldType().getId());
if (dsfIl != null) {
dsf.setInclude(dsfIl.isInclude());
} else {
dsf.setInclude(true);
}
}
}*/
/***
*
* Note: Updated to retrieve DataverseFieldTypeInputLevel objects in single query
*
*/
private void updateDatasetFieldInputLevels() {
Long dvIdForInputLevel = ownerId;
// OPTIMIZATION (?): replaced "dataverseService.find(ownerId)" with
// simply dataset.getOwner()... saves us a few lookups.
// TODO: could there possibly be any reason we want to look this
// dataverse up by the id here?? -- L.A. 4.2.1
if (!dataset.getOwner().isMetadataBlockRoot()) {
dvIdForInputLevel = dataset.getOwner().getMetadataRootId();
}
/* ---------------------------------------------------------
Map to hold DatasetFields
Format: { DatasetFieldType.id : DatasetField }
--------------------------------------------------------- */
// Initialize Map
Map<Long, DatasetField> mapDatasetFields = new HashMap<>();
// Populate Map
for (DatasetField dsf : workingVersion.getFlatDatasetFields()) {
if (dsf.getDatasetFieldType().getId() != null){
mapDatasetFields.put(dsf.getDatasetFieldType().getId(), dsf);
}
}
/* ---------------------------------------------------------
Retrieve List of DataverseFieldTypeInputLevel objects
Use the DatasetFieldType id's which are the Map's keys
--------------------------------------------------------- */
List<Long> idList = new ArrayList<>(mapDatasetFields.keySet());
List<DataverseFieldTypeInputLevel> dsFieldTypeInputLevels = dataverseFieldTypeInputLevelService.findByDataverseIdAndDatasetFieldTypeIdList(dvIdForInputLevel, idList);
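// Single query for all the input levels of interest - this replaces the
// per-field lookups in the commented-out version of this method above.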
/* ---------------------------------------------------------
Iterate through List of DataverseFieldTypeInputLevel objects
Call "setInclude" on its related DatasetField object
--------------------------------------------------------- */
for (DataverseFieldTypeInputLevel oneDSFieldTypeInputLevel : dsFieldTypeInputLevels){
if (oneDSFieldTypeInputLevel != null) {
// Is the DatasetField in the hash? hash format: { DatasetFieldType.id : DatasetField }
DatasetField dsf = mapDatasetFields.get(oneDSFieldTypeInputLevel.getDatasetFieldType().getId());
if (dsf != null){
// Yes, call "setInclude"
dsf.setInclude(oneDSFieldTypeInputLevel.isInclude());
// remove from hash
mapDatasetFields.remove(oneDSFieldTypeInputLevel.getDatasetFieldType().getId());
}
}
} // end: for loop over the retrieved input levels
/* ---------------------------------------------------------
Iterate through any DatasetField objects remaining in the hash
Call "setInclude(true) on each one
--------------------------------------------------------- */
for ( DatasetField dsf : mapDatasetFields.values()) {
if (dsf != null){
dsf.setInclude(true);
}
}
}
public void handleChange() {
logger.fine("handle change");
logger.fine("new value " + selectedTemplate.getId());
}
public void handleChangeButton() {
}
public boolean isShapefileType(FileMetadata fm) {
if (fm == null) {
return false;
}
if (fm.getDataFile() == null) {
return false;
}
return fm.getDataFile().isShapefileType();
}
/*
Check if the FileMetadata.dataFile has an associated MapLayerMetadata object
The MapLayerMetadata objects have been fetched at page inception by "loadMapLayerMetadataLookup()"
*/
public boolean hasMapLayerMetadata(FileMetadata fm) {
if (fm == null) {
return false;
}
if (fm.getDataFile() == null) {
return false;
}
return doesDataFileHaveMapLayerMetadata(fm.getDataFile());
}
/**
* Check if a DataFile has an associated MapLayerMetadata object
*
* The MapLayerMetadata objects have been fetched at page inception by
* "loadMapLayerMetadataLookup()"
*/
private boolean doesDataFileHaveMapLayerMetadata(DataFile df) {
if (df == null) {
return false;
}
if (df.getId() == null) {
return false;
}
return this.mapLayerMetadataLookup.containsKey(df.getId());
}
/**
* Using a DataFile id, retrieve an associated MapLayerMetadata object
*
* The MapLayerMetadata objects have been fetched at page inception by
* "loadMapLayerMetadataLookup()"
*/
public MapLayerMetadata getMapLayerMetadata(DataFile df) {
if (df == null) {
return null;
}
return this.mapLayerMetadataLookup.get(df.getId());
}
private void msg(String s){
// System.out.println(s);
}
/**
* Create a hashmap consisting of { DataFile.id : MapLayerMetadata object}
*
* Very few DataFiles will have associated MapLayerMetadata objects so only
* use 1 query to get them
*/
private void loadMapLayerMetadataLookup() {
if (this.dataset == null) {
return;
}
if (this.dataset.getId() == null) {
return;
}
List<MapLayerMetadata> mapLayerMetadataList = mapLayerMetadataService.getMapLayerMetadataForDataset(this.dataset);
if (mapLayerMetadataList == null) {
return;
}
for (MapLayerMetadata layer_metadata : mapLayerMetadataList) {
mapLayerMetadataLookup.put(layer_metadata.getDataFile().getId(), layer_metadata);
}
} // A DataFile may have a related MapLayerMetadata object
private List<FileMetadata> displayFileMetadata;
public List<FileMetadata> getDisplayFileMetadata() {
return displayFileMetadata;
}
public void setDisplayFileMetadata(List<FileMetadata> displayFileMetadata) {
this.displayFileMetadata = displayFileMetadata;
}
private boolean readOnly = true;
public String init() {
return init(true);
}
public String initCitation() {
return init(false);
}
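// Rough flow of init(): resolve the dataset/version being viewed (by persistent id,
// dataset id, or version id), handle harvested / not-found / not-authorized cases,
// then set up the files, citation and related UI state for view mode - or, when only
// an ownerId is supplied, prepare a brand new dataset (create mode) from the owner's templates.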
private String init(boolean initFull) {
//System.out.println("_YE_OLDE_QUERY_COUNTER_"); // for debug purposes
this.maxFileUploadSizeInBytes = systemConfig.getMaxFileUploadSize();
setDataverseSiteUrl(systemConfig.getDataverseSiteUrl());
guestbookResponse = new GuestbookResponse();
String nonNullDefaultIfKeyNotFound = "";
protocol = settingsWrapper.getValueForKey(SettingsServiceBean.Key.Protocol, nonNullDefaultIfKeyNotFound);
authority = settingsWrapper.getValueForKey(SettingsServiceBean.Key.Authority, nonNullDefaultIfKeyNotFound);
if (dataset.getId() != null || versionId != null || persistentId != null) { // view mode for a dataset
DatasetVersionServiceBean.RetrieveDatasetVersionResponse retrieveDatasetVersionResponse = null;
// ---------------------------------------
// Set the workingVersion and Dataset
// ---------------------------------------
if (persistentId != null) {
logger.fine("initializing DatasetPage with persistent ID " + persistentId);
// Set Working Version and Dataset by PersistentID
dataset = datasetService.findByGlobalId(persistentId);
if (dataset == null) {
logger.warning("No such dataset: "+persistentId);
return permissionsWrapper.notFound();
}
logger.fine("retrieved dataset, id="+dataset.getId());
retrieveDatasetVersionResponse = datasetVersionService.selectRequestedVersion(dataset.getVersions(), version);
//retrieveDatasetVersionResponse = datasetVersionService.retrieveDatasetVersionByPersistentId(persistentId, version);
this.workingVersion = retrieveDatasetVersionResponse.getDatasetVersion();
logger.fine("retrieved version: id: " + workingVersion.getId() + ", state: " + this.workingVersion.getVersionState());
} else if (dataset.getId() != null) {
// Set Working Version and Dataset by Dataset Id and Version
dataset = datasetService.find(dataset.getId());
if (dataset == null) {
logger.warning("No such dataset: "+dataset);
return permissionsWrapper.notFound();
}
//retrieveDatasetVersionResponse = datasetVersionService.retrieveDatasetVersionById(dataset.getId(), version);
retrieveDatasetVersionResponse = datasetVersionService.selectRequestedVersion(dataset.getVersions(), version);
this.workingVersion = retrieveDatasetVersionResponse.getDatasetVersion();
logger.info("retreived version: id: " + workingVersion.getId() + ", state: " + this.workingVersion.getVersionState());
} else if (versionId != null) {
// TODO: 4.2.1 - this method is broken as of now!
// Set Working Version and Dataset by DatasetVersion Id
//retrieveDatasetVersionResponse = datasetVersionService.retrieveDatasetVersionByVersionId(versionId);
}
if (retrieveDatasetVersionResponse == null) {
return permissionsWrapper.notFound();
}
//this.dataset = this.workingVersion.getDataset();
// end: Set the workingVersion and Dataset
// ---------------------------------------
// Is the DatasetVersion or Dataset null?
//
if (workingVersion == null || this.dataset == null) {
return permissionsWrapper.notFound();
}
// Is the Dataset harvested?
if (dataset.isHarvested()) {
// if so, we'll simply forward to the remote URL for the original
// source of this harvested dataset:
String originalSourceURL = dataset.getRemoteArchiveURL();
if (originalSourceURL != null && !originalSourceURL.equals("")) {
logger.fine("redirecting to "+originalSourceURL);
try {
FacesContext.getCurrentInstance().getExternalContext().redirect(originalSourceURL);
} catch (IOException ioex) {
// must be a bad URL...
// we don't need to do anything special here - we'll redirect
// to the local 404 page, below.
logger.warning("failed to issue a redirect to "+originalSourceURL);
}
return originalSourceURL;
}
return permissionsWrapper.notFound();
}
// Check permissions
if (!(workingVersion.isReleased() || workingVersion.isDeaccessioned()) && !this.canViewUnpublishedDataset()) {
return permissionsWrapper.notAuthorized();
}
if (!retrieveDatasetVersionResponse.wasRequestedVersionRetrieved()) {
//msg("checkit " + retrieveDatasetVersionResponse.getDifferentVersionMessage());
JsfHelper.addWarningMessage(retrieveDatasetVersionResponse.getDifferentVersionMessage());//JH.localize("dataset.message.metadataSuccess"));
}
// init the citation
displayCitation = dataset.getCitation(true, workingVersion);
if (initFull) {
// init the list of FileMetadatas
if (workingVersion.isDraft() && canUpdateDataset()) {
readOnly = false;
} else {
// an attempt to retrieve both the filemetadatas and datafiles early on, so that
// we don't have to do so later (possibly, many more times than necessary):
datafileService.findFileMetadataOptimizedExperimental(dataset);
}
fileMetadatasSearch = workingVersion.getFileMetadatasSorted();
ownerId = dataset.getOwner().getId();
datasetNextMajorVersion = this.dataset.getNextMajorVersionString();
datasetNextMinorVersion = this.dataset.getNextMinorVersionString();
datasetVersionUI = datasetVersionUI.initDatasetVersionUI(workingVersion, false);
updateDatasetFieldInputLevels();
setExistReleasedVersion(resetExistRealeaseVersion());
//moving setVersionTabList to tab change event
//setVersionTabList(resetVersionTabList());
//setReleasedVersionTabList(resetReleasedVersionTabList());
//SEK - lazymodel may be needed for datascroller in future release
// lazyModel = new LazyFileMetadataDataModel(workingVersion.getId(), datafileService );
// populate MapLayerMetadata
this.loadMapLayerMetadataLookup(); // A DataFile may have a related MapLayerMetadata object
this.guestbookResponse = guestbookResponseService.initGuestbookResponseForFragment(workingVersion, null, session);
this.getFileDownloadHelper().setGuestbookResponse(guestbookResponse);
logger.fine("Checking if rsync support is enabled.");
if (DataCaptureModuleUtil.rsyncSupportEnabled(settingsWrapper.getValueForKey(SettingsServiceBean.Key.UploadMethods))) {
try {
ScriptRequestResponse scriptRequestResponse = commandEngine.submit(new RequestRsyncScriptCommand(dvRequestService.getDataverseRequest(), dataset));
logger.fine("script: " + scriptRequestResponse.getScript());
if(scriptRequestResponse.getScript()!=null && !scriptRequestResponse.getScript().isEmpty()){
setHasRsyncScript(true);
setRsyncScript(scriptRequestResponse.getScript());
rsyncScriptFilename = "upload-"+ workingVersion.getDataset().getIdentifier() + ".bash";
}
else{
setHasRsyncScript(false);
}
} catch (RuntimeException ex) {
logger.warning("Problem getting rsync script: " + ex.getLocalizedMessage());
} catch (CommandException cex) {
logger.warning("Problem getting rsync script (Command Exception): " + cex.getLocalizedMessage());
}
}
}
} else if (ownerId != null) {
// create mode for a new child dataset
readOnly = false;
editMode = EditMode.CREATE;
dataset.setOwner(dataverseService.find(ownerId));
dataset.setProtocol(protocol);
dataset.setAuthority(authority);
//Wait until the create command before actually getting an identifier
if (dataset.getOwner() == null) {
return permissionsWrapper.notFound();
} else if (!permissionService.on(dataset.getOwner()).has(Permission.AddDataset)) {
return permissionsWrapper.notAuthorized();
}
dataverseTemplates.addAll(dataverseService.find(ownerId).getTemplates());
if (!dataverseService.find(ownerId).isTemplateRoot()) {
dataverseTemplates.addAll(dataverseService.find(ownerId).getParentTemplates());
}
Collections.sort(dataverseTemplates, (Template t1, Template t2) -> t1.getName().compareToIgnoreCase(t2.getName()));
defaultTemplate = dataverseService.find(ownerId).getDefaultTemplate();
if (defaultTemplate != null) {
selectedTemplate = defaultTemplate;
for (Template testT : dataverseTemplates) {
if (defaultTemplate.getId().equals(testT.getId())) {
selectedTemplate = testT;
}
}
workingVersion = dataset.getEditVersion(selectedTemplate);
updateDatasetFieldInputLevels();
} else {
workingVersion = dataset.getCreateVersion();
updateDatasetFieldInputLevels();
}
if (settingsWrapper.isTrueForKey(SettingsServiceBean.Key.PublicInstall, false)){
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.message.publicInstall"));
}
resetVersionUI();
// FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_INFO, "Add New Dataset", " - Enter metadata to create the dataset's citation. You can add more metadata about this dataset after it's created."));
} else {
return permissionsWrapper.notFound();
}
try {
privateUrl = commandEngine.submit(new GetPrivateUrlCommand(dvRequestService.getDataverseRequest(), dataset));
if (privateUrl != null) {
JH.addMessage(FacesMessage.SEVERITY_INFO, BundleUtil.getStringFromBundle("dataset.privateurl.infoMessageAuthor", Arrays.asList(getPrivateUrlLink(privateUrl))));
}
} catch (CommandException ex) {
// No big deal. The user simply doesn't have access to create or delete a Private URL.
}
if (session.getUser() instanceof PrivateUrlUser) {
PrivateUrlUser privateUrlUser = (PrivateUrlUser) session.getUser();
if (dataset != null && dataset.getId().equals(privateUrlUser.getDatasetId())) {
JH.addMessage(FacesMessage.SEVERITY_INFO, BundleUtil.getStringFromBundle("dataset.privateurl.infoMessageReviewer"));
}
}
// Various info messages, when the dataset is locked (for various reasons):
if (dataset.isLocked()) {
if (dataset.isLockedFor(DatasetLock.Reason.Workflow)) {
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.locked.message"),
BundleUtil.getStringFromBundle("dataset.publish.workflow.inprogress"));
}
if (dataset.isLockedFor(DatasetLock.Reason.InReview)) {
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.locked.inReview.message"),
BundleUtil.getStringFromBundle("dataset.inreview.infoMessage"));
}
if (dataset.isLockedFor(DatasetLock.Reason.DcmUpload)) {
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("file.rsyncUpload.inProgressMessage.summary"),
BundleUtil.getStringFromBundle("file.rsyncUpload.inProgressMessage.details"));
}
//This is a hack to remove dataset locks for File PID registration if
//the dataset is released
//in testing we had cases where datasets with 1000 files were remaining locked after being published successfully
/*if(dataset.getLatestVersion().isReleased() && dataset.isLockedFor(DatasetLock.Reason.pidRegister)){
datasetService.removeDatasetLocks(dataset.getId(), DatasetLock.Reason.pidRegister);
}*/
if (dataset.isLockedFor(DatasetLock.Reason.pidRegister)) {
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.pidRegister.workflow.inprogress"),
BundleUtil.getStringFromBundle("dataset.publish.workflow.inprogress"));
}
}
configureTools = externalToolService.findByType(ExternalTool.Type.CONFIGURE);
exploreTools = externalToolService.findByType(ExternalTool.Type.EXPLORE);
rowsPerPage = 10;
return null;
}
public boolean isReadOnly() {
return readOnly;
}
private void resetVersionUI() {
datasetVersionUI = datasetVersionUI.initDatasetVersionUI(workingVersion, true);
if (isSessionUserAuthenticated()) {
AuthenticatedUser au = (AuthenticatedUser) session.getUser();
//On create set pre-populated fields
for (DatasetField dsf : dataset.getEditVersion().getDatasetFields()) {
if (dsf.getDatasetFieldType().getName().equals(DatasetFieldConstant.depositor) && dsf.isEmpty()) {
dsf.getDatasetFieldValues().get(0).setValue(au.getLastName() + ", " + au.getFirstName());
}
if (dsf.getDatasetFieldType().getName().equals(DatasetFieldConstant.dateOfDeposit) && dsf.isEmpty()) {
dsf.getDatasetFieldValues().get(0).setValue(new SimpleDateFormat("yyyy-MM-dd").format(new Timestamp(new Date().getTime())));
}
if (dsf.getDatasetFieldType().getName().equals(DatasetFieldConstant.datasetContact) && dsf.isEmpty()) {
for (DatasetFieldCompoundValue contactValue : dsf.getDatasetFieldCompoundValues()) {
for (DatasetField subField : contactValue.getChildDatasetFields()) {
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.datasetContactName)) {
subField.getDatasetFieldValues().get(0).setValue(au.getLastName() + ", " + au.getFirstName());
}
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.datasetContactAffiliation)) {
subField.getDatasetFieldValues().get(0).setValue(au.getAffiliation());
}
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.datasetContactEmail)) {
subField.getDatasetFieldValues().get(0).setValue(au.getEmail());
}
}
}
}
String creatorOrcidId = au.getOrcidId();
if (dsf.getDatasetFieldType().getName().equals(DatasetFieldConstant.author) && dsf.isEmpty()) {
for (DatasetFieldCompoundValue authorValue : dsf.getDatasetFieldCompoundValues()) {
for (DatasetField subField : authorValue.getChildDatasetFields()) {
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.authorName)) {
subField.getDatasetFieldValues().get(0).setValue(au.getLastName() + ", " + au.getFirstName());
}
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.authorAffiliation)) {
subField.getDatasetFieldValues().get(0).setValue(au.getAffiliation());
}
if (creatorOrcidId != null) {
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.authorIdValue)) {
subField.getDatasetFieldValues().get(0).setValue(creatorOrcidId);
}
if (subField.getDatasetFieldType().getName().equals(DatasetFieldConstant.authorIdType)) {
DatasetFieldType authorIdTypeDatasetField = fieldService.findByName(DatasetFieldConstant.authorIdType);
subField.setSingleControlledVocabularyValue(fieldService.findControlledVocabularyValueByDatasetFieldTypeAndStrValue(authorIdTypeDatasetField, "ORCID", true));
}
}
}
}
}
}
}
}
private boolean bulkUpdateCheckVersion(){
return workingVersion.isReleased();
}
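// When a bulk operation is about to turn a released version into a new draft, the
// previously selected FileMetadata objects still belong to the old version; this
// re-resolves each selection against the fresh edit version (matching on the
// underlying DataFile), preserving the terms-of-access settings along the way.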
private void refreshSelectedFiles(){
if (readOnly) {
dataset = datasetService.find(dataset.getId());
}
String termsOfAccess = workingVersion.getTermsOfUseAndAccess().getTermsOfAccess();
boolean requestAccess = workingVersion.getTermsOfUseAndAccess().isFileAccessRequest();
workingVersion = dataset.getEditVersion();
workingVersion.getTermsOfUseAndAccess().setTermsOfAccess(termsOfAccess);
workingVersion.getTermsOfUseAndAccess().setFileAccessRequest(requestAccess);
List <FileMetadata> newSelectedFiles = new ArrayList<>();
for (FileMetadata fmd : selectedFiles){
for (FileMetadata fmdn: workingVersion.getFileMetadatas()){
if (fmd.getDataFile().equals(fmdn.getDataFile())){
newSelectedFiles.add(fmdn);
}
}
}
selectedFiles.clear();
for (FileMetadata fmdn : newSelectedFiles ){
selectedFiles.add(fmdn);
}
readOnly = false;
}
public void testSelectedFilesForMapData(){
setSelectedFilesHasMapLayer(false);
for (FileMetadata fmd : selectedFiles){
if(worldMapPermissionHelper.hasMapLayerMetadata(fmd)){
setSelectedFilesHasMapLayer(true);
return; //only need one for warning message
}
}
}
private boolean selectedFilesHasMapLayer;
public boolean isSelectedFilesHasMapLayer() {
return selectedFilesHasMapLayer;
}
public void setSelectedFilesHasMapLayer(boolean selectedFilesHasMapLayer) {
this.selectedFilesHasMapLayer = selectedFilesHasMapLayer;
}
private Integer chunkSize = 25;
public Integer getChunkSize() {
return chunkSize;
}
public void setChunkSize(Integer chunkSize) {
this.chunkSize = chunkSize;
}
public void viewAllButtonPress(){
setChunkSize(fileMetadatasSearch.size());
}
private int activeTabIndex;
public int getActiveTabIndex() {
return activeTabIndex;
}
public void setActiveTabIndex(int activeTabIndex) {
this.activeTabIndex = activeTabIndex;
}
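// The version tab lists are built lazily - only when that tab (index 3) is activated;
// switching back to the first tab re-runs init() to refresh the page state.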
public void tabChanged(TabChangeEvent event) {
TabView tv = (TabView) event.getComponent();
this.activeTabIndex = tv.getActiveIndex();
if (this.activeTabIndex == 3) {
setVersionTabList(resetVersionTabList());
setReleasedVersionTabList(resetReleasedVersionTabList());
} else {
releasedVersionTabList = new ArrayList<>();
versionTabList = new ArrayList<>();
if(this.activeTabIndex == 0) {
init();
}
}
}
public void edit(EditMode editMode) {
this.editMode = editMode;
if (this.readOnly) {
dataset = datasetService.find(dataset.getId());
}
workingVersion = dataset.getEditVersion();
if (editMode == EditMode.INFO) {
// ?
} else if (editMode == EditMode.FILE) {
// JH.addMessage(FacesMessage.SEVERITY_INFO, JH.localize("dataset.message.editFiles"));
// FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_INFO, "Upload + Edit Dataset Files", " - You can drag and drop your files from your desktop, directly into the upload widget."));
} else if (editMode.equals(EditMode.METADATA)) {
datasetVersionUI = datasetVersionUI.initDatasetVersionUI(workingVersion, true);
updateDatasetFieldInputLevels();
JH.addMessage(FacesMessage.SEVERITY_INFO, JH.localize("dataset.message.editMetadata"));
//FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_INFO, "Edit Dataset Metadata", " - Add more metadata about your dataset to help others easily find it."));
} else if (editMode.equals(EditMode.LICENSE)){
JH.addMessage(FacesMessage.SEVERITY_INFO, JH.localize("dataset.message.editTerms"));
//FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_INFO, "Edit Dataset License and Terms", " - Update your dataset's license and terms of use."));
}
this.readOnly = false;
}
public String releaseDraft() {
if (releaseRadio == 1) {
return releaseDataset(true);
} else {
return releaseDataset(false);
}
}
public String releaseMajor() {
return releaseDataset(false);
}
public String sendBackToContributor() {
try {
//FIXME - Get Return Comment from sendBackToContributor popup
Command<Dataset> cmd = new ReturnDatasetToAuthorCommand(dvRequestService.getDataverseRequest(), dataset, "");
dataset = commandEngine.submit(cmd);
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.reject.success"));
} catch (CommandException ex) {
String message = ex.getMessage();
logger.log(Level.SEVERE, "sendBackToContributor: {0}", message);
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataset.reject.failure", Collections.singletonList(message)));
}
/*
The notifications below are redundant, since the ReturnDatasetToAuthorCommand
sends them already. - L.A. Sep. 7 2017
List<AuthenticatedUser> authUsers = permissionService.getUsersWithPermissionOn(Permission.PublishDataset, dataset);
List<AuthenticatedUser> editUsers = permissionService.getUsersWithPermissionOn(Permission.EditDataset, dataset);
editUsers.removeAll(authUsers);
new HashSet<>(editUsers).forEach( au ->
userNotificationService.sendNotification(au, new Timestamp(new Date().getTime()),
UserNotification.Type.RETURNEDDS, dataset.getLatestVersion().getId())
);
*/
//FacesMessage message = new FacesMessage(FacesMessage.SEVERITY_INFO, "DatasetSubmitted", "This dataset has been sent back to the contributor.");
//FacesContext.getCurrentInstance().addMessage(null, message);
return returnToLatestVersion();
}
public String submitDataset() {
try {
Command<Dataset> cmd = new SubmitDatasetForReviewCommand( dvRequestService.getDataverseRequest(), dataset);
dataset = commandEngine.submit(cmd);
//JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.submit.success"));
} catch (CommandException ex) {
String message = ex.getMessage();
logger.log(Level.SEVERE, "submitDataset: {0}", message);
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataset.submit.failure", Collections.singletonList(message)));
}
return returnToLatestVersion();
}
public String releaseParentDVAndDataset(){
releaseParentDV();
return releaseDataset(false);
}
public String releaseDataset() {
return releaseDataset(false);
}
private void releaseParentDV(){
if (session.getUser() instanceof AuthenticatedUser) {
PublishDataverseCommand cmd = new PublishDataverseCommand(dvRequestService.getDataverseRequest(), dataset.getOwner());
try {
commandEngine.submit(cmd);
JsfHelper.addSuccessMessage(JH.localize("dataverse.publish.success"));
} catch (CommandException ex) {
logger.log(Level.SEVERE, "Unexpected Exception calling publish dataverse command", ex);
JsfHelper.addErrorMessage(JH.localize("dataverse.publish.failure"));
}
} else {
FacesMessage message = new FacesMessage(FacesMessage.SEVERITY_INFO, BundleUtil.getStringFromBundle("dataverse.notreleased") ,BundleUtil.getStringFromBundle( "dataverse.release.authenticatedUsersOnly"));
FacesContext.getCurrentInstance().addMessage(null, message);
}
}
public String deaccessionVersions() {
Command<DatasetVersion> cmd;
try {
if (selectedDeaccessionVersions == null) {
for (DatasetVersion dv : this.dataset.getVersions()) {
if (dv.isReleased()) {
DatasetVersion deaccession = datasetVersionService.find(dv.getId());
cmd = new DeaccessionDatasetVersionCommand(dvRequestService.getDataverseRequest(), setDatasetVersionDeaccessionReasonAndURL(deaccession), true);
DatasetVersion datasetv = commandEngine.submit(cmd);
}
}
} else {
for (DatasetVersion dv : selectedDeaccessionVersions) {
DatasetVersion deaccession = datasetVersionService.find(dv.getId());
cmd = new DeaccessionDatasetVersionCommand(dvRequestService.getDataverseRequest(), setDatasetVersionDeaccessionReasonAndURL(deaccession), false);
DatasetVersion datasetv = commandEngine.submit(cmd);
}
}
JsfHelper.addSuccessMessage(JH.localize("datasetVersion.message.deaccessionSuccess"));
} catch (CommandException ex) {
logger.severe(ex.getMessage());
JH.addMessage(FacesMessage.SEVERITY_FATAL, JH.localize("dataset.message.deaccessionFailure"));
}
return returnToDatasetOnly();
}
private DatasetVersion setDatasetVersionDeaccessionReasonAndURL(DatasetVersion dvIn) {
int deaccessionReasonCode = getDeaccessionReasonRadio();
String deaccessionReasonDetail = getDeaccessionReasonText() != null ? getDeaccessionReasonText().trim() : "";
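// The codes below mirror the deaccession-reason radio options on the page; code 7
// (presumably the "other" option) adds no canned note, so only the free-text detail
// appended further down ends up in the version note.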
switch (deaccessionReasonCode) {
case 1:
dvIn.setVersionNote("There is identifiable data in one or more files.");
break;
case 2:
dvIn.setVersionNote("The research article has been retracted.");
break;
case 3:
dvIn.setVersionNote("The dataset has been transferred to another repository.");
break;
case 4:
dvIn.setVersionNote("IRB request.");
break;
case 5:
dvIn.setVersionNote("Legal issue or Data Usage Agreement.");
break;
case 6:
dvIn.setVersionNote("Not a valid dataset.");
break;
case 7:
break;
}
if (!deaccessionReasonDetail.isEmpty()){
if (!StringUtil.isEmpty(dvIn.getVersionNote())){
dvIn.setVersionNote(dvIn.getVersionNote() + " " + deaccessionReasonDetail);
} else {
dvIn.setVersionNote(deaccessionReasonDetail);
}
}
dvIn.setArchiveNote(getDeaccessionForwardURLFor());
return dvIn;
}
public boolean isMapLayerToBeDeletedOnPublish(){
for (FileMetadata fmd : workingVersion.getFileMetadatas()){
if (worldMapPermissionHelper.hasMapLayerMetadata(fmd)){
if (fmd.isRestricted() || fmd.isRestrictedUI()){
return true;
}
}
}
return false;
}
private String releaseDataset(boolean minor) {
if (session.getUser() instanceof AuthenticatedUser) {
try {
final PublishDatasetResult result = commandEngine.submit(
new PublishDatasetCommand(dataset, dvRequestService.getDataverseRequest(), minor)
);
dataset = result.getDataset();
// Sucessfully executing PublishDatasetCommand does not guarantee that the dataset
// has been published. If a publishing workflow is configured, this may have sent the
// dataset into a workflow limbo, potentially waiting for a third party system to complete
// the process. So it may be premature to show the "success" message at this point.
if ( result.isCompleted() ) {
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.message.publishSuccess"));
} else {
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.locked.message"), BundleUtil.getStringFromBundle("dataset.publish.workflow.inprogress"));
}
} catch (CommandException ex) {
JsfHelper.addErrorMessage(ex.getLocalizedMessage());
logger.severe(ex.getMessage());
}
} else {
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataset.message.only.authenticatedUsers"));
}
return returnToDatasetOnly();
}
public String registerDataset() {
try {
UpdateDatasetVersionCommand cmd = new UpdateDatasetVersionCommand(dataset, dvRequestService.getDataverseRequest());
cmd.setValidateLenient(true);
dataset = commandEngine.submit(cmd);
} catch (CommandException ex) {
FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_WARN,BundleUtil.getStringFromBundle( "dataset.registration.failed"), " - " + ex.toString()));
logger.severe(ex.getMessage());
}
FacesMessage message = new FacesMessage(FacesMessage.SEVERITY_INFO, BundleUtil.getStringFromBundle("dataset.registered"), BundleUtil.getStringFromBundle("dataset.registered.msg"));
FacesContext.getCurrentInstance().addMessage(null, message);
return returnToDatasetOnly();
}
public void refresh(ActionEvent e) {
refresh();
}
public void refresh() {
logger.fine("refreshing");
//dataset = datasetService.find(dataset.getId());
// Remember the database id before clearing the stale dataset reference,
// so the id-based lookup below doesn't dereference null:
Long datasetId = (dataset != null) ? dataset.getId() : null;
dataset = null;
logger.fine("refreshing working version");
DatasetVersionServiceBean.RetrieveDatasetVersionResponse retrieveDatasetVersionResponse = null;
if (persistentId != null) {
//retrieveDatasetVersionResponse = datasetVersionService.retrieveDatasetVersionByPersistentId(persistentId, version);
dataset = datasetService.findByGlobalId(persistentId);
retrieveDatasetVersionResponse = datasetVersionService.selectRequestedVersion(dataset.getVersions(), version);
} else if (versionId != null) {
retrieveDatasetVersionResponse = datasetVersionService.retrieveDatasetVersionByVersionId(versionId);
} else if (datasetId != null) {
//retrieveDatasetVersionResponse = datasetVersionService.retrieveDatasetVersionById(datasetId, version);
dataset = datasetService.find(datasetId);
retrieveDatasetVersionResponse = datasetVersionService.selectRequestedVersion(dataset.getVersions(), version);
}
if (retrieveDatasetVersionResponse == null) {
// TODO:
// should probably redirect to the 404 page, if we can't find
// this version anymore.
// -- L.A. 4.2.3
return;
}
this.workingVersion = retrieveDatasetVersionResponse.getDatasetVersion();
if (this.workingVersion == null) {
// TODO:
// same as the above
return;
}
if (dataset == null) {
// this would be the case if we were retrieving the version by
// the versionId, above.
this.dataset = this.workingVersion.getDataset();
}
if (readOnly) {
datafileService.findFileMetadataOptimizedExperimental(dataset);
}
fileMetadatasSearch = workingVersion.getFileMetadatasSorted();
displayCitation = dataset.getCitation(true, workingVersion);
stateChanged = false;
}
public String deleteDataset() {
DestroyDatasetCommand cmd;
try {
cmd = new DestroyDatasetCommand(dataset, dvRequestService.getDataverseRequest());
commandEngine.submit(cmd);
/* - need to figure out what to do
Update notification in Delete Dataset Method
for (UserNotification und : userNotificationService.findByDvObject(dataset.getId())){
userNotificationService.delete(und);
} */
JsfHelper.addSuccessMessage(JH.localize("dataset.message.deleteSuccess"));
} catch (CommandException ex) {
JH.addMessage(FacesMessage.SEVERITY_FATAL, JH.localize("dataset.message.deleteFailure"));
logger.severe(ex.getMessage());
}
return "/dataverse.xhtml?alias=" + dataset.getOwner().getAlias() + "&faces-redirect=true";
}
public String editFileMetadata(){
// If there are no files selected, return an empty string - which
// means, do nothing, don't redirect anywhere, stay on this page.
// The dialogue telling the user to select at least one file will
// be shown to them by an onclick javascript method attached to the
// filemetadata edit button on the page.
// -- L.A. 4.2.1
if (this.selectedFiles == null || this.selectedFiles.size() < 1) {
return "";
}
return "/editdatafiles.xhtml?selectedFileIds=" + getSelectedFilesIdsString() + "&datasetId=" + dataset.getId() +"&faces-redirect=true";
}
public String deleteDatasetVersion() {
DeleteDatasetVersionCommand cmd;
try {
cmd = new DeleteDatasetVersionCommand(dvRequestService.getDataverseRequest(), dataset);
commandEngine.submit(cmd);
JsfHelper.addSuccessMessage(JH.localize("datasetVersion.message.deleteSuccess"));
} catch (CommandException ex) {
JH.addMessage(FacesMessage.SEVERITY_FATAL, JH.localize("dataset.message.deleteFailure"));
logger.severe(ex.getMessage());
}
return returnToDatasetOnly();
}
private List<FileMetadata> selectedFiles = new ArrayList<>();
public List<FileMetadata> getSelectedFiles() {
return selectedFiles;
}
public void setSelectedFiles(List<FileMetadata> selectedFiles) {
this.selectedFiles = selectedFiles;
}
private List<FileMetadata> selectedRestrictedFiles; // = new ArrayList<>();
public List<FileMetadata> getSelectedRestrictedFiles() {
return selectedRestrictedFiles;
}
public void setSelectedRestrictedFiles(List<FileMetadata> selectedRestrictedFiles) {
this.selectedRestrictedFiles = selectedRestrictedFiles;
}
private List<FileMetadata> selectedUnrestrictedFiles; // = new ArrayList<>();
public List<FileMetadata> getSelectedUnrestrictedFiles() {
return selectedUnrestrictedFiles;
}
public void setSelectedUnrestrictedFiles(List<FileMetadata> selectedUnrestrictedFiles) {
this.selectedUnrestrictedFiles = selectedUnrestrictedFiles;
}
private List<FileMetadata> selectedDownloadableFiles;
public List<FileMetadata> getSelectedDownloadableFiles() {
return selectedDownloadableFiles;
}
public void setSelectedDownloadableFiles(List<FileMetadata> selectedDownloadableFiles) {
this.selectedDownloadableFiles = selectedDownloadableFiles;
}
private List<FileMetadata> selectedNonDownloadableFiles;
public List<FileMetadata> getSelectedNonDownloadableFiles() {
return selectedNonDownloadableFiles;
}
public void setSelectedNonDownloadableFiles(List<FileMetadata> selectedNonDownloadableFiles) {
this.selectedNonDownloadableFiles = selectedNonDownloadableFiles;
}
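// Splits the current selection into downloadable / non-downloadable files, then either
// starts the download (via the guestbook popup when one is required), or shows the
// "invalid" / "mixed" dialogs when some or all of the selected files can't be downloaded.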
public void validateFilesForDownload(boolean guestbookRequired){
setSelectedDownloadableFiles(new ArrayList<>());
setSelectedNonDownloadableFiles(new ArrayList<>());
if (this.selectedFiles.isEmpty()) {
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('selectFilesForDownload').show()");
return;
}
for (FileMetadata fmd : this.selectedFiles){
if(this.fileDownloadHelper.canDownloadFile(fmd)){
getSelectedDownloadableFiles().add(fmd);
} else {
getSelectedNonDownloadableFiles().add(fmd);
}
}
if(!getSelectedDownloadableFiles().isEmpty() && getSelectedNonDownloadableFiles().isEmpty()){
if (guestbookRequired){
modifyGuestbookMultipleResponse();
} else{
startMultipleFileDownload(false);
}
}
if(getSelectedDownloadableFiles().isEmpty() && !getSelectedNonDownloadableFiles().isEmpty()){
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('downloadInvalid').show()");
return;
}
if(!getSelectedDownloadableFiles().isEmpty() && !getSelectedNonDownloadableFiles().isEmpty()){
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('downloadMixed').show()");
}
}
private boolean selectAllFiles;
public boolean isSelectAllFiles() {
return selectAllFiles;
}
public void setSelectAllFiles(boolean selectAllFiles) {
this.selectAllFiles = selectAllFiles;
}
public void toggleAllSelected(){
//This is here so that if the user selects all on the dataset page
// s/he will get all files on download
this.selectAllFiles = !this.selectAllFiles;
}
// helper Method
public String getSelectedFilesIdsString() {
String downloadIdString = "";
for (FileMetadata fmd : this.selectedFiles){
if (!StringUtil.isEmpty(downloadIdString)) {
downloadIdString += ",";
}
downloadIdString += fmd.getDataFile().getId();
}
return downloadIdString;
}
// helper Method
public String getSelectedDownloadableFilesIdsString() {
String downloadIdString = "";
for (FileMetadata fmd : this.selectedDownloadableFiles){
if (!StringUtil.isEmpty(downloadIdString)) {
downloadIdString += ",";
}
downloadIdString += fmd.getDataFile().getId();
}
return downloadIdString;
}
// helper Method
public String getSelectedFilesIdsStringForDownload() {
String downloadIdString = "";
for (FileMetadata fmd : this.selectedFiles){
if (!StringUtil.isEmpty(downloadIdString)) {
downloadIdString += ",";
}
downloadIdString += fmd.getDataFile().getId();
}
return downloadIdString;
}
public String getDownloadableFilesIdsString() {
String downloadIdString = "";
for (FileMetadata fmd : this.selectedDownloadableFiles){
if (!StringUtil.isEmpty(downloadIdString)) {
downloadIdString += ",";
}
downloadIdString += fmd.getDataFile().getId();
}
return downloadIdString;
}
public void updateFileCounts(){
setSelectedUnrestrictedFiles(new ArrayList<>());
setSelectedRestrictedFiles(new ArrayList<>());
setTabularDataSelected(false);
for (FileMetadata fmd : this.selectedFiles){
if(fmd.isRestricted()){
getSelectedRestrictedFiles().add(fmd);
} else {
getSelectedUnrestrictedFiles().add(fmd);
}
if(fmd.getDataFile().isTabularData()){
setTabularDataSelected(true);
}
}
}
private List<String> getSuccessMessageArguments() {
List<String> arguments = new ArrayList<>();
arguments.add(StringEscapeUtils.escapeHtml(dataset.getDisplayName()));
String linkString = "<a href=\"/dataverse/" + linkingDataverse.getAlias() + "\">" + StringEscapeUtils.escapeHtml(linkingDataverse.getDisplayName()) + "</a>";
arguments.add(linkString);
return arguments;
}
public String saveLinkedDataset() {
if (linkingDataverseId == null) {
JsfHelper.addFlashMessage(BundleUtil.getStringFromBundle("dataverse.link.select"));
return "";
}
linkingDataverse = dataverseService.find(linkingDataverseId);
if (readOnly) {
// Pass a "real", non-readonly dataset the the LinkDatasetCommand:
dataset = datasetService.find(dataset.getId());
}
LinkDatasetCommand cmd = new LinkDatasetCommand(dvRequestService.getDataverseRequest(), linkingDataverse, dataset);
try {
commandEngine.submit(cmd);
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.message.linkSuccess", getSuccessMessageArguments()));
} catch (CommandException ex) {
String msg = "There was a problem linking this dataset to yours: " + ex;
logger.severe(msg);
msg = BundleUtil.getStringFromBundle("dataset.notlinked.msg") + ex;
/**
* @todo how do we get this message to show up in the GUI?
*/
FacesMessage message = new FacesMessage(FacesMessage.SEVERITY_INFO, BundleUtil.getStringFromBundle("dataset.notlinked"), msg);
FacesContext.getCurrentInstance().addMessage(null, message);
}
return returnToLatestVersion();
}
List<FileMetadata> previouslyRestrictedFiles = null;
public boolean isShowAccessPopup() {
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
if (fmd.isRestricted()) {
if (editMode == EditMode.CREATE) {
// if this is a brand new file, it's definitely not
// of a previously restricted kind!
return true;
}
if (previouslyRestrictedFiles != null) {
// We've already checked whether we are in the CREATE mode,
// above; and that means we can safely assume this filemetadata
// has an existing db id. So it is safe to use the .contains()
// method below:
if (!previouslyRestrictedFiles.contains(fmd)) {
return true;
}
}
}
}
return false;
}
public void setShowAccessPopup(boolean showAccessPopup) {} // dummy set method
public String testSelectedFilesForRestrict(){
RequestContext requestContext = RequestContext.getCurrentInstance();
if (selectedFiles.isEmpty()) {
requestContext.execute("PF('selectFilesForRestrict').show()");
return "";
} else {
boolean validSelection = false;
for (FileMetadata fmd : selectedFiles) {
if (!fmd.isRestricted() ){
validSelection = true;
}
}
if (!validSelection) {
requestContext.execute("PF('selectFilesForRestrict').show()");
return "";
}
testSelectedFilesForMapData();
requestContext.execute("PF('accessPopup').show()");
return "";
}
}
public String restrictSelectedFiles(boolean restricted) throws CommandException{
RequestContext requestContext = RequestContext.getCurrentInstance();
if (selectedFiles.isEmpty()) {
if (restricted) {
requestContext.execute("PF('selectFilesForRestrict').show()");
} else {
requestContext.execute("PF('selectFilesForUnRestrict').show()");
}
return "";
} else {
boolean validSelection = false;
for (FileMetadata fmd : selectedFiles) {
if ((fmd.isRestricted() && !restricted) || (!fmd.isRestricted() && restricted)) {
validSelection = true;
}
}
if (!validSelection) {
if (restricted) {
requestContext.execute("PF('selectFilesForRestrict').show()");
}
if (!restricted) {
requestContext.execute("PF('selectFilesForUnRestrict').show()");
}
return "";
}
}
if (editMode != EditMode.CREATE) {
if (bulkUpdateCheckVersion()) {
refreshSelectedFiles();
}
restrictFiles(restricted);
}
save();
return returnToDraftVersion();
}
public void restrictFiles(boolean restricted) throws CommandException {
//if (previouslyRestrictedFiles == null) {
// we don't need to bother with this "previously restricted" business
// when in Create mode... because all the files are new, so none could
// have been restricted previously;
// (well, it looks like the code below should never be called in the
// CREATE mode in the first place... the edit files fragment uses
// its own restrictFiles() method there; also, the fmd.getDataFile().equals(fmw.getDataFile()))
// line is not going to work on a new file... so be mindful of all this
// when the code between the 2 beans is merged in 4.3.
if (editMode != EditMode.CREATE) {
previouslyRestrictedFiles = new ArrayList<>();
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
if (fmd.isRestricted()) {
previouslyRestrictedFiles.add(fmd);
}
}
Command cmd;
String fileNames = null;
for (FileMetadata fmw : workingVersion.getFileMetadatas()) {
for (FileMetadata fmd : this.getSelectedFiles()) {
if (fmd.getDataFile().equals(fmw.getDataFile())) {
if (restricted && !fmw.isRestricted()) {
// collect the names of the newly-restricted files,
// to show in the success message; only files that are
// actually selected and not yet restricted are listed:
if (fileNames == null) {
fileNames = fmd.getLabel();
} else {
fileNames = fileNames.concat(", " + fmd.getLabel());
}
}
cmd = new RestrictFileCommand(fmw.getDataFile(), dvRequestService.getDataverseRequest(), restricted);
commandEngine.submit(cmd);
// fmw.setRestricted(restricted);
// if (workingVersion.isDraft() && !fmw.getDataFile().isReleased()) {
// // We do not really need to check that the working version is
// // a draft here - it must be a draft, if we've gotten this
// // far. But just in case. -- L.A. 4.2.1
// fmw.getDataFile().setRestricted(restricted);
// }
}
}
}
if (fileNames != null) {
String successMessage = JH.localize("file.restricted.success");
logger.fine(successMessage);
successMessage = successMessage.replace("{0}", fileNames);
JsfHelper.addFlashMessage(successMessage);
}
}
}
public int getRestrictedFileCount() {
int restrictedFileCount = 0;
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
if (fmd.isRestricted()) {
restrictedFileCount++;
}
}
return restrictedFileCount;
}
private List<FileMetadata> filesToBeDeleted = new ArrayList<>();
public String deleteFilesAndSave(){
bulkFileDeleteInProgress = true;
if (bulkUpdateCheckVersion()){
refreshSelectedFiles();
}
deleteFiles();
return save();
}
public void deleteFiles() {
for (FileMetadata markedForDelete : selectedFiles) {
if (markedForDelete.getId() != null) {
// This FileMetadata has an id, i.e., it exists in the database.
// We are going to remove this filemetadata from the version:
dataset.getEditVersion().getFileMetadatas().remove(markedForDelete);
// But the actual delete will be handled inside the UpdateDatasetCommand
// (called later on). The list "filesToBeDeleted" is passed to the
// command as a parameter:
filesToBeDeleted.add(markedForDelete);
} else {
// This FileMetadata does not have an id, meaning it has just been
// created, and not yet saved in the database. This in turn means this is
// a freshly created DRAFT version; specifically created because
// the user is trying to delete a file from an existing published
// version. This means we are not really *deleting* the file -
// we are going to keep it in the published version; we are simply
// going to save a new DRAFT version that does not contain this file.
// So below we are deleting the metadata from the version; we are
// NOT adding the file to the filesToBeDeleted list that will be
// passed to the UpdateDatasetCommand. -- L.A. Aug 2017
Iterator<FileMetadata> fmit = dataset.getEditVersion().getFileMetadatas().iterator();
while (fmit.hasNext()) {
FileMetadata fmd = fmit.next();
if (markedForDelete.getDataFile().getStorageIdentifier().equals(fmd.getDataFile().getStorageIdentifier())) {
// And if this is an image file that happens to be assigned
// as the dataset thumbnail, let's null the assignment here:
if (fmd.getDataFile().equals(dataset.getThumbnailFile())) {
dataset.setThumbnailFile(null);
}
//if not published then delete identifier
if (!fmd.getDataFile().isReleased()){
try{
commandEngine.submit(new DeleteDataFileCommand(fmd.getDataFile(), dvRequestService.getDataverseRequest()));
} catch (CommandException e){
//this command is here to delete the identifier of unreleased files
//if it fails then a reserved identifier may still be present on the remote provider
}
}
fmit.remove();
break;
}
}
}
}
/*
Do note that if we are deleting any files that have UNFs (i.e.,
tabular files), we DO NEED TO RECALCULATE the UNF of the version!
- but we will do this inside the UpdateDatasetCommand.
*/
}
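// save() handles both Create (optionally from a template, with any freshly uploaded
// files saved and ingested afterwards) and Update (metadata / terms / file edits and
// bulk file operations), choosing the command and success message from editMode.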
public String save() {
//Before dataset saved, write cached prov freeform to version
if(systemConfig.isProvCollectionEnabled()) {
provPopupFragmentBean.saveStageProvFreeformToLatestVersion();
}
// Validate
Set<ConstraintViolation> constraintViolations = workingVersion.validate();
if (!constraintViolations.isEmpty()) {
//JsfHelper.addFlashMessage(JH.localize("dataset.message.validationError"));
JH.addMessage(FacesMessage.SEVERITY_ERROR, JH.localize("dataset.message.validationError"));
//FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Validation Error", "See below for details."));
return "";
}
// Use the Create or Update command to save the dataset:
Command<Dataset> cmd;
try {
if (editMode == EditMode.CREATE) {
if ( selectedTemplate != null ) {
if ( isSessionUserAuthenticated() ) {
cmd = new CreateNewDatasetCommand(dataset, dvRequestService.getDataverseRequest(), false, selectedTemplate);
} else {
JH.addMessage(FacesMessage.SEVERITY_FATAL, JH.localize("dataset.create.authenticatedUsersOnly"));
return null;
}
} else {
cmd = new CreateNewDatasetCommand(dataset, dvRequestService.getDataverseRequest());
}
} else {
cmd = new UpdateDatasetVersionCommand(dataset, dvRequestService.getDataverseRequest(), filesToBeDeleted);
((UpdateDatasetVersionCommand) cmd).setValidateLenient(true);
}
dataset = commandEngine.submit(cmd);
if (editMode == EditMode.CREATE) {
if (session.getUser() instanceof AuthenticatedUser) {
userNotificationService.sendNotification((AuthenticatedUser) session.getUser(), dataset.getCreateDate(), UserNotification.Type.CREATEDS, dataset.getLatestVersion().getId());
}
}
logger.fine("Successfully executed SaveDatasetCommand.");
} catch (EJBException ex) {
StringBuilder error = new StringBuilder();
error.append(ex).append(" ");
error.append(ex.getMessage()).append(" ");
Throwable cause = ex;
while (cause.getCause()!= null) {
cause = cause.getCause();
error.append(cause).append(" ");
error.append(cause.getMessage()).append(" ");
}
logger.log(Level.FINE, "Couldn''t save dataset: {0}", error.toString());
populateDatasetUpdateFailureMessage();
return returnToDraftVersion();
} catch (CommandException ex) {
//FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Dataset Save Failed", " - " + ex.toString()));
logger.log(Level.SEVERE, "CommandException, when attempting to update the dataset: " + ex.getMessage(), ex);
populateDatasetUpdateFailureMessage();
return returnToDraftVersion();
}
if (editMode != null) {
if (editMode.equals(EditMode.CREATE)) {
// We allow users to upload files on Create:
int nNewFiles = newFiles.size();
logger.fine("NEW FILES: "+nNewFiles);
if (nNewFiles > 0) {
                    // Save the NEW files permanently and add them to the dataset:
                    // But first, fully refresh the newly created dataset (with a
                    // datasetService.find()).
// We have reasons to believe that the CreateDatasetCommand
// returns the dataset that doesn't have all the
// RoleAssignments properly linked to it - even though they
// have been created in the dataset.
dataset = datasetService.find(dataset.getId());
List<DataFile> filesAdded = ingestService.saveAndAddFilesToDataset(dataset.getEditVersion(), newFiles);
newFiles.clear();
// and another update command:
boolean addFilesSuccess = false;
cmd = new UpdateDatasetVersionCommand(dataset, dvRequestService.getDataverseRequest(), new ArrayList<FileMetadata>());
try {
dataset = commandEngine.submit(cmd);
addFilesSuccess = true;
} catch (Exception ex) {
addFilesSuccess = false;
}
if (addFilesSuccess && dataset.getFiles().size() > 0) {
if (nNewFiles == dataset.getFiles().size()) {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.createSuccess"));
} else {
String partialSuccessMessage = JH.localize("dataset.message.createSuccess.partialSuccessSavingFiles");
partialSuccessMessage = partialSuccessMessage.replace("{0}", "" + dataset.getFiles().size() + "");
partialSuccessMessage = partialSuccessMessage.replace("{1}", "" + nNewFiles + "");
JsfHelper.addWarningMessage(partialSuccessMessage);
}
} else {
JsfHelper.addWarningMessage(JH.localize("dataset.message.createSuccess.failedToSaveFiles"));
}
} else {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.createSuccess"));
}
}
if (editMode.equals(EditMode.METADATA)) {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.metadataSuccess"));
}
if (editMode.equals(EditMode.LICENSE)) {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.termsSuccess"));
}
if (editMode.equals(EditMode.FILE)) {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.filesSuccess"));
}
} else {
// must have been a bulk file update or delete:
if (bulkFileDeleteInProgress) {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.bulkFileDeleteSuccess"));
} else {
JsfHelper.addSuccessMessage(JH.localize("dataset.message.bulkFileUpdateSuccess"));
}
}
editMode = null;
bulkFileDeleteInProgress = false;
// Call Ingest Service one more time, to
// queue the data ingest jobs for asynchronous execution:
ingestService.startIngestJobsForDataset(dataset, (AuthenticatedUser) session.getUser());
//After dataset saved, then persist prov json data
if(systemConfig.isProvCollectionEnabled()) {
try {
provPopupFragmentBean.saveStagedProvJson(false, dataset.getLatestVersion().getFileMetadatas());
} catch (AbstractApiBean.WrappedResponse ex) {
JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("file.metadataTab.provenance.error"));
Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, null, ex);
}
}
logger.fine("Redirecting to the Dataset page.");
return returnToDraftVersion();
}
private void populateDatasetUpdateFailureMessage(){
if (editMode == null) {
// that must have been a bulk file update or delete:
if (bulkFileDeleteInProgress) {
JsfHelper.addErrorMessage(JH.localize("dataset.message.bulkFileDeleteFailure"));
} else {
JsfHelper.addErrorMessage(JH.localize("dataset.message.filesFailure"));
}
} else {
if (editMode.equals(EditMode.CREATE)) {
JsfHelper.addErrorMessage(JH.localize("dataset.message.createFailure"));
}
if (editMode.equals(EditMode.METADATA)) {
JsfHelper.addErrorMessage(JH.localize("dataset.message.metadataFailure"));
}
if (editMode.equals(EditMode.LICENSE)) {
JsfHelper.addErrorMessage(JH.localize("dataset.message.termsFailure"));
}
if (editMode.equals(EditMode.FILE)) {
JsfHelper.addErrorMessage(JH.localize("dataset.message.filesFailure"));
}
}
bulkFileDeleteInProgress = false;
}
private String returnToLatestVersion(){
dataset = datasetService.find(dataset.getId());
workingVersion = dataset.getLatestVersion();
if (workingVersion.isDeaccessioned() && dataset.getReleasedVersion() != null) {
workingVersion = dataset.getReleasedVersion();
}
setVersionTabList(resetVersionTabList());
setReleasedVersionTabList(resetReleasedVersionTabList());
newFiles.clear();
editMode = null;
return "/dataset.xhtml?persistentId=" + dataset.getGlobalIdString() + "&version="+ workingVersion.getFriendlyVersionNumber() + "&faces-redirect=true";
}
private String returnToDatasetOnly(){
dataset = datasetService.find(dataset.getId());
editMode = null;
return "/dataset.xhtml?persistentId=" + dataset.getGlobalIdString() + "&faces-redirect=true";
}
private String returnToDraftVersion(){
return "/dataset.xhtml?persistentId=" + dataset.getGlobalIdString() + "&version=DRAFT" + "&faces-redirect=true";
}
public String cancel() {
return returnToLatestVersion();
}
private HttpClient getClient() {
// TODO:
// cache the http client? -- L.A. 4.0 alpha
return new HttpClient();
}
public void refreshLock() {
//RequestContext requestContext = RequestContext.getCurrentInstance();
logger.fine("checking lock");
if (isStillLocked()) {
logger.fine("(still locked)");
} else {
// OK, the dataset is no longer locked.
// let's tell the page to refresh:
logger.fine("no longer locked!");
stateChanged = true;
lockedFromEditsVar = null;
lockedFromDownloadVar = null;
//requestContext.execute("refreshPage();");
}
}
public void refreshIngestLock() {
//RequestContext requestContext = RequestContext.getCurrentInstance();
logger.fine("checking ingest lock");
if (isStillLockedForIngest()) {
logger.fine("(still locked)");
} else {
// OK, the dataset is no longer locked.
// let's tell the page to refresh:
logger.fine("no longer locked!");
stateChanged = true;
lockedFromEditsVar = null;
lockedFromDownloadVar = null;
//requestContext.execute("refreshPage();");
}
}
public void refreshAllLocks() {
//RequestContext requestContext = RequestContext.getCurrentInstance();
logger.fine("checking all locks");
if (isStillLockedForAnyReason()) {
logger.fine("(still locked)");
} else {
// OK, the dataset is no longer locked.
// let's tell the page to refresh:
logger.fine("no longer locked!");
stateChanged = true;
lockedFromEditsVar = null;
lockedFromDownloadVar = null;
//requestContext.execute("refreshPage();");
}
}
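    // Editorial note (not part of the original source): the three refresh*Lock()
    // methods above are meant to be called repeatedly from the page (the commented-out
    // RequestContext calls hint at an ajax poll); setting stateChanged to true is what
    // lets the is*Locked() getters below report the dataset as unlocked without a full
    // page reload.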
/*
public boolean isLockedInProgress() {
if (dataset != null) {
logger.log(Level.FINE, "checking lock status of dataset {0}", dataset.getId());
if (dataset.isLocked()) {
return true;
}
}
return false;
}*/
public boolean isDatasetLockedInWorkflow() {
return (dataset != null)
? dataset.isLockedFor(DatasetLock.Reason.Workflow)
: false;
}
public boolean isStillLocked() {
if (dataset != null && dataset.getId() != null) {
logger.log(Level.FINE, "checking lock status of dataset {0}", dataset.getId());
if(dataset.getLocks().size() == 1 && dataset.getLockFor(DatasetLock.Reason.InReview) != null){
return false;
}
if (datasetService.checkDatasetLock(dataset.getId())) {
return true;
}
}
return false;
}
public boolean isStillLockedForIngest() {
if (dataset.getId() != null) {
Dataset testDataset = datasetService.find(dataset.getId());
if (testDataset != null && testDataset.getId() != null) {
logger.log(Level.FINE, "checking lock status of dataset {0}", dataset.getId());
if (testDataset.getLockFor(DatasetLock.Reason.Ingest) != null) {
return true;
}
}
}
return false;
}
public boolean isStillLockedForAnyReason() {
if (dataset.getId() != null) {
Dataset testDataset = datasetService.find(dataset.getId());
if (testDataset != null && testDataset.getId() != null) {
logger.log(Level.FINE, "checking lock status of dataset {0}", dataset.getId());
if (testDataset.getLocks().size() > 0) {
return true;
}
}
}
return false;
}
public boolean isLocked() {
if (stateChanged) {
return false;
}
if (dataset != null) {
if (dataset.isLocked()) {
return true;
}
}
return false;
}
public boolean isLockedForIngest() {
if (dataset.getId() != null) {
Dataset testDataset = datasetService.find(dataset.getId());
if (stateChanged) {
return false;
}
if (testDataset != null) {
if (testDataset.getLockFor(DatasetLock.Reason.Ingest) != null) {
return true;
}
}
}
return false;
}
public boolean isLockedForAnyReason() {
if (dataset.getId() != null) {
Dataset testDataset = datasetService.find(dataset.getId());
if (stateChanged) {
return false;
}
if (testDataset != null) {
if (testDataset.getLocks().size() > 0) {
return true;
}
}
}
return false;
}
private Boolean lockedFromEditsVar;
private Boolean lockedFromDownloadVar;
/**
     * When the dataset is In Review, authors are not allowed to edit, but curators are.
     * For all other lock types, editing is blocked for all editors.
*/
public boolean isLockedFromEdits() {
if(null == lockedFromEditsVar || stateChanged) {
try {
permissionService.checkEditDatasetLock(dataset, dvRequestService.getDataverseRequest(), new UpdateDatasetVersionCommand(dataset, dvRequestService.getDataverseRequest()));
lockedFromEditsVar = false;
} catch (IllegalCommandException ex) {
lockedFromEditsVar = true;
}
}
return lockedFromEditsVar;
}
public boolean isLockedFromDownload(){
if(null == lockedFromDownloadVar || stateChanged) {
try {
permissionService.checkDownloadFileLock(dataset, dvRequestService.getDataverseRequest(), new CreateNewDatasetCommand(dataset, dvRequestService.getDataverseRequest()));
lockedFromDownloadVar = false;
} catch (IllegalCommandException ex) {
lockedFromDownloadVar = true;
return true;
}
}
return lockedFromDownloadVar;
}
public void setLocked(boolean locked) {
// empty method, so that we can use DatasetPage.locked in a hidden
// input on the page.
}
public void setLockedForIngest(boolean locked) {
// empty method, so that we can use DatasetPage.locked in a hidden
// input on the page.
}
public void setLockedForAnyReason(boolean locked) {
// empty method, so that we can use DatasetPage.locked in a hidden
// input on the page.
}
public boolean isStateChanged() {
return stateChanged;
}
public void setStateChanged(boolean stateChanged) {
// empty method, so that we can use DatasetPage.stateChanged in a hidden
// input on the page.
}
public DatasetVersionUI getDatasetVersionUI() {
return datasetVersionUI;
}
public List<DatasetVersion> getVersionTabList() {
return versionTabList;
}
public List<DatasetVersion> getVersionTabListForPostLoad(){
return this.versionTabListForPostLoad;
}
public void setVersionTabListForPostLoad(List<DatasetVersion> versionTabListForPostLoad) {
this.versionTabListForPostLoad = versionTabListForPostLoad;
}
public Integer getCompareVersionsCount() {
Integer retVal = 0;
for (DatasetVersion dvTest : dataset.getVersions()) {
if (!dvTest.isDeaccessioned()) {
retVal++;
}
}
return retVal;
}
/**
* To improve performance, Version Differences
* are retrieved/calculated after the page load
*
* See: dataset-versions.xhtml, remoteCommand 'postLoadVersionTablList'
*/
public void postLoadSetVersionTabList(){
if (this.getVersionTabList().isEmpty() && workingVersion.isDeaccessioned()){
setVersionTabList(resetVersionTabList());
}
this.setVersionTabListForPostLoad(this.getVersionTabList());
//this.versionTabList = this.resetVersionTabList();
}
/**
*
*
* @param versionTabList
*/
public void setVersionTabList(List<DatasetVersion> versionTabList) {
this.versionTabList = versionTabList;
}
private List<DatasetVersion> releasedVersionTabList = new ArrayList<>();
public List<DatasetVersion> getReleasedVersionTabList() {
return releasedVersionTabList;
}
public void setReleasedVersionTabList(List<DatasetVersion> releasedVersionTabList) {
this.releasedVersionTabList = releasedVersionTabList;
}
private List<DatasetVersion> selectedVersions;
public List<DatasetVersion> getSelectedVersions() {
return selectedVersions;
}
public void setSelectedVersions(List<DatasetVersion> selectedVersions) {
this.selectedVersions = selectedVersions;
}
private List<DatasetVersion> selectedDeaccessionVersions;
public List<DatasetVersion> getSelectedDeaccessionVersions() {
return selectedDeaccessionVersions;
}
public void setSelectedDeaccessionVersions(List<DatasetVersion> selectedDeaccessionVersions) {
this.selectedDeaccessionVersions = selectedDeaccessionVersions;
}
public DatasetVersionDifference getDatasetVersionDifference() {
return datasetVersionDifference;
}
public void setDatasetVersionDifference(DatasetVersionDifference datasetVersionDifference) {
this.datasetVersionDifference = datasetVersionDifference;
}
public void startMultipleFileDownload (Boolean writeGuestbook){
fileDownloadService.callDownloadServlet(getDownloadableFilesIdsString(), writeGuestbook);
}
private String downloadType = "";
public String getDownloadType() {
return downloadType;
}
public void setDownloadType(String downloadType) {
this.downloadType = downloadType;
}
public void modifyGuestbookMultipleResponse(){
if (this.selectedFiles.isEmpty()) {
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('selectFilesForDownload').show()");
return;
}
this.guestbookResponse = this.guestbookResponseService.modifySelectedFileIds(guestbookResponse, getSelectedDownloadableFilesIdsString());
this.guestbookResponse.setDownloadtype("Download");
this.guestbookResponse.setFileFormat("Download");
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('downloadPopup').show();handleResizeDialog('downloadPopup');");
}
public void initGuestbookMultipleResponse(String selectedFileIds){
initGuestbookResponse(null, "download", selectedFileIds);
}
public void initGuestbookResponse(FileMetadata fileMetadata, String downloadFormat, String selectedFileIds) {
this.guestbookResponse = guestbookResponseService.initGuestbookResponse(fileMetadata, downloadFormat, selectedFileIds, session);
}
public void compareVersionDifferences() {
RequestContext requestContext = RequestContext.getCurrentInstance();
if (this.selectedVersions.size() != 2) {
requestContext.execute("openCompareTwo();");
} else {
//order depends on order of selection - needs to be chronological order
if (this.selectedVersions.get(0).getId().intValue() > this.selectedVersions.get(1).getId().intValue()) {
updateVersionDifferences(this.selectedVersions.get(0), this.selectedVersions.get(1));
} else {
updateVersionDifferences(this.selectedVersions.get(1), this.selectedVersions.get(0));
}
}
}
public void updateVersionDifferences(DatasetVersion newVersion, DatasetVersion originalVersion) {
if (originalVersion == null) {
setDatasetVersionDifference(newVersion.getDefaultVersionDifference());
} else {
setDatasetVersionDifference(new DatasetVersionDifference(newVersion, originalVersion));
}
}
private List<DatasetVersion> resetVersionTabList() {
//if (true)return null;
List<DatasetVersion> retList = new ArrayList<>();
if (permissionService.on(dataset).has(Permission.ViewUnpublishedDataset)) {
for (DatasetVersion version : dataset.getVersions()) {
version.setContributorNames(datasetVersionService.getContributorsNames(version));
retList.add(version);
}
} else {
for (DatasetVersion version : dataset.getVersions()) {
if (version.isReleased() || version.isDeaccessioned()) {
version.setContributorNames(datasetVersionService.getContributorsNames(version));
retList.add(version);
}
}
}
return retList;
}
private boolean existReleasedVersion;
public boolean isExistReleasedVersion() {
return existReleasedVersion;
}
public void setExistReleasedVersion(boolean existReleasedVersion) {
this.existReleasedVersion = existReleasedVersion;
}
private boolean resetExistRealeaseVersion(){
for (DatasetVersion version : dataset.getVersions()) {
if (version.isReleased() || version.isArchived()) {
return true;
}
}
return false;
}
private List<DatasetVersion> resetReleasedVersionTabList() {
List<DatasetVersion> retList = new ArrayList<>();
for (DatasetVersion version : dataset.getVersions()) {
if (version.isReleased() || version.isArchived()) {
retList.add(version);
}
}
return retList;
}
public String getDatasetPublishCustomText(){
String datasetPublishCustomText = settingsWrapper.getValueForKey(SettingsServiceBean.Key.DatasetPublishPopupCustomText);
if( datasetPublishCustomText!= null && !datasetPublishCustomText.isEmpty()){
return datasetPublishCustomText;
}
return "";
}
public Boolean isDatasetPublishPopupCustomTextOnAllVersions(){
return settingsWrapper.isTrueForKey(SettingsServiceBean.Key.DatasetPublishPopupCustomTextOnAllVersions, false);
}
public String getVariableMetadataURL(Long fileid) {
String myHostURL = getDataverseSiteUrl();
String metaURL = myHostURL + "/api/meta/datafile/" + fileid;
return metaURL;
}
public String getTabularDataFileURL(Long fileid) {
String myHostURL = getDataverseSiteUrl();
String dataURL = myHostURL + "/api/access/datafile/" + fileid;
return dataURL;
}
public List< String[]> getExporters(){
List<String[]> retList = new ArrayList<>();
String myHostURL = getDataverseSiteUrl();
for (String [] provider : ExportService.getInstance(settingsService).getExportersLabels() ){
String formatName = provider[1];
String formatDisplayName = provider[0];
Exporter exporter = null;
try {
exporter = ExportService.getInstance(settingsService).getExporter(formatName);
} catch (ExportException ex) {
exporter = null;
}
if (exporter != null && exporter.isAvailableToUsers()) {
// Not all metadata exports should be presented to the web users!
// Some are only for harvesting clients.
String[] temp = new String[2];
temp[0] = formatDisplayName;
temp[1] = myHostURL + "/api/datasets/export?exporter=" + formatName + "&persistentId=" + dataset.getGlobalIdString();
retList.add(temp);
}
}
return retList;
}
private FileMetadata fileMetadataSelected = null;
public void setFileMetadataSelected(FileMetadata fm){
setFileMetadataSelected(fm, null);
}
public void setFileMetadataSelected(FileMetadata fm, String guestbook) {
if (guestbook != null) {
if (guestbook.equals("create")) {
//
/*
FIX ME guestbook entry for subsetting
*/
// guestbookResponseService.createSilentGuestbookEntry(fm, "Subset");
} else {
initGuestbookResponse(fm, "Subset", null);
}
}
fileMetadataSelected = fm;
logger.fine("set the file for the advanced options popup (" + fileMetadataSelected.getLabel() + ")");
}
public FileMetadata getFileMetadataSelected() {
if (fileMetadataSelected != null) {
logger.fine("returning file metadata for the advanced options popup (" + fileMetadataSelected.getLabel() + ")");
} else {
logger.fine("file metadata for the advanced options popup is null.");
}
return fileMetadataSelected;
}
public void clearFileMetadataSelected() {
fileMetadataSelected = null;
}
public boolean isDesignatedDatasetThumbnail (FileMetadata fileMetadata) {
if (fileMetadata != null) {
if (fileMetadata.getDataFile() != null) {
if (fileMetadata.getDataFile().getId() != null) {
if (fileMetadata.getDataFile().getOwner() != null) {
if (fileMetadata.getDataFile().equals(fileMetadata.getDataFile().getOwner().getThumbnailFile())) {
return true;
}
}
}
}
}
return false;
}
/*
* Items for the "Designated this image as the Dataset thumbnail:
*/
private FileMetadata fileMetadataSelectedForThumbnailPopup = null;
public void setFileMetadataSelectedForThumbnailPopup(FileMetadata fm){
fileMetadataSelectedForThumbnailPopup = fm;
alreadyDesignatedAsDatasetThumbnail = getUseAsDatasetThumbnail();
}
public FileMetadata getFileMetadataSelectedForThumbnailPopup() {
return fileMetadataSelectedForThumbnailPopup;
}
public void clearFileMetadataSelectedForThumbnailPopup() {
fileMetadataSelectedForThumbnailPopup = null;
}
private boolean alreadyDesignatedAsDatasetThumbnail = false;
public boolean getUseAsDatasetThumbnail() {
if (fileMetadataSelectedForThumbnailPopup != null) {
if (fileMetadataSelectedForThumbnailPopup.getDataFile() != null) {
if (fileMetadataSelectedForThumbnailPopup.getDataFile().getId() != null) {
if (fileMetadataSelectedForThumbnailPopup.getDataFile().getOwner() != null) {
if (fileMetadataSelectedForThumbnailPopup.getDataFile().equals(fileMetadataSelectedForThumbnailPopup.getDataFile().getOwner().getThumbnailFile())) {
return true;
}
}
}
}
}
return false;
}
public void setUseAsDatasetThumbnail(boolean useAsThumbnail) {
if (fileMetadataSelectedForThumbnailPopup != null) {
if (fileMetadataSelectedForThumbnailPopup.getDataFile() != null) {
if (fileMetadataSelectedForThumbnailPopup.getDataFile().getId() != null) { // ?
if (fileMetadataSelectedForThumbnailPopup.getDataFile().getOwner() != null) {
if (useAsThumbnail) {
fileMetadataSelectedForThumbnailPopup.getDataFile().getOwner().setThumbnailFile(fileMetadataSelectedForThumbnailPopup.getDataFile());
} else if (getUseAsDatasetThumbnail()) {
fileMetadataSelectedForThumbnailPopup.getDataFile().getOwner().setThumbnailFile(null);
}
}
}
}
}
}
public void saveAsDesignatedThumbnail() {
// We don't need to do anything specific to save this setting, because
// the setUseAsDatasetThumbnail() method, above, has already updated the
// file object appropriately.
        // However, once the "save" button is pressed, we want to show a success message
        // if a new image has just been designated as such:
if (getUseAsDatasetThumbnail() && !alreadyDesignatedAsDatasetThumbnail) {
String successMessage = JH.localize("file.assignedDataverseImage.success");
logger.fine(successMessage);
successMessage = successMessage.replace("{0}", fileMetadataSelectedForThumbnailPopup.getLabel());
JsfHelper.addFlashMessage(successMessage);
}
// And reset the selected fileMetadata:
fileMetadataSelectedForThumbnailPopup = null;
}
/*
* Items for the "Tags (Categories)" popup.
*
*/
private FileMetadata fileMetadataSelectedForTagsPopup = null;
public void setFileMetadataSelectedForTagsPopup(){
}
public void setFileMetadataSelectedForTagsPopup(FileMetadata fm){
fileMetadataSelectedForTagsPopup = fm;
}
public FileMetadata getFileMetadataSelectedForTagsPopup() {
return fileMetadataSelectedForTagsPopup;
}
public void clearFileMetadataSelectedForTagsPopup() {
fileMetadataSelectedForTagsPopup = null;
}
public List <FileMetadata> getListFileMetadataSelectedForTagsPopup(){
List<FileMetadata> retList = new ArrayList<>();
for (FileMetadata fm : selectedFiles){
retList.add(fm);
}
return retList;
}
private List<String> categoriesByName;
public void setCategoriesByName(List<String> dummy){
categoriesByName = dummy;
}
public void refreshTagsPopUp(){
if (bulkUpdateCheckVersion()){
refreshSelectedFiles();
}
updateFileCounts();
refreshCategoriesByName();
refreshTabFileTagsByName();
}
private List<String> tabFileTagsByName;
public List<String> getTabFileTagsByName() {
return tabFileTagsByName;
}
public void setTabFileTagsByName(List<String> tabFileTagsByName) {
this.tabFileTagsByName = tabFileTagsByName;
}
private void refreshCategoriesByName(){
categoriesByName= new ArrayList<>();
for (String category: dataset.getCategoriesByName() ){
categoriesByName.add(category);
}
refreshSelectedTags();
}
public List<String> getCategoriesByName() {
return categoriesByName;
}
/*
* 1. Tabular File Tags:
*/
private List<String> tabFileTags = null;
public List<String> getTabFileTags() {
if (tabFileTags == null) {
tabFileTags = DataFileTag.listTags();
}
return tabFileTags;
}
public void setTabFileTags(List<String> tabFileTags) {
this.tabFileTags = tabFileTags;
}
private String[] selectedTabFileTags = {};
public String[] getSelectedTabFileTags() {
return selectedTabFileTags;
}
public void setSelectedTabFileTags(String[] selectedTabFileTags) {
this.selectedTabFileTags = selectedTabFileTags;
}
private String[] selectedTags = {};
public void handleSelection(final AjaxBehaviorEvent event) {
if (selectedTags != null) {
selectedTags = selectedTags.clone();
}
}
private void refreshTabFileTagsByName(){
tabFileTagsByName= new ArrayList<>();
for (FileMetadata fm : selectedFiles) {
if (fm.getDataFile().getTags() != null) {
for (int i = 0; i < fm.getDataFile().getTags().size(); i++) {
if (!tabFileTagsByName.contains(fm.getDataFile().getTags().get(i).getTypeLabel())) {
tabFileTagsByName.add(fm.getDataFile().getTags().get(i).getTypeLabel());
}
}
}
}
refreshSelectedTabFileTags();
}
private void refreshSelectedTabFileTags() {
selectedTabFileTags = null;
selectedTabFileTags = new String[0];
if (tabFileTagsByName.size() > 0) {
selectedTabFileTags = new String[tabFileTagsByName.size()];
for (int i = 0; i < tabFileTagsByName.size(); i++) {
selectedTabFileTags[i] = tabFileTagsByName.get(i);
}
}
Arrays.sort(selectedTabFileTags);
}
private boolean tabularDataSelected = false;
public boolean isTabularDataSelected() {
return tabularDataSelected;
}
public void setTabularDataSelected(boolean tabularDataSelected) {
this.tabularDataSelected = tabularDataSelected;
}
public String[] getSelectedTags() {
return selectedTags;
}
public void setSelectedTags(String[] selectedTags) {
this.selectedTags = selectedTags;
}
/*
* "File Tags" (aka "File Categories"):
*/
private String newCategoryName = null;
public String getNewCategoryName() {
return newCategoryName;
}
public void setNewCategoryName(String newCategoryName) {
this.newCategoryName = newCategoryName;
}
public String saveNewCategory() {
if (newCategoryName != null && !newCategoryName.isEmpty()) {
categoriesByName.add(newCategoryName);
}
//Now increase size of selectedTags and add new category
String[] temp = new String[selectedTags.length + 1];
System.arraycopy(selectedTags, 0, temp, 0, selectedTags.length);
selectedTags = temp;
selectedTags[selectedTags.length - 1] = newCategoryName;
//Blank out added category
newCategoryName = "";
return "";
}
private void refreshSelectedTags() {
selectedTags = null;
selectedTags = new String[0];
List<String> selectedCategoriesByName= new ArrayList<>();
for (FileMetadata fm : selectedFiles) {
if (fm.getCategories() != null) {
for (int i = 0; i < fm.getCategories().size(); i++) {
if (!selectedCategoriesByName.contains(fm.getCategories().get(i).getName())) {
selectedCategoriesByName.add(fm.getCategories().get(i).getName());
}
}
}
}
if (selectedCategoriesByName.size() > 0) {
selectedTags = new String[selectedCategoriesByName.size()];
for (int i = 0; i < selectedCategoriesByName.size(); i++) {
selectedTags[i] = selectedCategoriesByName.get(i);
}
}
Arrays.sort(selectedTags);
}
/* This method handles saving both "tabular file tags" and
* "file categories" (which are also considered "tags" in 4.0)
*/
public String saveFileTagsAndCategories() {
// 1. New Category name:
        // We don't need to do anything here for the file categories that the user
// selected from the pull down list; that was done directly from the
// page with the FileMetadata.setCategoriesByName() method.
// So here we only need to take care of the new, custom category
// name, if entered:
if (bulkUpdateCheckVersion()) {
refreshSelectedFiles();
}
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
if (selectedFiles != null && selectedFiles.size() > 0) {
for (FileMetadata fm : selectedFiles) {
if (fm.getDataFile().equals(fmd.getDataFile())) {
fmd.setCategories(new ArrayList<>());
if (newCategoryName != null) {
fmd.addCategoryByName(newCategoryName);
}
// 2. Tabular DataFile Tags:
if (selectedTags != null) {
for (String selectedTag : selectedTags) {
fmd.addCategoryByName(selectedTag);
}
}
if (fmd.getDataFile().isTabularData()) {
fmd.getDataFile().setTags(null);
for (String selectedTabFileTag : selectedTabFileTags) {
DataFileTag tag = new DataFileTag();
try {
tag.setTypeByLabel(selectedTabFileTag);
tag.setDataFile(fmd.getDataFile());
fmd.getDataFile().addTag(tag);
}catch (IllegalArgumentException iax) {
// ignore
}
}
}
}
}
}
}
// success message:
String successMessage = JH.localize("file.assignedTabFileTags.success");
logger.fine(successMessage);
successMessage = successMessage.replace("{0}", "Selected Files");
JsfHelper.addFlashMessage(successMessage);
selectedTags = null;
logger.fine("New category name: " + newCategoryName);
newCategoryName = null;
if (removeUnusedTags){
removeUnusedFileTagsFromDataset();
}
save();
return returnToDraftVersion();
}
/*
Remove unused file tags
When updating datafile tags see if any custom tags are not in use.
Remove them
*/
private void removeUnusedFileTagsFromDataset() {
categoriesByName = new ArrayList<>();
for (FileMetadata fm : workingVersion.getFileMetadatas()) {
if (fm.getCategories() != null) {
for (int i = 0; i < fm.getCategories().size(); i++) {
if (!categoriesByName.contains(fm.getCategories().get(i).getName())) {
categoriesByName.add(fm.getCategories().get(i).getName());
}
}
}
}
List<DataFileCategory> datasetFileCategoriesToRemove = new ArrayList<>();
for (DataFileCategory test : dataset.getCategories()) {
boolean remove = true;
for (String catByName : categoriesByName) {
if (catByName.equals(test.getName())) {
remove = false;
break;
}
}
if (remove) {
datasetFileCategoriesToRemove.add(test);
}
}
if (!datasetFileCategoriesToRemove.isEmpty()) {
for (DataFileCategory remove : datasetFileCategoriesToRemove) {
dataset.getCategories().remove(remove);
}
}
}
/*
* Items for the "Advanced (Ingest) Options" popup.
*
*/
private FileMetadata fileMetadataSelectedForIngestOptionsPopup = null;
public void setFileMetadataSelectedForIngestOptionsPopup(FileMetadata fm){
fileMetadataSelectedForIngestOptionsPopup = fm;
}
public FileMetadata getFileMetadataSelectedForIngestOptionsPopup() {
return fileMetadataSelectedForIngestOptionsPopup;
}
public void clearFileMetadataSelectedForIngestOptionsPopup() {
fileMetadataSelectedForIngestOptionsPopup = null;
}
private String ingestLanguageEncoding = null;
public String getIngestLanguageEncoding() {
if (ingestLanguageEncoding == null) {
return "UTF8 (default)";
}
return ingestLanguageEncoding;
}
public void setIngestLanguageEncoding(String ingestLanguageEncoding) {
this.ingestLanguageEncoding = ingestLanguageEncoding;
}
public void setIngestEncoding(String ingestEncoding) {
ingestLanguageEncoding = ingestEncoding;
}
private String savedLabelsTempFile = null;
public void handleLabelsFileUpload(FileUploadEvent event) {
logger.fine("entering handleUpload method.");
UploadedFile file = event.getFile();
if (file != null) {
InputStream uploadStream = null;
try {
uploadStream = file.getInputstream();
} catch (IOException ioex) {
logger.log(Level.WARNING, ioex, ()->"the file "+file.getFileName()+" failed to upload!");
List<String> args = Arrays.asList(file.getFileName());
String msg = BundleUtil.getStringFromBundle("dataset.file.uploadFailure.detailmsg", args);
FacesMessage message = new FacesMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.file.uploadFailure"), msg);
FacesContext.getCurrentInstance().addMessage(null, message);
return;
}
savedLabelsTempFile = saveTempFile(uploadStream);
logger.fine(()->file.getFileName() + " is successfully uploaded.");
List<String> args = Arrays.asList(file.getFileName());
FacesMessage message = new FacesMessage(BundleUtil.getStringFromBundle("dataset.file.upload", args));
FacesContext.getCurrentInstance().addMessage(null, message);
}
// process file (i.e., just save it in a temp location; for now):
}
private String saveTempFile(InputStream input) {
if (input == null) {
return null;
}
byte[] buffer = new byte[8192];
int bytesRead = 0;
File labelsFile = null;
FileOutputStream output = null;
try {
labelsFile = File.createTempFile("tempIngestLabels.", ".txt");
output = new FileOutputStream(labelsFile);
while ((bytesRead = input.read(buffer)) > -1) {
output.write(buffer, 0, bytesRead);
}
        } catch (IOException ioex) {
            return null;
        } finally {
            // Close the streams on both the success and the failure path:
            try {
                input.close();
            } catch (IOException e) {
            }
            if (output != null) {
                try {
                    output.close();
                } catch (IOException e) {
                }
            }
        }
if (labelsFile != null) {
return labelsFile.getAbsolutePath();
}
return null;
}
public void saveAdvancedOptions() {
// Language encoding for SPSS SAV (and, possibly, other tabular ingests:)
if (ingestLanguageEncoding != null) {
if (fileMetadataSelectedForIngestOptionsPopup != null && fileMetadataSelectedForIngestOptionsPopup.getDataFile() != null) {
if (fileMetadataSelectedForIngestOptionsPopup.getDataFile().getIngestRequest() == null) {
IngestRequest ingestRequest = new IngestRequest();
ingestRequest.setDataFile(fileMetadataSelectedForIngestOptionsPopup.getDataFile());
fileMetadataSelectedForIngestOptionsPopup.getDataFile().setIngestRequest(ingestRequest);
}
fileMetadataSelectedForIngestOptionsPopup.getDataFile().getIngestRequest().setTextEncoding(ingestLanguageEncoding);
}
}
ingestLanguageEncoding = null;
// Extra labels for SPSS POR (and, possibly, other tabular ingests:)
// (we are adding this parameter to the IngestRequest now, instead of back
// when it was uploaded. This is because we want the user to be able to
// hit cancel and bail out, until they actually click 'save' in the
// "advanced options" popup) -- L.A. 4.0 beta 11
if (savedLabelsTempFile != null) {
if (fileMetadataSelectedForIngestOptionsPopup != null && fileMetadataSelectedForIngestOptionsPopup.getDataFile() != null) {
if (fileMetadataSelectedForIngestOptionsPopup.getDataFile().getIngestRequest() == null) {
IngestRequest ingestRequest = new IngestRequest();
ingestRequest.setDataFile(fileMetadataSelectedForIngestOptionsPopup.getDataFile());
fileMetadataSelectedForIngestOptionsPopup.getDataFile().setIngestRequest(ingestRequest);
}
fileMetadataSelectedForIngestOptionsPopup.getDataFile().getIngestRequest().setLabelsFile(savedLabelsTempFile);
}
}
savedLabelsTempFile = null;
fileMetadataSelectedForIngestOptionsPopup = null;
}
private Boolean downloadButtonAvailable = null;
public boolean isDownloadButtonAvailable(){
if (downloadButtonAvailable != null) {
return downloadButtonAvailable;
}
for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
if (this.fileDownloadHelper.canDownloadFile(fmd)) {
downloadButtonAvailable = true;
return true;
}
}
downloadButtonAvailable = false;
return false;
}
public boolean isFileAccessRequestMultiButtonRequired(){
if (!isSessionUserAuthenticated() || !dataset.isFileAccessRequest()){
return false;
}
if (workingVersion == null) {
return false;
}
if (!workingVersion.getTermsOfUseAndAccess().isFileAccessRequest()){
// return false;
}
for (FileMetadata fmd : workingVersion.getFileMetadatas()){
if (!this.fileDownloadHelper.canDownloadFile(fmd)){
return true;
}
}
return false;
}
public boolean isFileAccessRequestMultiButtonEnabled(){
if (!isSessionUserAuthenticated() || !dataset.isFileAccessRequest()){
return false;
}
if( this.selectedRestrictedFiles == null || this.selectedRestrictedFiles.isEmpty() ){
return false;
}
for (FileMetadata fmd : this.selectedRestrictedFiles){
if (!this.fileDownloadHelper.canDownloadFile(fmd)){
return true;
}
}
return false;
}
private Boolean downloadButtonAllEnabled = null;
    public boolean isDownloadAllButtonEnabled() {
        if (downloadButtonAllEnabled == null) {
            // Start out enabled; a single file the user cannot download disables the button:
            downloadButtonAllEnabled = true;
            for (FileMetadata fmd : workingVersion.getFileMetadatas()) {
                if (!this.fileDownloadHelper.canDownloadFile(fmd)) {
                    downloadButtonAllEnabled = false;
                    break;
                }
            }
        }
        return downloadButtonAllEnabled;
    }
public boolean isDownloadSelectedButtonEnabled(){
if( this.selectedFiles == null || this.selectedFiles.isEmpty() ){
return false;
}
for (FileMetadata fmd : this.selectedFiles){
if (this.fileDownloadHelper.canDownloadFile(fmd)){
return true;
}
}
return false;
}
public boolean isFileAccessRequestMultiSignUpButtonRequired(){
if (isSessionUserAuthenticated()){
return false;
}
// only show button if dataset allows an access request
if (!dataset.isFileAccessRequest()){
return false;
}
for (FileMetadata fmd : workingVersion.getFileMetadatas()){
if (!this.fileDownloadHelper.canDownloadFile(fmd)){
return true;
}
}
return false;
}
public boolean isFileAccessRequestMultiSignUpButtonEnabled(){
if (isSessionUserAuthenticated()){
return false;
}
if( this.selectedRestrictedFiles == null || this.selectedRestrictedFiles.isEmpty() ){
return false;
}
// only show button if dataset allows an access request
if (!dataset.isFileAccessRequest()){
return false;
}
for (FileMetadata fmd : this.selectedRestrictedFiles){
if (!this.fileDownloadHelper.canDownloadFile(fmd)){
return true;
}
}
return false;
}
public boolean isDownloadPopupRequired() {
return FileUtil.isDownloadPopupRequired(workingVersion);
}
public boolean isRequestAccessPopupRequired() {
return FileUtil.isRequestAccessPopupRequired(workingVersion);
}
public String requestAccessMultipleFiles() {
if (selectedFiles.isEmpty()) {
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('selectFilesForRequestAccess').show()");
return "";
} else {
fileDownloadHelper.clearRequestAccessFiles();
for (FileMetadata fmd : selectedFiles){
fileDownloadHelper.addMultipleFilesForRequestAccess(fmd.getDataFile());
}
if (isRequestAccessPopupRequired()) {
RequestContext requestContext = RequestContext.getCurrentInstance();
requestContext.execute("PF('requestAccessPopup').show()");
return "";
} else {
//No popup required
fileDownloadHelper.requestAccessIndirect();
return "";
}
}
}
public boolean isSortButtonEnabled() {
/**
* @todo The "Sort" Button seems to stop responding to mouse clicks
* after a while so it can't be shipped in 4.2 and will be deferred, to
* be picked up in https://github.com/IQSS/dataverse/issues/2506
*/
return false;
}
public void updateFileListing(String fileSortField, String fileSortOrder) {
this.fileSortField = fileSortField;
this.fileSortOrder = fileSortOrder;
fileMetadatas = populateFileMetadatas();
}
private List<FileMetadata> populateFileMetadatas() {
if (isSortButtonEnabled()) {
List<FileMetadata> fileMetadatasToSet = new ArrayList<>();
Long datasetVersion = workingVersion.getId();
if (datasetVersion != null) {
int unlimited = 0;
int maxResults = unlimited;
List<FileMetadata> dataFilesNew = datafileService.findFileMetadataByDatasetVersionId(datasetVersion, maxResults, fileSortField, fileSortOrder);
fileMetadatasToSet.addAll(dataFilesNew);
}
return fileMetadatasToSet;
} else {
return new ArrayList<>();
}
}
public String getFileSortField() {
return fileSortField;
}
public void setFileSortField(String fileSortField) {
this.fileSortField = fileSortField;
}
public String getFileSortOrder() {
return fileSortOrder;
}
public void setFileSortOrder(String fileSortOrder) {
this.fileSortOrder = fileSortOrder;
}
public List<FileMetadata> getFileMetadatas() {
if (isSortButtonEnabled()) {
return fileMetadatas;
} else {
return new ArrayList<>();
}
}
public String getFileSortFieldName() {
return FileSortFieldAndOrder.label;
}
public String getFileSortFieldDate() {
return FileSortFieldAndOrder.createDate;
}
public String getFileSortFieldSize() {
return FileSortFieldAndOrder.size;
}
public String getFileSortFieldType() {
return FileSortFieldAndOrder.type;
}
public String getSortByAscending() {
return SortBy.ASCENDING;
}
public String getSortByDescending() {
return SortBy.DESCENDING;
}
PrivateUrl privateUrl;
public PrivateUrl getPrivateUrl() {
return privateUrl;
}
public void setPrivateUrl(PrivateUrl privateUrl) {
this.privateUrl = privateUrl;
}
public void initPrivateUrlPopUp() {
if (privateUrl != null) {
setPrivateUrlJustCreatedToFalse();
}
}
boolean privateUrlWasJustCreated;
public boolean isPrivateUrlWasJustCreated() {
return privateUrlWasJustCreated;
}
public void setPrivateUrlJustCreatedToFalse() {
privateUrlWasJustCreated = false;
}
public void createPrivateUrl() {
try {
PrivateUrl createdPrivateUrl = commandEngine.submit(new CreatePrivateUrlCommand(dvRequestService.getDataverseRequest(), dataset));
privateUrl = createdPrivateUrl;
JH.addMessage(FacesMessage.SEVERITY_INFO, BundleUtil.getStringFromBundle("dataset.privateurl.infoMessageAuthor", Arrays.asList(getPrivateUrlLink(privateUrl))));
privateUrlWasJustCreated = true;
} catch (CommandException ex) {
String msg = BundleUtil.getStringFromBundle("dataset.privateurl.noPermToCreate", PrivateUrlUtil.getRequiredPermissions(ex));
logger.info("Unable to create a Private URL for dataset id " + dataset.getId() + ". Message to user: " + msg + " Exception: " + ex);
JH.addErrorMessage(msg);
}
}
public void disablePrivateUrl() {
try {
commandEngine.submit(new DeletePrivateUrlCommand(dvRequestService.getDataverseRequest(), dataset));
privateUrl = null;
JH.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.privateurl.disabledSuccess"));
} catch (CommandException ex) {
logger.info("CommandException caught calling DeletePrivateUrlCommand: " + ex);
}
}
public boolean isUserCanCreatePrivateURL() {
return dataset.getLatestVersion().isDraft();
}
public String getPrivateUrlLink(PrivateUrl privateUrl) {
return privateUrl.getLink();
}
public FileDownloadHelper getFileDownloadHelper() {
return fileDownloadHelper;
}
public void setFileDownloadHelper(FileDownloadHelper fileDownloadHelper) {
this.fileDownloadHelper = fileDownloadHelper;
}
public FileDownloadServiceBean getFileDownloadService() {
return fileDownloadService;
}
public void setFileDownloadService(FileDownloadServiceBean fileDownloadService) {
this.fileDownloadService = fileDownloadService;
}
public GuestbookResponseServiceBean getGuestbookResponseService() {
return guestbookResponseService;
}
public void setGuestbookResponseService(GuestbookResponseServiceBean guestbookResponseService) {
this.guestbookResponseService = guestbookResponseService;
}
public WorldMapPermissionHelper getWorldMapPermissionHelper() {
return worldMapPermissionHelper;
}
public void setWorldMapPermissionHelper(WorldMapPermissionHelper worldMapPermissionHelper) {
this.worldMapPermissionHelper = worldMapPermissionHelper;
}
/**
* dataset title
* @return title of workingVersion
*/
public String getTitle() {
assert (null != workingVersion);
return workingVersion.getTitle();
}
/**
* dataset description
*
* @return description of workingVersion
*/
public String getDescription() {
return workingVersion.getDescriptionPlainText();
}
/**
* dataset authors
*
* @return list of author names
*/
public List<String> getDatasetAuthors() {
assert (workingVersion != null);
return workingVersion.getDatasetAuthorNames();
}
/**
* publisher (aka - name of root dataverse)
*
* @return the publisher of the version
*/
public String getPublisher() {
assert (null != workingVersion);
return workingVersion.getRootDataverseNameforCitation();
}
public void downloadRsyncScript() {
String bibFormatDowload = new BibtexCitation(workingVersion).toString();
FacesContext ctx = FacesContext.getCurrentInstance();
HttpServletResponse response = (HttpServletResponse) ctx.getExternalContext().getResponse();
response.setContentType("application/download");
String contentDispositionString;
contentDispositionString = "attachment;filename=" + rsyncScriptFilename;
response.setHeader("Content-Disposition", contentDispositionString);
try {
ServletOutputStream out = response.getOutputStream();
out.write(getRsyncScript().getBytes());
out.flush();
ctx.responseComplete();
} catch (IOException e) {
String error = "Problem getting bytes from rsync script: " + e;
logger.warning(error);
return;
}
// If the script has been successfully downloaded, lock the dataset:
String lockInfoMessage = "script downloaded";
DatasetLock lock = datasetService.addDatasetLock(dataset.getId(), DatasetLock.Reason.DcmUpload, session.getUser() != null ? ((AuthenticatedUser)session.getUser()).getId() : null, lockInfoMessage);
if (lock != null) {
dataset.addLock(lock);
} else {
logger.log(Level.WARNING, "Failed to lock the dataset (dataset id={0})", dataset.getId());
}
}
public void closeRsyncScriptPopup(CloseEvent event) {
finishRsyncScriptAction();
}
public String finishRsyncScriptAction() {
// This method is called when the user clicks on "Close" in the "Rsync Upload"
// popup. If they have successfully downloaded the rsync script, the
// dataset should now be locked; which means we should put up the
// "dcm upload in progress" message - that will be shown on the page
// until the rsync upload is completed and the dataset is unlocked.
if (isLocked()) {
JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("file.rsyncUpload.inProgressMessage.summary"), BundleUtil.getStringFromBundle("file.rsyncUpload.inProgressMessage.details"));
}
return "";
}
/**
* this method returns the dataset fields to be shown in the dataset summary
* on the dataset page.
     * It returns the default summary fields (subject, description, keywords, related publications and notes)
     * if the custom summary dataset fields have not been set; otherwise it returns the custom fields set by the sysadmins.
*
* @return the dataset fields to be shown in the dataset summary
*/
public List<DatasetField> getDatasetSummaryFields() {
customFields = settingsWrapper.getValueForKey(SettingsServiceBean.Key.CustomDatasetSummaryFields);
return DatasetUtil.getDatasetSummaryFields(workingVersion, customFields);
}
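    // Editorial note: the exact format of the CustomDatasetSummaryFields setting is an
    // assumption here - it is expected to hold a comma-separated list of dataset field
    // names (for example "dsDescription,subject,keyword"); when the setting is empty,
    // the default summary fields listed in the javadoc above are used.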
public List<ExternalTool> getConfigureToolsForDataFile(Long fileId) {
return getCachedToolsForDataFile(fileId, ExternalTool.Type.CONFIGURE);
}
public List<ExternalTool> getExploreToolsForDataFile(Long fileId) {
return getCachedToolsForDataFile(fileId, ExternalTool.Type.EXPLORE);
}
public List<ExternalTool> getCachedToolsForDataFile(Long fileId, ExternalTool.Type type) {
Map<Long, List<ExternalTool>> cachedToolsByFileId = new HashMap<>();
List<ExternalTool> externalTools = new ArrayList<>();
switch (type) {
case EXPLORE:
cachedToolsByFileId = exploreToolsByFileId;
externalTools = exploreTools;
break;
case CONFIGURE:
cachedToolsByFileId = configureToolsByFileId;
externalTools = configureTools;
break;
default:
break;
}
List<ExternalTool> cachedTools = cachedToolsByFileId.get(fileId);
if (cachedTools != null) { //if already queried before and added to list
return cachedTools;
}
DataFile dataFile = datafileService.find(fileId);
cachedTools = ExternalToolServiceBean.findExternalToolsByFile(externalTools, dataFile);
cachedToolsByFileId.put(fileId, cachedTools); //add to map so we don't have to do the lifting again
return cachedTools;
}
Boolean thisLatestReleasedVersion = null;
public boolean isThisLatestReleasedVersion() {
if (thisLatestReleasedVersion != null) {
return thisLatestReleasedVersion;
}
if (!workingVersion.isPublished()) {
thisLatestReleasedVersion = false;
return false;
}
DatasetVersion latestPublishedVersion = null;
Command<DatasetVersion> cmd = new GetLatestPublishedDatasetVersionCommand(dvRequestService.getDataverseRequest(), dataset);
try {
latestPublishedVersion = commandEngine.submit(cmd);
} catch (Exception ex) {
// whatever...
}
thisLatestReleasedVersion = workingVersion.equals(latestPublishedVersion);
return thisLatestReleasedVersion;
}
public String getJsonLd() {
if (isThisLatestReleasedVersion()) {
ExportService instance = ExportService.getInstance(settingsService);
String jsonLd = instance.getExportAsString(dataset, SchemaDotOrgExporter.NAME);
if (jsonLd != null) {
logger.fine("Returning cached schema.org JSON-LD.");
return jsonLd;
} else {
logger.fine("No cached schema.org JSON-LD available. Going to the database.");
return workingVersion.getJsonLd();
}
}
return "";
}
public void selectAllFiles() {
logger.fine("selectAllFiles called");
selectedFiles = workingVersion.getFileMetadatas();
}
public void clearSelection() {
logger.info("clearSelection called");
selectedFiles = Collections.EMPTY_LIST;
}
public void fileListingPaginatorListener(PageEvent event) {
setFilePaginatorPage(event.getPage());
}
public void refreshPaginator() {
FacesContext facesContext = FacesContext.getCurrentInstance();
org.primefaces.component.datatable.DataTable dt = (org.primefaces.component.datatable.DataTable) facesContext.getViewRoot().findComponent("datasetForm:tabView:filesTable");
setFilePaginatorPage(dt.getPage());
setRowsPerPage(dt.getRowsToRender());
}
}
| 1 | 38,419 | shouldn't every `Boolean` in this pr be `boolean`? | IQSS-dataverse | java |
@@ -619,6 +619,14 @@ class WebDriver(object):
else:
return self.execute(Command.GET_WINDOW_HANDLES)['value']
+ def minimize_window(self):
+ """
+        Minimizes the current window that webdriver is using
+ """
+ if self.w3c:
+ command = Command.W3C_MINIMIZE_WINDOW
+ self.execute(command, {"windowHandle": "current"})
+
def maximize_window(self):
"""
Maximizes the current window that webdriver is using | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The WebDriver implementation."""
import base64
import copy
import warnings
from contextlib import contextmanager
from .command import Command
from .webelement import WebElement
from .remote_connection import RemoteConnection
from .errorhandler import ErrorHandler
from .switch_to import SwitchTo
from .mobile import Mobile
from .file_detector import FileDetector, LocalFileDetector
from selenium.common.exceptions import (InvalidArgumentException,
WebDriverException)
from selenium.webdriver.common.by import By
from selenium.webdriver.common.html5.application_cache import ApplicationCache
try:
str = basestring
except NameError:
pass
_W3C_CAPABILITY_NAMES = frozenset([
'acceptInsecureCerts',
'browserName',
'browserVersion',
'platformName',
'pageLoadStrategy',
'proxy',
'setWindowRect',
'timeouts',
'unhandledPromptBehavior',
])
def _make_w3c_caps(caps):
"""Makes a W3C alwaysMatch capabilities object.
Filters out capability names that are not in the W3C spec. Spec-compliant
drivers will reject requests containing unknown capability names.
Moves the Firefox profile, if present, from the old location to the new Firefox
options object.
:Args:
- caps - A dictionary of capabilities requested by the caller.
"""
profile = caps.get('firefox_profile')
always_match = {}
    for k, v in caps.items():
if k in _W3C_CAPABILITY_NAMES or ':' in k:
always_match[k] = v
if profile:
moz_opts = always_match.get('moz:firefoxOptions', {})
# If it's already present, assume the caller did that intentionally.
if 'profile' not in moz_opts:
# Don't mutate the original capabilities.
new_opts = copy.deepcopy(moz_opts)
new_opts['profile'] = profile
always_match['moz:firefoxOptions'] = new_opts
return {"firstMatch": [{}], "alwaysMatch": always_match}
class WebDriver(object):
"""
Controls a browser by sending commands to a remote server.
This server is expected to be running the WebDriver wire protocol
as defined at
https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol
:Attributes:
- session_id - String ID of the browser session started and controlled by this WebDriver.
     - capabilities - Dictionary of effective capabilities of this browser session as returned
by the remote server. See https://github.com/SeleniumHQ/selenium/wiki/DesiredCapabilities
- command_executor - remote_connection.RemoteConnection object used to execute commands.
- error_handler - errorhandler.ErrorHandler object used to handle errors.
"""
_web_element_cls = WebElement
def __init__(self, command_executor='http://127.0.0.1:4444/wd/hub',
desired_capabilities=None, browser_profile=None, proxy=None,
keep_alive=False, file_detector=None):
"""
Create a new driver that will issue commands using the wire protocol.
:Args:
- command_executor - Either a string representing URL of the remote server or a custom
remote_connection.RemoteConnection object. Defaults to 'http://127.0.0.1:4444/wd/hub'.
- desired_capabilities - A dictionary of capabilities to request when
starting the browser session. Required parameter.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object.
Only used if Firefox is requested. Optional.
- proxy - A selenium.webdriver.common.proxy.Proxy object. The browser session will
be started with given proxy settings, if possible. Optional.
- keep_alive - Whether to configure remote_connection.RemoteConnection to use
HTTP keep-alive. Defaults to False.
- file_detector - Pass custom file detector object during instantiation. If None,
then default LocalFileDetector() will be used.
"""
if desired_capabilities is None:
raise WebDriverException("Desired Capabilities can't be None")
if not isinstance(desired_capabilities, dict):
raise WebDriverException("Desired Capabilities must be a dictionary")
if proxy is not None:
warnings.warn("Please use FirefoxOptions to set proxy",
DeprecationWarning)
proxy.add_to_capabilities(desired_capabilities)
self.command_executor = command_executor
if type(self.command_executor) is bytes or isinstance(self.command_executor, str):
self.command_executor = RemoteConnection(command_executor, keep_alive=keep_alive)
self._is_remote = True
self.session_id = None
self.capabilities = {}
self.error_handler = ErrorHandler()
self.start_client()
if browser_profile is not None:
warnings.warn("Please use FirefoxOptions to set browser profile",
DeprecationWarning)
self.start_session(desired_capabilities, browser_profile)
self._switch_to = SwitchTo(self)
self._mobile = Mobile(self)
self.file_detector = file_detector or LocalFileDetector()
def __repr__(self):
return '<{0.__module__}.{0.__name__} (session="{1}")>'.format(
type(self), self.session_id)
@contextmanager
def file_detector_context(self, file_detector_class, *args, **kwargs):
"""
Overrides the current file detector (if necessary) in limited context.
Ensures the original file detector is set afterwards.
Example:
with webdriver.file_detector_context(UselessFileDetector):
someinput.send_keys('/etc/hosts')
:Args:
- file_detector_class - Class of the desired file detector. If the class is different
from the current file_detector, then the class is instantiated with args and kwargs
and used as a file detector during the duration of the context manager.
- args - Optional arguments that get passed to the file detector class during
instantiation.
- kwargs - Keyword arguments, passed the same way as args.
"""
last_detector = None
if not isinstance(self.file_detector, file_detector_class):
last_detector = self.file_detector
self.file_detector = file_detector_class(*args, **kwargs)
try:
yield
finally:
if last_detector is not None:
self.file_detector = last_detector
@property
def mobile(self):
return self._mobile
@property
def name(self):
"""Returns the name of the underlying browser for this instance.
:Usage:
- driver.name
"""
if 'browserName' in self.capabilities:
return self.capabilities['browserName']
else:
raise KeyError('browserName not specified in session capabilities')
def start_client(self):
"""
Called before starting a new session. This method may be overridden
to define custom startup behavior.
"""
pass
def stop_client(self):
"""
Called after executing a quit command. This method may be overridden
to define custom shutdown behavior.
"""
pass
def start_session(self, capabilities, browser_profile=None):
"""
Creates a new session with the desired capabilities.
:Args:
         - capabilities - A dictionary of capabilities to request when starting the session.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.
"""
if not isinstance(capabilities, dict):
raise InvalidArgumentException("Capabilities must be a dictionary")
if browser_profile:
if "moz:firefoxOptions" in capabilities:
capabilities["moz:firefoxOptions"]["profile"] = browser_profile.encoded
else:
capabilities.update({'firefox_profile': browser_profile.encoded})
w3c_caps = _make_w3c_caps(capabilities)
parameters = {"capabilities": w3c_caps,
"desiredCapabilities": capabilities}
response = self.execute(Command.NEW_SESSION, parameters)
if 'sessionId' not in response:
response = response['value']
self.session_id = response['sessionId']
self.capabilities = response.get('value')
# if capabilities is none we are probably speaking to
# a W3C endpoint
if self.capabilities is None:
self.capabilities = response.get('capabilities')
# Double check to see if we have a W3C Compliant browser
self.w3c = response.get('status') is None
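        # Editorial note: legacy JSON Wire Protocol servers include a 'status' field
        # in their responses, while W3C-compliant remote ends do not, so its absence
        # is what marks this session as W3C above.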
def _wrap_value(self, value):
if isinstance(value, dict):
converted = {}
for key, val in value.items():
converted[key] = self._wrap_value(val)
return converted
elif isinstance(value, self._web_element_cls):
return {'ELEMENT': value.id, 'element-6066-11e4-a52e-4f735466cecf': value.id}
elif isinstance(value, list):
return list(self._wrap_value(item) for item in value)
else:
return value
def create_web_element(self, element_id):
"""Creates a web element with the specified `element_id`."""
return self._web_element_cls(self, element_id, w3c=self.w3c)
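    # _unwrap_value (below) is the inverse of _wrap_value: element reference
    # dicts found in server responses are turned back into WebElement objects
    # via create_web_element, recursing into nested dicts and lists.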
def _unwrap_value(self, value):
if isinstance(value, dict):
if 'ELEMENT' in value or 'element-6066-11e4-a52e-4f735466cecf' in value:
wrapped_id = value.get('ELEMENT', None)
if wrapped_id:
return self.create_web_element(value['ELEMENT'])
else:
return self.create_web_element(value['element-6066-11e4-a52e-4f735466cecf'])
else:
for key, val in value.items():
value[key] = self._unwrap_value(val)
return value
elif isinstance(value, list):
return list(self._unwrap_value(item) for item in value)
else:
return value
def execute(self, driver_command, params=None):
"""
Sends a command to be executed by a command.CommandExecutor.
:Args:
- driver_command: The name of the command to execute as a string.
- params: A dictionary of named parameters to send with the command.
:Returns:
The command's JSON response loaded into a dictionary object.
"""
if self.session_id is not None:
if not params:
params = {'sessionId': self.session_id}
elif 'sessionId' not in params:
params['sessionId'] = self.session_id
params = self._wrap_value(params)
response = self.command_executor.execute(driver_command, params)
if response:
self.error_handler.check_response(response)
response['value'] = self._unwrap_value(
response.get('value', None))
return response
# If the server doesn't send a response, assume the command was
# a success
return {'success': 0, 'value': None, 'sessionId': self.session_id}
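    # For illustration, get() below reduces to a single call such as
    #   self.execute(Command.GET, {'url': url})
    # with the session id injected automatically and any element references in
    # the response unwrapped.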
def get(self, url):
"""
Loads a web page in the current browser session.
"""
self.execute(Command.GET, {'url': url})
@property
def title(self):
"""Returns the title of the current page.
:Usage:
driver.title
"""
resp = self.execute(Command.GET_TITLE)
return resp['value'] if resp['value'] is not None else ""
def find_element_by_id(self, id_):
"""Finds an element by id.
:Args:
- id\_ - The id of the element to be found.
:Usage:
driver.find_element_by_id('foo')
"""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
"""
Finds multiple elements by id.
:Args:
- id\_ - The id of the elements to be found.
:Usage:
driver.find_elements_by_id('foo')
"""
return self.find_elements(by=By.ID, value=id_)
def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:Args:
- xpath - The xpath locator of the element to find.
:Usage:
driver.find_element_by_xpath('//div/td[1]')
"""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""
Finds multiple elements by xpath.
:Args:
- xpath - The xpath locator of the elements to be found.
:Usage:
driver.find_elements_by_xpath("//div[contains(@class, 'foo')]")
"""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_link_text(self, link_text):
"""
Finds an element by link text.
:Args:
- link_text: The text of the element to be found.
:Usage:
driver.find_element_by_link_text('Sign In')
"""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, text):
"""
Finds elements by link text.
:Args:
- link_text: The text of the elements to be found.
:Usage:
driver.find_elements_by_link_text('Sign In')
"""
return self.find_elements(by=By.LINK_TEXT, value=text)
def find_element_by_partial_link_text(self, link_text):
"""
Finds an element by a partial match of its link text.
:Args:
- link_text: The text of the element to partially match on.
:Usage:
driver.find_element_by_partial_link_text('Sign')
"""
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
"""
Finds elements by a partial match of their link text.
:Args:
         - link_text: The text of the element to partially match on.
:Usage:
            driver.find_elements_by_partial_link_text('Sign')
"""
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_name(self, name):
"""
Finds an element by name.
:Args:
- name: The name of the element to find.
:Usage:
driver.find_element_by_name('foo')
"""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
"""
Finds elements by name.
:Args:
- name: The name of the elements to find.
:Usage:
driver.find_elements_by_name('foo')
"""
return self.find_elements(by=By.NAME, value=name)
def find_element_by_tag_name(self, name):
"""
Finds an element by tag name.
:Args:
- name: The tag name of the element to find.
:Usage:
driver.find_element_by_tag_name('foo')
"""
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
"""
Finds elements by tag name.
:Args:
         - name: The tag name to use when finding elements.
:Usage:
driver.find_elements_by_tag_name('foo')
"""
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_class_name(self, name):
"""
Finds an element by class name.
:Args:
- name: The class name of the element to find.
:Usage:
driver.find_element_by_class_name('foo')
"""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""
Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Usage:
driver.find_elements_by_class_name('foo')
"""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""
Finds an element by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
driver.find_element_by_css_selector('#foo')
"""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""
Finds elements by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
driver.find_elements_by_css_selector('.foo')
"""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def execute_script(self, script, *args):
"""
Synchronously Executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_script('document.title')
"""
converted_args = list(args)
command = None
if self.w3c:
command = Command.W3C_EXECUTE_SCRIPT
else:
command = Command.EXECUTE_SCRIPT
return self.execute(command, {
'script': script,
'args': converted_args})['value']
def execute_async_script(self, script, *args):
"""
Asynchronously Executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_async_script('document.title')
"""
converted_args = list(args)
if self.w3c:
command = Command.W3C_EXECUTE_SCRIPT_ASYNC
else:
command = Command.EXECUTE_ASYNC_SCRIPT
return self.execute(command, {
'script': script,
'args': converted_args})['value']
@property
def current_url(self):
"""
Gets the URL of the current page.
:Usage:
driver.current_url
"""
return self.execute(Command.GET_CURRENT_URL)['value']
@property
def page_source(self):
"""
Gets the source of the current page.
:Usage:
driver.page_source
"""
return self.execute(Command.GET_PAGE_SOURCE)['value']
def close(self):
"""
Closes the current window.
:Usage:
driver.close()
"""
self.execute(Command.CLOSE)
def quit(self):
"""
Quits the driver and closes every associated window.
:Usage:
driver.quit()
"""
try:
self.execute(Command.QUIT)
finally:
self.stop_client()
@property
def current_window_handle(self):
"""
Returns the handle of the current window.
:Usage:
driver.current_window_handle
"""
if self.w3c:
return self.execute(Command.W3C_GET_CURRENT_WINDOW_HANDLE)['value']
else:
return self.execute(Command.GET_CURRENT_WINDOW_HANDLE)['value']
@property
def window_handles(self):
"""
Returns the handles of all windows within the current session.
:Usage:
driver.window_handles
"""
if self.w3c:
return self.execute(Command.W3C_GET_WINDOW_HANDLES)['value']
else:
return self.execute(Command.GET_WINDOW_HANDLES)['value']
def maximize_window(self):
"""
Maximizes the current window that webdriver is using
"""
command = Command.MAXIMIZE_WINDOW
if self.w3c:
command = Command.W3C_MAXIMIZE_WINDOW
self.execute(command, {"windowHandle": "current"})
@property
def switch_to(self):
return self._switch_to
# Target Locators
def switch_to_active_element(self):
""" Deprecated use driver.switch_to.active_element
"""
warnings.warn("use driver.switch_to.active_element instead", DeprecationWarning)
return self._switch_to.active_element
def switch_to_window(self, window_name):
""" Deprecated use driver.switch_to.window
"""
warnings.warn("use driver.switch_to.window instead", DeprecationWarning)
self._switch_to.window(window_name)
def switch_to_frame(self, frame_reference):
""" Deprecated use driver.switch_to.frame
"""
warnings.warn("use driver.switch_to.frame instead", DeprecationWarning)
self._switch_to.frame(frame_reference)
def switch_to_default_content(self):
""" Deprecated use driver.switch_to.default_content
"""
warnings.warn("use driver.switch_to.default_content instead", DeprecationWarning)
self._switch_to.default_content()
def switch_to_alert(self):
""" Deprecated use driver.switch_to.alert
"""
warnings.warn("use driver.switch_to.alert instead", DeprecationWarning)
return self._switch_to.alert
# Navigation
def back(self):
"""
Goes one step backward in the browser history.
:Usage:
driver.back()
"""
self.execute(Command.GO_BACK)
def forward(self):
"""
Goes one step forward in the browser history.
:Usage:
driver.forward()
"""
self.execute(Command.GO_FORWARD)
def refresh(self):
"""
Refreshes the current page.
:Usage:
driver.refresh()
"""
self.execute(Command.REFRESH)
# Options
def get_cookies(self):
"""
Returns a set of dictionaries, corresponding to cookies visible in the current session.
:Usage:
driver.get_cookies()
"""
return self.execute(Command.GET_ALL_COOKIES)['value']
def get_cookie(self, name):
"""
Get a single cookie by name. Returns the cookie if found, None if not.
:Usage:
driver.get_cookie('my_cookie')
"""
cookies = self.get_cookies()
for cookie in cookies:
if cookie['name'] == name:
return cookie
return None
def delete_cookie(self, name):
"""
Deletes a single cookie with the given name.
:Usage:
driver.delete_cookie('my_cookie')
"""
self.execute(Command.DELETE_COOKIE, {'name': name})
def delete_all_cookies(self):
"""
Delete all cookies in the scope of the session.
:Usage:
driver.delete_all_cookies()
"""
self.execute(Command.DELETE_ALL_COOKIES)
def add_cookie(self, cookie_dict):
"""
Adds a cookie to your current session.
:Args:
- cookie_dict: A dictionary object, with required keys - "name" and "value";
optional keys - "path", "domain", "secure", "expiry"
Usage:
driver.add_cookie({'name' : 'foo', 'value' : 'bar'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/', 'secure':True})
"""
self.execute(Command.ADD_COOKIE, {'cookie': cookie_dict})
# Timeouts
def implicitly_wait(self, time_to_wait):
"""
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
driver.implicitly_wait(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {
'implicit': int(float(time_to_wait) * 1000)})
else:
self.execute(Command.IMPLICIT_WAIT, {
'ms': float(time_to_wait) * 1000})
def set_script_timeout(self, time_to_wait):
"""
Set the amount of time that the script should wait during an
execute_async_script call before throwing an error.
:Args:
- time_to_wait: The amount of time to wait (in seconds)
:Usage:
driver.set_script_timeout(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {
'script': int(float(time_to_wait) * 1000)})
else:
self.execute(Command.SET_SCRIPT_TIMEOUT, {
'ms': float(time_to_wait) * 1000})
def set_page_load_timeout(self, time_to_wait):
"""
Set the amount of time to wait for a page load to complete
before throwing an error.
:Args:
- time_to_wait: The amount of time to wait
:Usage:
driver.set_page_load_timeout(30)
"""
try:
self.execute(Command.SET_TIMEOUTS, {
'pageLoad': int(float(time_to_wait) * 1000)})
except WebDriverException:
self.execute(Command.SET_TIMEOUTS, {
'ms': float(time_to_wait) * 1000,
'type': 'page load'})
def find_element(self, by=By.ID, value=None):
"""
'Private' method used by the find_element_by_* methods.
:Usage:
Use the corresponding find_element_by_* instead of this.
:rtype: WebElement
"""
if self.w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self.execute(Command.FIND_ELEMENT, {
'using': by,
'value': value})['value']
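    # Note: against a W3C-compliant remote end, the locator conversion above (and
    # the identical one in find_elements below) rewrites legacy strategies, e.g.
    # (By.CLASS_NAME, 'foo') becomes (By.CSS_SELECTOR, '.foo') and
    # (By.NAME, 'q') becomes (By.CSS_SELECTOR, '[name="q"]').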
def find_elements(self, by=By.ID, value=None):
"""
'Private' method used by the find_elements_by_* methods.
:Usage:
Use the corresponding find_elements_by_* instead of this.
:rtype: list of WebElement
"""
if self.w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self.execute(Command.FIND_ELEMENTS, {
'using': by,
'value': value})['value']
@property
def desired_capabilities(self):
"""
returns the drivers current desired capabilities being used
"""
return self.capabilities
def get_screenshot_as_file(self, filename):
"""
Saves a screenshot of the current window to a PNG image file. Returns
False if there is any IOError, else returns True. Use full paths in
your filename.
:Args:
- filename: The full path you wish to save your screenshot to. This
should end with a `.png` extension.
:Usage:
driver.get_screenshot_as_file('/Screenshots/foo.png')
"""
if not filename.lower().endswith('.png'):
warnings.warn("name used for saved screenshot does not match file "
"type. It should end with a `.png` extension", UserWarning)
png = self.get_screenshot_as_png()
try:
with open(filename, 'wb') as f:
f.write(png)
except IOError:
return False
finally:
del png
return True
def save_screenshot(self, filename):
"""
Saves a screenshot of the current window to a PNG image file. Returns
False if there is any IOError, else returns True. Use full paths in
your filename.
:Args:
- filename: The full path you wish to save your screenshot to. This
should end with a `.png` extension.
:Usage:
driver.save_screenshot('/Screenshots/foo.png')
"""
return self.get_screenshot_as_file(filename)
def get_screenshot_as_png(self):
"""
Gets the screenshot of the current window as a binary data.
:Usage:
driver.get_screenshot_as_png()
"""
return base64.b64decode(self.get_screenshot_as_base64().encode('ascii'))
def get_screenshot_as_base64(self):
"""
Gets the screenshot of the current window as a base64 encoded string
which is useful in embedded images in HTML.
:Usage:
driver.get_screenshot_as_base64()
"""
return self.execute(Command.SCREENSHOT)['value']
def set_window_size(self, width, height, windowHandle='current'):
"""
Sets the width and height of the current window. (window.resizeTo)
:Args:
- width: the width in pixels to set the window to
- height: the height in pixels to set the window to
:Usage:
driver.set_window_size(800,600)
"""
command = Command.SET_WINDOW_SIZE
if self.w3c:
command = Command.W3C_SET_WINDOW_SIZE
self.execute(command, {
'width': int(width),
'height': int(height),
'windowHandle': windowHandle})
def get_window_size(self, windowHandle='current'):
"""
Gets the width and height of the current window.
:Usage:
driver.get_window_size()
"""
command = Command.GET_WINDOW_SIZE
if self.w3c:
command = Command.W3C_GET_WINDOW_SIZE
size = self.execute(command, {'windowHandle': windowHandle})
if size.get('value', None) is not None:
return size['value']
else:
return size
def set_window_position(self, x, y, windowHandle='current'):
"""
Sets the x,y position of the current window. (window.moveTo)
:Args:
- x: the x-coordinate in pixels to set the window position
- y: the y-coordinate in pixels to set the window position
:Usage:
driver.set_window_position(0,0)
"""
if self.w3c:
return self.execute(Command.W3C_SET_WINDOW_POSITION, {
'x': int(x),
'y': int(y)
})
else:
self.execute(Command.SET_WINDOW_POSITION,
{
'x': int(x),
'y': int(y),
'windowHandle': windowHandle
})
def get_window_position(self, windowHandle='current'):
"""
Gets the x,y position of the current window.
:Usage:
driver.get_window_position()
"""
if self.w3c:
return self.execute(Command.W3C_GET_WINDOW_POSITION)['value']
else:
return self.execute(Command.GET_WINDOW_POSITION, {
'windowHandle': windowHandle})['value']
def get_window_rect(self):
"""
Gets the x, y coordinates of the window as well as height and width of
the current window.
:Usage:
driver.get_window_rect()
"""
return self.execute(Command.GET_WINDOW_RECT)['value']
def set_window_rect(self, x=None, y=None, width=None, height=None):
"""
Sets the x, y coordinates of the window as well as height and width of
the current window.
:Usage:
driver.set_window_rect(x=10, y=10)
driver.set_window_rect(width=100, height=200)
driver.set_window_rect(x=10, y=10, width=100, height=200)
"""
if (x is None and y is None) and (height is None and width is None):
raise InvalidArgumentException("x and y or height and width need values")
return self.execute(Command.SET_WINDOW_RECT, {"x": x, "y": y,
"width": width,
"height": height})['value']
@property
def file_detector(self):
return self._file_detector
@file_detector.setter
def file_detector(self, detector):
"""
Set the file detector to be used when sending keyboard input.
By default, this is set to a file detector that does nothing.
see FileDetector
see LocalFileDetector
see UselessFileDetector
:Args:
- detector: The detector to use. Must not be None.
"""
if detector is None:
raise WebDriverException("You may not set a file detector that is null")
if not isinstance(detector, FileDetector):
raise WebDriverException("Detector has to be instance of FileDetector")
self._file_detector = detector
@property
def orientation(self):
"""
Gets the current orientation of the device
:Usage:
orientation = driver.orientation
"""
return self.execute(Command.GET_SCREEN_ORIENTATION)['value']
@orientation.setter
def orientation(self, value):
"""
Sets the current orientation of the device
:Args:
- value: orientation to set it to.
:Usage:
driver.orientation = 'landscape'
"""
allowed_values = ['LANDSCAPE', 'PORTRAIT']
if value.upper() in allowed_values:
self.execute(Command.SET_SCREEN_ORIENTATION, {'orientation': value})
else:
raise WebDriverException("You can only set the orientation to 'LANDSCAPE' and 'PORTRAIT'")
@property
def application_cache(self):
""" Returns a ApplicationCache Object to interact with the browser app cache"""
return ApplicationCache(self)
@property
def log_types(self):
"""
Gets a list of the available log types
:Usage:
driver.log_types
"""
return self.execute(Command.GET_AVAILABLE_LOG_TYPES)['value']
def get_log(self, log_type):
"""
Gets the log for a given log type
:Args:
         - log_type: type of log that will be returned
:Usage:
driver.get_log('browser')
driver.get_log('driver')
driver.get_log('client')
driver.get_log('server')
"""
return self.execute(Command.GET_LOG, {'type': log_type})['value']
| 1 | 14,687 | This `if` is not necessary | SeleniumHQ-selenium | rb |
@@ -45,5 +45,7 @@ namespace Datadog.Trace.ClrProfiler
public const string GenericParameterTask = "System.Threading.Tasks.Task`1<T>";
public const string ObjectTask = "System.Threading.Tasks.Task`1<System.Object>";
public const string Int32Task = "System.Threading.Tasks.Task`1<System.Int32>";
+
+ public const string HttpConfigurationAction = "System.Action`1<System.Web.Http.HttpConfiguration>";
}
} | 1 | // <copyright file="ClrNames.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
namespace Datadog.Trace.ClrProfiler
{
internal static class ClrNames
{
public const string Ignore = "_";
public const string Void = "System.Void";
public const string Object = "System.Object";
public const string Bool = "System.Boolean";
public const string String = "System.String";
public const string SByte = "System.SByte";
public const string Byte = "System.Byte";
public const string Int16 = "System.Int16";
public const string Int32 = "System.Int32";
public const string Int64 = "System.Int64";
public const string UInt16 = "System.UInt16";
public const string UInt32 = "System.UInt32";
public const string UInt64 = "System.UInt64";
public const string TimeSpan = "System.TimeSpan";
public const string Stream = "System.IO.Stream";
public const string Task = "System.Threading.Tasks.Task";
public const string CancellationToken = "System.Threading.CancellationToken";
// ReSharper disable once InconsistentNaming
public const string IAsyncResult = "System.IAsyncResult";
public const string AsyncCallback = "System.AsyncCallback";
public const string HttpRequestMessage = "System.Net.Http.HttpRequestMessage";
public const string HttpResponseMessage = "System.Net.Http.HttpResponseMessage";
public const string HttpResponseMessageTask = "System.Threading.Tasks.Task`1<System.Net.Http.HttpResponseMessage>";
public const string GenericTask = "System.Threading.Tasks.Task`1";
public const string IgnoreGenericTask = "System.Threading.Tasks.Task`1<_>";
public const string GenericParameterTask = "System.Threading.Tasks.Task`1<T>";
public const string ObjectTask = "System.Threading.Tasks.Task`1<System.Object>";
public const string Int32Task = "System.Threading.Tasks.Task`1<System.Int32>";
}
}
| 1 | 22,016 | Are you using this anywhere? (I couldn't find it.) | DataDog-dd-trace-dotnet | .cs |
@@ -250,7 +250,14 @@ func (s *Step) populate(ctx context.Context) dErr {
return err
}
+func (s *Step) recordStepTime(startTime time.Time) {
+ endTime := time.Now()
+ s.w.recordStepTime(s.name, startTime, endTime)
+}
+
func (s *Step) run(ctx context.Context) dErr {
+ startTime := time.Now()
+ defer s.recordStepTime(startTime)
impl, err := s.stepImpl()
if err != nil {
return s.wrapRunError(err) | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daisy
import (
"context"
"reflect"
"strings"
"time"
)
type stepImpl interface {
// populate modifies the step type field values.
// populate should set defaults, extend GCE partial URLs to full partial
// URLs (partial URLs including the "projects/<project>" prefix), etc.
// This should not perform value validation.
// Returns any parsing errors.
populate(ctx context.Context, s *Step) dErr
validate(ctx context.Context, s *Step) dErr
run(ctx context.Context, s *Step) dErr
}
// Step is a single daisy workflow step.
type Step struct {
name string
w *Workflow
// Time to wait for this step to complete (default 10m).
// Must be parsable by https://golang.org/pkg/time/#ParseDuration.
Timeout string `json:",omitempty"`
timeout time.Duration
// Only one of the below fields should exist for each instance of Step.
AttachDisks *AttachDisks `json:",omitempty"`
DetachDisks *DetachDisks `json:",omitempty"`
CreateDisks *CreateDisks `json:",omitempty"`
CreateForwardingRules *CreateForwardingRules `json:",omitempty"`
CreateFirewallRules *CreateFirewallRules `json:",omitempty"`
CreateImages *CreateImages `json:",omitempty"`
CreateInstances *CreateInstances `json:",omitempty"`
CreateNetworks *CreateNetworks `json:",omitempty"`
CreateSubnetworks *CreateSubnetworks `json:",omitempty"`
CreateTargetInstances *CreateTargetInstances `json:",omitempty"`
CopyGCSObjects *CopyGCSObjects `json:",omitempty"`
ResizeDisks *ResizeDisks `json:",omitempty"`
StartInstances *StartInstances `json:",omitempty"`
StopInstances *StopInstances `json:",omitempty"`
DeleteResources *DeleteResources `json:",omitempty"`
DeprecateImages *DeprecateImages `json:",omitempty"`
IncludeWorkflow *IncludeWorkflow `json:",omitempty"`
SubWorkflow *SubWorkflow `json:",omitempty"`
WaitForInstancesSignal *WaitForInstancesSignal `json:",omitempty"`
// Used for unit tests.
testType stepImpl
}
// NewStep creates a Step with the given name and timeout in the specified workflow
func NewStep(name string, w *Workflow, timeout time.Duration) *Step {
return &Step{name: name, w: w, timeout: timeout}
}
func (s *Step) stepImpl() (stepImpl, dErr) {
var result stepImpl
matchCount := 0
if s.AttachDisks != nil {
matchCount++
result = s.AttachDisks
}
if s.DetachDisks != nil {
matchCount++
result = s.DetachDisks
}
if s.CreateDisks != nil {
matchCount++
result = s.CreateDisks
}
if s.CreateForwardingRules != nil {
matchCount++
result = s.CreateForwardingRules
}
if s.CreateFirewallRules != nil {
matchCount++
result = s.CreateFirewallRules
}
if s.CreateImages != nil {
matchCount++
result = s.CreateImages
}
if s.CreateInstances != nil {
matchCount++
result = s.CreateInstances
}
if s.CreateNetworks != nil {
matchCount++
result = s.CreateNetworks
}
if s.CreateSubnetworks != nil {
matchCount++
result = s.CreateSubnetworks
}
if s.CreateTargetInstances != nil {
matchCount++
result = s.CreateTargetInstances
}
if s.CopyGCSObjects != nil {
matchCount++
result = s.CopyGCSObjects
}
if s.ResizeDisks != nil {
matchCount++
result = s.ResizeDisks
}
if s.StartInstances != nil {
matchCount++
result = s.StartInstances
}
if s.StopInstances != nil {
matchCount++
result = s.StopInstances
}
if s.DeleteResources != nil {
matchCount++
result = s.DeleteResources
}
if s.DeprecateImages != nil {
matchCount++
result = s.DeprecateImages
}
if s.IncludeWorkflow != nil {
matchCount++
result = s.IncludeWorkflow
}
if s.SubWorkflow != nil {
matchCount++
result = s.SubWorkflow
}
if s.WaitForInstancesSignal != nil {
matchCount++
result = s.WaitForInstancesSignal
}
if s.testType != nil {
matchCount++
result = s.testType
}
if matchCount == 0 {
return nil, errf("no step type defined")
}
if matchCount > 1 {
return nil, errf("multiple step types defined")
}
return result, nil
}
func (s *Step) depends(other *Step) bool {
if s == nil || other == nil || s.w == nil || s.w != other.w {
return false
}
deps := s.w.Dependencies
steps := s.w.Steps
q := deps[s.name]
seen := map[string]bool{}
// Do a BFS search on s's dependencies, looking for the target dependency. Don't revisit visited dependencies.
for i := 0; i < len(q); i++ {
name := q[i]
if seen[name] {
continue
}
seen[name] = true
if steps[name] == other {
return true
}
for _, dep := range deps[name] {
q = append(q, dep)
}
}
return false
}
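// For illustration, with hypothetical steps s1, s2, s3 and
//   w.Dependencies = map[string][]string{"s2": {"s1"}, "s3": {"s2"}}
// the BFS above makes steps["s3"].depends(steps["s1"]) true (transitively via
// s2), while steps["s1"].depends(steps["s3"]) stays false.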
// nestedDepends determines if s depends on other, taking into account the recursive, nested nature of
// workflows, i.e. workflows in IncludeWorkflow and SubWorkflow.
// Example: if s depends on an IncludeWorkflow whose workflow contains other, then s depends on other.
func (s *Step) nestedDepends(other *Step) bool {
sChain := s.getChain()
oChain := other.getChain()
// If sChain and oChain don't share the same root workflow, then there is no dependency relationship.
if len(sChain) == 0 || len(oChain) == 0 || sChain[0].w != oChain[0].w {
return false
}
// Find where the step chains diverge.
// A divergence in the chains indicates sibling steps, where we can check dependency.
// We want to see if s's branch depends on other's branch.
var sStep, oStep *Step
for i := 0; i < minInt(len(sChain), len(oChain)); i++ {
sStep = sChain[i]
oStep = oChain[i]
if sStep != oStep {
break
}
}
return sStep.depends(oStep)
}
// getChain returns the step chain getting to a step. A link in the chain represents an IncludeWorkflow step, a
// SubWorkflow step, or the step itself.
// For example, workflow A has a step s1 which includes workflow B. B has a step s2 which subworkflows C. Finally,
// C has a step s3. s3.getChain() will return []*Step{s1, s2, s3}
func (s *Step) getChain() []*Step {
if s == nil || s.w == nil {
return nil
}
if s.w.parent == nil {
return []*Step{s}
}
for _, st := range s.w.parent.Steps {
if st.IncludeWorkflow != nil && st.IncludeWorkflow.Workflow == s.w {
return append(st.getChain(), s)
}
if st.SubWorkflow != nil && st.SubWorkflow.Workflow == s.w {
return append(st.getChain(), s)
}
}
// We shouldn't get here.
return nil
}
func (s *Step) populate(ctx context.Context) dErr {
s.w.LogWorkflowInfo("Populating step %q", s.name)
impl, err := s.stepImpl()
if err != nil {
return s.wrapPopulateError(err)
}
if err = impl.populate(ctx, s); err != nil {
err = s.wrapPopulateError(err)
}
return err
}
func (s *Step) run(ctx context.Context) dErr {
impl, err := s.stepImpl()
if err != nil {
return s.wrapRunError(err)
}
var st string
if t := reflect.TypeOf(impl); t.Kind() == reflect.Ptr {
st = t.Elem().Name()
} else {
st = t.Name()
}
s.w.LogWorkflowInfo("Running step %q (%s)", s.name, st)
if err = impl.run(ctx, s); err != nil {
return s.wrapRunError(err)
}
select {
case <-s.w.Cancel:
default:
s.w.LogWorkflowInfo("Step %q (%s) successfully finished.", s.name, st)
}
return nil
}
func (s *Step) validate(ctx context.Context) dErr {
s.w.LogWorkflowInfo("Validating step %q", s.name)
if !rfc1035Rgx.MatchString(strings.ToLower(s.name)) {
return s.wrapValidateError(errf("step name must start with a letter and only contain letters, numbers, and hyphens"))
}
impl, err := s.stepImpl()
if err != nil {
return s.wrapValidateError(err)
}
if err = impl.validate(ctx, s); err != nil {
return s.wrapValidateError(err)
}
return nil
}
func (s *Step) wrapPopulateError(e dErr) dErr {
return errf("step %q populate error: %s", s.name, e)
}
func (s *Step) wrapRunError(e dErr) dErr {
return errf("step %q run error: %s", s.name, e)
}
func (s *Step) wrapValidateError(e dErr) dErr {
return errf("step %q validation error: %s", s.name, e)
}
| 1 | 8,974 | Can be inlined | GoogleCloudPlatform-compute-image-tools | go |
@@ -3,6 +3,6 @@ require 'spec_helper'
describe Api::V1::CompletionsController, '#show' do
it 'returns a 401 when users are not authenticated' do
get :index
- response.code.should eq '401'
+ expect(response.code).to eq '401'
end
end | 1 | require 'spec_helper'
describe Api::V1::CompletionsController, '#show' do
it 'returns a 401 when users are not authenticated' do
get :index
response.code.should eq '401'
end
end
| 1 | 9,666 | Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping. | thoughtbot-upcase | rb |
@@ -1,3 +1,4 @@
+import json
import warnings
import colander | 1 | import warnings
import colander
from kinto.core.schema import (Any, HeaderField, QueryField, HeaderQuotedInteger,
FieldList, TimeStamp, URL)
from kinto.core.errors import ErrorSchema
from kinto.core.utils import native_value
POSTGRESQL_MAX_INTEGER_VALUE = 2**63
positive_big_integer = colander.Range(min=0, max=POSTGRESQL_MAX_INTEGER_VALUE)
class TimeStamp(TimeStamp):
"""This schema is deprecated, you shoud use `kinto.core.schema.TimeStamp` instead."""
def __init__(self, *args, **kwargs):
message = ('`kinto.core.resource.schema.TimeStamp` is deprecated, '
'use `kinto.core.schema.TimeStamp` instead.')
warnings.warn(message, DeprecationWarning)
super().__init__(*args, **kwargs)
class URL(URL):
"""This schema is deprecated, you shoud use `kinto.core.schema.URL` instead."""
def __init__(self, *args, **kwargs):
message = ('`kinto.core.resource.schema.URL` is deprecated, '
'use `kinto.core.schema.URL` instead.')
warnings.warn(message, DeprecationWarning)
super().__init__(*args, **kwargs)
# Resource related schemas
class ResourceSchema(colander.MappingSchema):
"""Base resource schema, with *Cliquet* specific built-in options."""
class Options:
"""
Resource schema options.
        This is meant to be overridden for changing values:
.. code-block:: python
class Product(ResourceSchema):
reference = colander.SchemaNode(colander.String())
class Options:
readonly_fields = ('reference',)
"""
readonly_fields = tuple()
"""Fields that cannot be updated. Values for fields will have to be
provided either during record creation, through default values using
        the ``missing`` attribute, or by implementing custom logic in
:meth:`kinto.core.resource.UserResource.process_record`.
"""
preserve_unknown = True
"""Define if unknown fields should be preserved or not.
The resource is schema-less by default. In other words, any field name
will be accepted on records. Set this to ``False`` in order to limit
the accepted fields to the ones defined in the schema.
"""
@classmethod
def get_option(cls, attr):
default_value = getattr(ResourceSchema.Options, attr)
return getattr(cls.Options, attr, default_value)
@classmethod
def is_readonly(cls, field):
"""Return True if specified field name is read-only.
:param str field: the field name in the schema
:returns: ``True`` if the specified field is read-only,
``False`` otherwise.
:rtype: bool
"""
return field in cls.get_option('readonly_fields')
def schema_type(self):
if self.get_option('preserve_unknown') is True:
unknown = 'preserve'
else:
unknown = 'ignore'
return colander.Mapping(unknown=unknown)
class PermissionsSchema(colander.SchemaNode):
"""A permission mapping defines ACEs.
It has permission names as keys and principals as values.
::
{
"write": ["fxa:af3e077eb9f5444a949ad65aa86e82ff"],
"groups:create": ["fxa:70a9335eecfe440fa445ba752a750f3d"]
}
"""
def __init__(self, *args, **kwargs):
self.known_perms = kwargs.pop('permissions', tuple())
super().__init__(*args, **kwargs)
for perm in self.known_perms:
self[perm] = self._get_node_principals(perm)
def schema_type(self):
if self.known_perms:
return colander.Mapping(unknown='raise')
else:
return colander.Mapping(unknown='preserve')
def deserialize(self, cstruct=colander.null):
# If permissions are not a mapping (e.g null or invalid), try deserializing
if not isinstance(cstruct, dict):
return super().deserialize(cstruct)
# If using application/merge-patch+json we need to allow null values as they
# represent removing a key.
cstruct, removed_keys = self._preprocess_null_perms(cstruct)
# If permissions are listed, check fields and produce fancy error messages
if self.known_perms:
for perm in cstruct:
colander.OneOf(choices=self.known_perms)(self, perm)
permissions = super().deserialize(cstruct)
# Else deserialize the fields that are not on the schema
else:
permissions = {}
perm_schema = colander.SequenceSchema(colander.SchemaNode(colander.String()))
for perm, principals in cstruct.items():
permissions[perm] = perm_schema.deserialize(principals)
return self._postprocess_null_perms(permissions, removed_keys)
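    # For illustration (hypothetical principals): deserializing
    #   {"write": ["fxa:abc"], "read": None}
    # strips the null entry before validation and re-adds it afterwards, so
    # application/merge-patch+json clients can remove the "read" ACE.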
def _get_node_principals(self, perm):
principal = colander.SchemaNode(colander.String())
return colander.SchemaNode(colander.Sequence(), principal, name=perm,
missing=colander.drop)
@staticmethod
def _preprocess_null_perms(cstruct):
keys = {k for k, v in cstruct.items() if v is None}
cleaned = {k: v for k, v in cstruct.items() if v is not None}
return cleaned, keys
@staticmethod
def _postprocess_null_perms(validated, keys):
validated.update({k: None for k in keys})
return validated
# Header schemas
class HeaderSchema(colander.MappingSchema):
"""Base schema used for validating and deserializing request headers. """
missing = colander.drop
if_match = HeaderQuotedInteger(name='If-Match')
if_none_match = HeaderQuotedInteger(name='If-None-Match')
@staticmethod
def schema_type():
return colander.Mapping(unknown='preserve')
class PatchHeaderSchema(HeaderSchema):
"""Header schema used with PATCH requests."""
def response_behavior_validator():
return colander.OneOf(['full', 'light', 'diff'])
response_behaviour = HeaderField(colander.String(), name='Response-Behavior',
validator=response_behavior_validator())
# Querystring schemas
class QuerySchema(colander.MappingSchema):
"""
Schema used for validating and deserializing querystrings. It will include
and try to guess the type of unknown fields (field filters) on deserialization.
"""
missing = colander.drop
@staticmethod
def schema_type():
return colander.Mapping(unknown='ignore')
def deserialize(self, cstruct=colander.null):
"""
Deserialize and validate the QuerySchema fields and try to deserialize and
        get the native value of additional fields (field filters) that may be present
on the cstruct.
e.g:: ?exclude_id=a,b&deleted=true -> {'exclude_id': ['a', 'b'], deleted: True}
"""
values = {}
schema_values = super().deserialize(cstruct)
if schema_values is colander.drop:
return schema_values
# Deserialize querystring field filters (see docstring e.g)
for k, v in cstruct.items():
# Deserialize lists used on in_ and exclude_ filters
if k.startswith('in_') or k.startswith('exclude_'):
as_list = FieldList().deserialize(v)
values[k] = [native_value(v) for v in as_list]
else:
values[k] = native_value(v)
values.update(schema_values)
return values
class CollectionQuerySchema(QuerySchema):
"""Querystring schema used with collections."""
_limit = QueryField(colander.Integer(), validator=positive_big_integer)
_sort = FieldList()
_token = QueryField(colander.String())
_since = QueryField(colander.Integer(), validator=positive_big_integer)
_to = QueryField(colander.Integer(), validator=positive_big_integer)
_before = QueryField(colander.Integer(), validator=positive_big_integer)
id = QueryField(colander.String())
last_modified = QueryField(colander.Integer(), validator=positive_big_integer)
class RecordGetQuerySchema(QuerySchema):
"""Querystring schema for GET record requests."""
_fields = FieldList()
class CollectionGetQuerySchema(CollectionQuerySchema):
"""Querystring schema for GET collection requests."""
_fields = FieldList()
# Body Schemas
class RecordSchema(colander.MappingSchema):
@colander.deferred
def data(node, kwargs):
data = kwargs.get('data')
if data:
# Check if empty record is allowed.
# (e.g every schema fields have defaults)
try:
data.deserialize({})
except colander.Invalid:
pass
else:
data.default = {}
data.missing = colander.drop
return data
@colander.deferred
def permissions(node, kwargs):
def get_perms(node, kwargs):
return kwargs.get('permissions')
# Set if node is provided, else keep deferred. This allows binding the body
        # on Resource first and binding permissions later if using ShareableResource.
return get_perms(node, kwargs) or colander.deferred(get_perms)
@staticmethod
def schema_type():
return colander.Mapping(unknown='raise')
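    # Rough usage sketch (illustrative only): the deferred nodes above are
    # resolved by binding, e.g.
    #   RecordSchema().bind(data=ResourceSchema(),
    #                       permissions=PermissionsSchema(permissions=('read', 'write')))
    # which yields a concrete body schema with `data` and `permissions` children.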
class JsonPatchOperationSchema(colander.MappingSchema):
"""Single JSON Patch Operation."""
def op_validator():
op_values = ['test', 'add', 'remove', 'replace', 'move', 'copy']
return colander.OneOf(op_values)
def path_validator():
return colander.Regex('(/\w*)+')
op = colander.SchemaNode(colander.String(), validator=op_validator())
path = colander.SchemaNode(colander.String(), validator=path_validator())
from_ = colander.SchemaNode(colander.String(), name='from',
validator=path_validator(), missing=colander.drop)
value = colander.SchemaNode(Any(), missing=colander.drop)
@staticmethod
def schema_type():
return colander.Mapping(unknown='raise')
class JsonPatchBodySchema(colander.SequenceSchema):
"""Body used with JSON Patch (application/json-patch+json) as in RFC 6902."""
operations = JsonPatchOperationSchema(missing=colander.drop)
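    # Example of a body this schema accepts (illustrative paths and values):
    #   [{"op": "replace", "path": "/data/title", "value": "New title"},
    #    {"op": "remove", "path": "/data/obsolete"}]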
# Request schemas
class RequestSchema(colander.MappingSchema):
"""Base schema for kinto requests."""
@colander.deferred
def header(node, kwargs):
return kwargs.get('header')
@colander.deferred
def querystring(node, kwargs):
return kwargs.get('querystring')
def after_bind(self, node, kw):
# Set default bindings
if not self.get('header'):
self['header'] = HeaderSchema()
if not self.get('querystring'):
self['querystring'] = QuerySchema()
class PayloadRequestSchema(RequestSchema):
"""Base schema for methods that use a JSON request body."""
@colander.deferred
def body(node, kwargs):
def get_body(node, kwargs):
return kwargs.get('body')
        # Set if node is provided, else keep deferred (and allow binding later)
return get_body(node, kwargs) or colander.deferred(get_body)
class JsonPatchRequestSchema(RequestSchema):
"""JSON Patch (application/json-patch+json) request schema."""
body = JsonPatchBodySchema()
querystring = QuerySchema()
header = PatchHeaderSchema()
# Response schemas
class ResponseHeaderSchema(colander.MappingSchema):
"""Kinto API custom response headers."""
etag = HeaderQuotedInteger(name='Etag')
last_modified = colander.SchemaNode(colander.String(), name='Last-Modified')
class ErrorResponseSchema(colander.MappingSchema):
"""Response schema used on 4xx and 5xx errors."""
body = ErrorSchema()
class NotModifiedResponseSchema(colander.MappingSchema):
"""Response schema used on 304 Not Modified responses."""
header = ResponseHeaderSchema()
class RecordResponseSchema(colander.MappingSchema):
"""Response schema used with sigle resource endpoints."""
header = ResponseHeaderSchema()
@colander.deferred
def body(node, kwargs):
return kwargs.get('record')
class CollectionResponseSchema(colander.MappingSchema):
"""Response schema used with plural endpoints."""
header = ResponseHeaderSchema()
@colander.deferred
def body(node, kwargs):
resource = kwargs.get('record')['data']
collection = colander.MappingSchema()
collection['data'] = colander.SequenceSchema(resource, missing=[])
return collection
class ResourceReponses:
"""Class that wraps and handles Resource responses."""
default_schemas = {
'400': ErrorResponseSchema(description='The request is invalid.'),
'406': ErrorResponseSchema(
description="The client doesn't accept supported responses Content-Type."),
'412': ErrorResponseSchema(
description='Record was changed or deleted since value in `If-Match` header.'),
'default': ErrorResponseSchema(description='Unexpected error.'),
}
default_record_schemas = {
'200': RecordResponseSchema(description='Return the target object.')
}
default_collection_schemas = {
'200': CollectionResponseSchema(description='Return a list of matching objects.')
}
default_get_schemas = {
'304': NotModifiedResponseSchema(
            description='Response has not changed since value in If-None-Match header')
}
default_post_schemas = {
'200': RecordResponseSchema(description='Return an existing object.'),
'201': RecordResponseSchema(description='Return a created object.'),
'415': ErrorResponseSchema(
description='The client request was not sent with a correct Content-Type.')
}
default_put_schemas = {
'201': RecordResponseSchema(description='Return created object.'),
'415': ErrorResponseSchema(
description='The client request was not sent with a correct Content-Type.')
}
default_patch_schemas = {
'415': ErrorResponseSchema(
description='The client request was not sent with a correct Content-Type.')
}
default_delete_schemas = {
}
record_get_schemas = {
'404': ErrorResponseSchema(description='The object does not exist or was deleted.'),
}
record_patch_schemas = {
'404': ErrorResponseSchema(description='The object does not exist or was deleted.'),
}
record_delete_schemas = {
'404': ErrorResponseSchema(
description='The object does not exist or was already deleted.'),
}
def get_and_bind(self, endpoint_type, method, **kwargs):
"""Wrap resource colander response schemas for an endpoint and return a dict
        mapping status codes to cloned and bound responses."""
responses = self.default_schemas.copy()
type_responses = getattr(self, 'default_{}_schemas'.format(endpoint_type))
responses.update(**type_responses)
verb_responses = 'default_{}_schemas'.format(method.lower())
method_args = getattr(self, verb_responses, {})
responses.update(**method_args)
method_responses = '{}_{}_schemas'.format(endpoint_type, method.lower())
endpoint_args = getattr(self, method_responses, {})
responses.update(**endpoint_args)
# Bind and clone schemas into a new dict
bound = {code: resp.bind(**kwargs) for code, resp in responses.items()}
return bound
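    # For example (illustrative call): get_and_bind('record', 'GET', record=some_schema)
    # merges the default, record, GET and record-GET maps, so the returned dict has
    # entries for '200', '304', '400', '404', '406', '412' and 'default', each cloned
    # and bound with the given keyword arguments.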
class ShareableResourseResponses(ResourceReponses):
"""Class that wraps and handles SharableResource responses."""
def __init__(self, **kwargs):
# Add permission related responses to defaults
self.default_schemas = {
'401': ErrorResponseSchema(
description='The request is missing authentication headers.'),
'403': ErrorResponseSchema(
description=('The user is not allowed to perform the operation, '
'or the resource is not accessible.')),
**self.default_schemas
}
| 1 | 11,654 | Please use `kinto.core.utils.json` | Kinto-kinto | py |
@@ -204,6 +204,16 @@ func (h *Handler) Provision(ctx caddy.Context) error {
// set up upstreams
for _, upstream := range h.Upstreams {
+ addr, err := caddy.ParseNetworkAddress(upstream.Dial)
+ if err != nil {
+ return err
+ }
+ if addr.PortRangeSize() != 1 {
+ h.logger.Error("multiple addresses (upstream must map to only one address)",
+ zap.String("address", upstream.Dial),
+ )
+ return fmt.Errorf("multiple addresses (upstream must map to only one address): %v", addr)
+ }
// create or get the host representation for this upstream
var host Host = new(upstreamHost)
existingHost, loaded := hosts.LoadOrStore(upstream.String(), host) | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package reverseproxy
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
"go.uber.org/zap"
"golang.org/x/net/http/httpguts"
)
func init() {
caddy.RegisterModule(Handler{})
}
// Handler implements a highly configurable and production-ready reverse proxy.
//
// Upon proxying, this module sets the following placeholders (which can be used
// both within and after this handler):
//
// Placeholder | Description
// ------------|-------------
// `{http.reverse_proxy.upstream.address}` | The full address to the upstream as given in the config
// `{http.reverse_proxy.upstream.hostport}` | The host:port of the upstream
// `{http.reverse_proxy.upstream.host}` | The host of the upstream
// `{http.reverse_proxy.upstream.port}` | The port of the upstream
// `{http.reverse_proxy.upstream.requests}` | The approximate current number of requests to the upstream
// `{http.reverse_proxy.upstream.max_requests}` | The maximum approximate number of requests allowed to the upstream
// `{http.reverse_proxy.upstream.fails}` | The number of recent failed requests to the upstream
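//
// As a rough illustration (not an exhaustive config), a minimal JSON handler
// definition for this module might look like:
//
//     {
//         "handler": "reverse_proxy",
//         "upstreams": [{"dial": "localhost:9000"}]
//     }
//
// where "localhost:9000" stands in for an actual backend address.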
type Handler struct {
// Configures the method of transport for the proxy. A transport
// is what performs the actual "round trip" to the backend.
// The default transport is plaintext HTTP.
TransportRaw json.RawMessage `json:"transport,omitempty" caddy:"namespace=http.reverse_proxy.transport inline_key=protocol"`
// A circuit breaker may be used to relieve pressure on a backend
// that is beginning to exhibit symptoms of stress or latency.
// By default, there is no circuit breaker.
CBRaw json.RawMessage `json:"circuit_breaker,omitempty" caddy:"namespace=http.reverse_proxy.circuit_breakers inline_key=type"`
// Load balancing distributes load/requests between backends.
LoadBalancing *LoadBalancing `json:"load_balancing,omitempty"`
// Health checks update the status of backends, whether they are
// up or down. Down backends will not be proxied to.
HealthChecks *HealthChecks `json:"health_checks,omitempty"`
// Upstreams is the list of backends to proxy to.
Upstreams UpstreamPool `json:"upstreams,omitempty"`
// Adjusts how often to flush the response buffer. A
// negative value disables response buffering.
// TODO: figure out good defaults and write docs for this
// (see https://github.com/caddyserver/caddy/issues/1460)
FlushInterval caddy.Duration `json:"flush_interval,omitempty"`
// Headers manipulates headers between Caddy and the backend.
// By default, all headers are passed-thru without changes,
// with the exceptions of special hop-by-hop headers.
//
// X-Forwarded-For and X-Forwarded-Proto are also set
// implicitly, but this may change in the future if the official
// standardized Forwarded header field gains more adoption.
Headers *headers.Handler `json:"headers,omitempty"`
// If true, the entire request body will be read and buffered
// in memory before being proxied to the backend. This should
// be avoided if at all possible for performance reasons.
BufferRequests bool `json:"buffer_requests,omitempty"`
// List of handlers and their associated matchers to evaluate
// after successful roundtrips. The first handler that matches
// the response from a backend will be invoked. The response
// body from the backend will not be written to the client;
// it is up to the handler to finish handling the response.
// If passive health checks are enabled, any errors from the
// handler chain will not affect the health status of the
// backend.
//
// Two new placeholders are available in this handler chain:
// - `{http.reverse_proxy.status_code}` The status code
// - `{http.reverse_proxy.status_text}` The status text
HandleResponse []caddyhttp.ResponseHandler `json:"handle_response,omitempty"`
Transport http.RoundTripper `json:"-"`
CB CircuitBreaker `json:"-"`
ctx caddy.Context
logger *zap.Logger
}
// CaddyModule returns the Caddy module information.
func (Handler) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.handlers.reverse_proxy",
New: func() caddy.Module { return new(Handler) },
}
}
// Provision ensures that h is set up properly before use.
func (h *Handler) Provision(ctx caddy.Context) error {
h.ctx = ctx
h.logger = ctx.Logger(h)
// start by loading modules
if h.TransportRaw != nil {
mod, err := ctx.LoadModule(h, "TransportRaw")
if err != nil {
return fmt.Errorf("loading transport: %v", err)
}
h.Transport = mod.(http.RoundTripper)
}
if h.LoadBalancing != nil && h.LoadBalancing.SelectionPolicyRaw != nil {
mod, err := ctx.LoadModule(h.LoadBalancing, "SelectionPolicyRaw")
if err != nil {
return fmt.Errorf("loading load balancing selection policy: %s", err)
}
h.LoadBalancing.SelectionPolicy = mod.(Selector)
}
if h.CBRaw != nil {
mod, err := ctx.LoadModule(h, "CBRaw")
if err != nil {
return fmt.Errorf("loading circuit breaker: %s", err)
}
h.CB = mod.(CircuitBreaker)
}
// ensure any embedded headers handler module gets provisioned
// (see https://caddy.community/t/set-cookie-manipulation-in-reverse-proxy/7666?u=matt
// for what happens if we forget to provision it)
if h.Headers != nil {
err := h.Headers.Provision(ctx)
if err != nil {
return fmt.Errorf("provisioning embedded headers handler: %v", err)
}
}
// set up transport
if h.Transport == nil {
t := &HTTPTransport{
KeepAlive: &KeepAlive{
ProbeInterval: caddy.Duration(30 * time.Second),
IdleConnTimeout: caddy.Duration(2 * time.Minute),
MaxIdleConnsPerHost: 32,
},
DialTimeout: caddy.Duration(10 * time.Second),
}
err := t.Provision(ctx)
if err != nil {
return fmt.Errorf("provisioning default transport: %v", err)
}
h.Transport = t
}
// set up load balancing
if h.LoadBalancing == nil {
h.LoadBalancing = new(LoadBalancing)
}
if h.LoadBalancing.SelectionPolicy == nil {
h.LoadBalancing.SelectionPolicy = RandomSelection{}
}
if h.LoadBalancing.TryDuration > 0 && h.LoadBalancing.TryInterval == 0 {
// a non-zero try_duration with a zero try_interval
// will always spin the CPU for try_duration if the
// upstream is local or low-latency; avoid that by
// defaulting to a sane wait period between attempts
h.LoadBalancing.TryInterval = caddy.Duration(250 * time.Millisecond)
}
lbMatcherSets, err := ctx.LoadModule(h.LoadBalancing, "RetryMatchRaw")
if err != nil {
return err
}
err = h.LoadBalancing.RetryMatch.FromInterface(lbMatcherSets)
if err != nil {
return err
}
// set up upstreams
for _, upstream := range h.Upstreams {
// create or get the host representation for this upstream
var host Host = new(upstreamHost)
existingHost, loaded := hosts.LoadOrStore(upstream.String(), host)
if loaded {
host = existingHost.(Host)
}
upstream.Host = host
// give it the circuit breaker, if any
upstream.cb = h.CB
// if the passive health checker has a non-zero UnhealthyRequestCount
// but the upstream has no MaxRequests set (they are the same thing,
		// but the passive health checker is a default value for upstreams
// without MaxRequests), copy the value into this upstream, since the
// value in the upstream (MaxRequests) is what is used during
// availability checks
if h.HealthChecks != nil && h.HealthChecks.Passive != nil {
h.HealthChecks.Passive.logger = h.logger.Named("health_checker.passive")
if h.HealthChecks.Passive.UnhealthyRequestCount > 0 &&
upstream.MaxRequests == 0 {
upstream.MaxRequests = h.HealthChecks.Passive.UnhealthyRequestCount
}
}
// upstreams need independent access to the passive
// health check policy because passive health checks
// run without access to h.
if h.HealthChecks != nil {
upstream.healthCheckPolicy = h.HealthChecks.Passive
}
}
if h.HealthChecks != nil {
// set defaults on passive health checks, if necessary
if h.HealthChecks.Passive != nil {
if h.HealthChecks.Passive.FailDuration > 0 && h.HealthChecks.Passive.MaxFails == 0 {
h.HealthChecks.Passive.MaxFails = 1
}
}
// if active health checks are enabled, configure them and start a worker
if h.HealthChecks.Active != nil &&
(h.HealthChecks.Active.Path != "" || h.HealthChecks.Active.Port != 0) {
h.HealthChecks.Active.logger = h.logger.Named("health_checker.active")
timeout := time.Duration(h.HealthChecks.Active.Timeout)
if timeout == 0 {
timeout = 5 * time.Second
}
h.HealthChecks.Active.httpClient = &http.Client{
Timeout: timeout,
Transport: h.Transport,
}
if h.HealthChecks.Active.Interval == 0 {
h.HealthChecks.Active.Interval = caddy.Duration(30 * time.Second)
}
if h.HealthChecks.Active.ExpectBody != "" {
var err error
h.HealthChecks.Active.bodyRegexp, err = regexp.Compile(h.HealthChecks.Active.ExpectBody)
if err != nil {
return fmt.Errorf("expect_body: compiling regular expression: %v", err)
}
}
go h.activeHealthChecker()
}
}
// set up any response routes
for i, rh := range h.HandleResponse {
err := rh.Provision(ctx)
if err != nil {
return fmt.Errorf("provisioning response handler %d: %v", i, err)
}
}
return nil
}
// Cleanup cleans up the resources made by h during provisioning.
func (h *Handler) Cleanup() error {
// TODO: Close keepalive connections on reload? https://github.com/caddyserver/caddy/pull/2507/files#diff-70219fd88fe3f36834f474ce6537ed26R762
// remove hosts from our config from the pool
for _, upstream := range h.Upstreams {
hosts.Delete(upstream.String())
}
return nil
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
// if enabled, buffer client request;
// this should only be enabled if the
// upstream requires it and does not
// work with "slow clients" (gunicorn,
// etc.) - this obviously has a perf
// overhead and makes the proxy at
// risk of exhausting memory and more
// susceptible to slowloris attacks,
// so it is strongly recommended to
// only use this feature if absolutely
// required, if read timeouts are set,
// and if body size is limited
if h.BufferRequests {
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
io.Copy(buf, r.Body)
r.Body.Close()
r.Body = ioutil.NopCloser(buf)
}
// prepare the request for proxying; this is needed only once
err := h.prepareRequest(r)
if err != nil {
return caddyhttp.Error(http.StatusInternalServerError,
fmt.Errorf("preparing request for upstream round-trip: %v", err))
}
// we will need the original headers and Host value if
// header operations are configured; and we should
// restore them after we're done if they are changed
// (for example, changing the outbound Host header
// should not permanently change r.Host; issue #3509)
reqHost := r.Host
reqHeader := r.Header
defer func() {
r.Host = reqHost
r.Header = reqHeader
}()
start := time.Now()
var proxyErr error
for {
// choose an available upstream
upstream := h.LoadBalancing.SelectionPolicy.Select(h.Upstreams, r)
if upstream == nil {
if proxyErr == nil {
proxyErr = fmt.Errorf("no upstreams available")
}
if !h.LoadBalancing.tryAgain(h.ctx, start, proxyErr, r) {
break
}
continue
}
// the dial address may vary per-request if placeholders are
// used, so perform those replacements here; the resulting
// DialInfo struct should have valid network address syntax
dialInfo, err := upstream.fillDialInfo(r)
if err != nil {
return fmt.Errorf("making dial info: %v", err)
}
// attach to the request information about how to dial the upstream;
// this is necessary because the information cannot be sufficiently
// or satisfactorily represented in a URL
caddyhttp.SetVar(r.Context(), dialInfoVarKey, dialInfo)
// set placeholders with information about this upstream
repl.Set("http.reverse_proxy.upstream.address", dialInfo.String())
repl.Set("http.reverse_proxy.upstream.hostport", dialInfo.Address)
repl.Set("http.reverse_proxy.upstream.host", dialInfo.Host)
repl.Set("http.reverse_proxy.upstream.port", dialInfo.Port)
repl.Set("http.reverse_proxy.upstream.requests", upstream.Host.NumRequests())
repl.Set("http.reverse_proxy.upstream.max_requests", upstream.MaxRequests)
repl.Set("http.reverse_proxy.upstream.fails", upstream.Host.Fails())
// mutate request headers according to this upstream;
// because we're in a retry loop, we have to copy
// headers (and the r.Host value) from the original
// so that each retry is identical to the first
if h.Headers != nil && h.Headers.Request != nil {
r.Header = make(http.Header)
copyHeader(r.Header, reqHeader)
r.Host = reqHost
h.Headers.Request.ApplyToRequest(r)
}
// proxy the request to that upstream
proxyErr = h.reverseProxy(w, r, dialInfo, next)
if proxyErr == nil || proxyErr == context.Canceled {
// context.Canceled happens when the downstream client
// cancels the request, which is not our failure
return nil
}
// if the roundtrip was successful, don't retry the request or
// ding the health status of the upstream (an error can still
// occur after the roundtrip if, for example, a response handler
// after the roundtrip returns an error)
if succ, ok := proxyErr.(roundtripSucceeded); ok {
return succ.error
}
// remember this failure (if enabled)
h.countFailure(upstream)
// if we've tried long enough, break
if !h.LoadBalancing.tryAgain(h.ctx, start, proxyErr, r) {
break
}
}
return caddyhttp.Error(http.StatusBadGateway, proxyErr)
}
// prepareRequest modifies req so that it is ready to be proxied,
// except for directing to a specific upstream. This method mutates
// headers and other necessary properties of the request and should
// be done just once (before proxying) regardless of proxy retries.
// This assumes that no mutations of the request are performed
// by h during or after proxying.
func (h Handler) prepareRequest(req *http.Request) error {
// most of this is borrowed from the Go std lib reverse proxy
if req.ContentLength == 0 {
req.Body = nil // Issue golang/go#16036: nil Body for http.Transport retries
}
req.Close = false
// if User-Agent is not set by client, then explicitly
// disable it so it's not set to default value by std lib
if _, ok := req.Header["User-Agent"]; !ok {
req.Header.Set("User-Agent", "")
}
reqUpType := upgradeType(req.Header)
removeConnectionHeaders(req.Header)
// Remove hop-by-hop headers to the backend. Especially
// important is "Connection" because we want a persistent
// connection, regardless of what the client sent to us.
for _, h := range hopHeaders {
hv := req.Header.Get(h)
if hv == "" {
continue
}
if h == "Te" && hv == "trailers" {
// Issue golang/go#21096: tell backend applications that
// care about trailer support that we support
// trailers. (We do, but we don't go out of
// our way to advertise that unless the
// incoming client request thought it was
// worth mentioning)
continue
}
req.Header.Del(h)
}
// After stripping all the hop-by-hop connection headers above, add back any
// necessary for protocol upgrades, such as for websockets.
if reqUpType != "" {
req.Header.Set("Connection", "Upgrade")
req.Header.Set("Upgrade", reqUpType)
}
if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
// If we aren't the first proxy retain prior
// X-Forwarded-For information as a comma+space
// separated list and fold multiple headers into one.
if prior, ok := req.Header["X-Forwarded-For"]; ok {
clientIP = strings.Join(prior, ", ") + ", " + clientIP
}
req.Header.Set("X-Forwarded-For", clientIP)
}
if req.Header.Get("X-Forwarded-Proto") == "" {
// set X-Forwarded-Proto; many backend apps expect this too
proto := "https"
if req.TLS == nil {
proto = "http"
}
req.Header.Set("X-Forwarded-Proto", proto)
}
return nil
}
// reverseProxy performs a round-trip to the given backend and processes the response with the client.
// (This method is mostly the beginning of what was borrowed from the net/http/httputil package in the
// Go standard library which was used as the foundation.)
func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, di DialInfo, next caddyhttp.Handler) error {
di.Upstream.Host.CountRequest(1)
defer di.Upstream.Host.CountRequest(-1)
// point the request to this upstream
h.directRequest(req, di)
// do the round-trip; emit debug log with values we know are
// safe, or if there is no error, emit fuller log entry
start := time.Now()
res, err := h.Transport.RoundTrip(req)
duration := time.Since(start)
logger := h.logger.With(
zap.String("upstream", di.Upstream.String()),
zap.Object("request", caddyhttp.LoggableHTTPRequest{Request: req}),
zap.Duration("duration", duration))
if err != nil {
logger.Debug("upstream roundtrip", zap.Error(err))
return err
}
logger.Debug("upstream roundtrip",
zap.Object("headers", caddyhttp.LoggableHTTPHeader(res.Header)),
zap.Int("status", res.StatusCode))
// update circuit breaker on current conditions
if di.Upstream.cb != nil {
di.Upstream.cb.RecordMetric(res.StatusCode, duration)
}
// perform passive health checks (if enabled)
if h.HealthChecks != nil && h.HealthChecks.Passive != nil {
// strike if the status code matches one that is "bad"
for _, badStatus := range h.HealthChecks.Passive.UnhealthyStatus {
if caddyhttp.StatusCodeMatches(res.StatusCode, badStatus) {
h.countFailure(di.Upstream)
}
}
// strike if the roundtrip took too long
if h.HealthChecks.Passive.UnhealthyLatency > 0 &&
duration >= time.Duration(h.HealthChecks.Passive.UnhealthyLatency) {
h.countFailure(di.Upstream)
}
}
// see if any response handler is configured for this response from the backend
for i, rh := range h.HandleResponse {
if rh.Match != nil && !rh.Match.Match(res.StatusCode, res.Header) {
continue
}
repl := req.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
// if configured to only change the status code, do that then continue regular proxy response
if statusCodeStr := rh.StatusCode.String(); statusCodeStr != "" {
statusCode, err := strconv.Atoi(repl.ReplaceAll(statusCodeStr, ""))
if err != nil {
return caddyhttp.Error(http.StatusInternalServerError, err)
}
if statusCode != 0 {
res.StatusCode = statusCode
}
break
}
// otherwise, if there are any routes configured, execute those as the
// actual response instead of what we got from the proxy backend
if len(rh.Routes) == 0 {
continue
}
res.Body.Close()
repl.Set("http.reverse_proxy.status_code", res.StatusCode)
repl.Set("http.reverse_proxy.status_text", res.Status)
h.logger.Debug("handling response", zap.Int("handler", i))
if routeErr := rh.Routes.Compile(next).ServeHTTP(rw, req); routeErr != nil {
// wrap error in roundtripSucceeded so caller knows that
// the roundtrip was successful and to not retry
return roundtripSucceeded{routeErr}
}
}
// Deal with 101 Switching Protocols responses: (WebSocket, h2c, etc)
if res.StatusCode == http.StatusSwitchingProtocols {
h.handleUpgradeResponse(rw, req, res)
return nil
}
removeConnectionHeaders(res.Header)
for _, h := range hopHeaders {
res.Header.Del(h)
}
// apply any response header operations
if h.Headers != nil && h.Headers.Response != nil {
if h.Headers.Response.Require == nil ||
h.Headers.Response.Require.Match(res.StatusCode, res.Header) {
repl := req.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
h.Headers.Response.ApplyTo(res.Header, repl)
}
}
copyHeader(rw.Header(), res.Header)
// The "Trailer" header isn't included in the Transport's response,
// at least for *http.Transport. Build it up from Trailer.
announcedTrailers := len(res.Trailer)
if announcedTrailers > 0 {
trailerKeys := make([]string, 0, len(res.Trailer))
for k := range res.Trailer {
trailerKeys = append(trailerKeys, k)
}
rw.Header().Add("Trailer", strings.Join(trailerKeys, ", "))
}
rw.WriteHeader(res.StatusCode)
// some apps need the response headers before starting to stream content with http2,
// so it's important to explicitly flush the headers to the client before streaming the data.
// (see https://github.com/caddyserver/caddy/issues/3556 for use case and nuances)
if h.isBidirectionalStream(req, res) {
if wf, ok := rw.(http.Flusher); ok {
wf.Flush()
}
}
err = h.copyResponse(rw, res.Body, h.flushInterval(req, res))
res.Body.Close() // close now, instead of defer, to populate res.Trailer
if err != nil {
// we're streaming the response and we've already written headers, so
// there's nothing an error handler can do to recover at this point;
// the standard lib's proxy panics at this point, but we'll just log
// the error and abort the stream here
h.logger.Error("aborting with incomplete response", zap.Error(err))
return nil
}
if len(res.Trailer) > 0 {
// Force chunking if we saw a response trailer.
// This prevents net/http from calculating the length for short
// bodies and adding a Content-Length.
if fl, ok := rw.(http.Flusher); ok {
fl.Flush()
}
}
if len(res.Trailer) == announcedTrailers {
copyHeader(rw.Header(), res.Trailer)
return nil
}
for k, vv := range res.Trailer {
k = http.TrailerPrefix + k
for _, v := range vv {
rw.Header().Add(k, v)
}
}
return nil
}
// tryAgain takes the time that the handler was initially invoked
// as well as any error currently obtained, and the request being
// tried, and returns true if another attempt should be made at
// proxying the request. If true is returned, it has already blocked
// long enough before the next retry (i.e. no more sleeping is
// needed). If false is returned, the handler should stop trying to
// proxy the request.
func (lb LoadBalancing) tryAgain(ctx caddy.Context, start time.Time, proxyErr error, req *http.Request) bool {
// if we've tried long enough, break
if time.Since(start) >= time.Duration(lb.TryDuration) {
return false
}
// if the error occurred while dialing (i.e. a connection
// could not even be established to the upstream), then it
// should be safe to retry, since without a connection, no
// HTTP request can be transmitted; but if the error is not
// specifically a dialer error, we need to be careful
if _, ok := proxyErr.(DialError); proxyErr != nil && !ok {
// if the error occurred after a connection was established,
// we have to assume the upstream received the request, and
// retries need to be carefully decided, because some requests
// are not idempotent
if lb.RetryMatch == nil && req.Method != "GET" {
// by default, don't retry requests if they aren't GET
return false
}
if !lb.RetryMatch.AnyMatch(req) {
return false
}
}
// otherwise, wait and try the next available host
select {
case <-time.After(time.Duration(lb.TryInterval)):
return true
case <-ctx.Done():
return false
}
}
// directRequest modifies only req.URL so that it points to the upstream
// in the given DialInfo. It must modify ONLY the request URL.
func (h Handler) directRequest(req *http.Request, di DialInfo) {
// we need a host, so set the upstream's host address
reqHost := di.Address
// if the port equates to the scheme, strip the port because
// it's weird to make a request like http://example.com:80/.
if (req.URL.Scheme == "http" && di.Port == "80") ||
(req.URL.Scheme == "https" && di.Port == "443") {
reqHost = di.Host
}
req.URL.Host = reqHost
}
func copyHeader(dst, src http.Header) {
for k, vv := range src {
for _, v := range vv {
dst.Add(k, v)
}
}
}
func cloneHeader(h http.Header) http.Header {
h2 := make(http.Header, len(h))
for k, vv := range h {
vv2 := make([]string, len(vv))
copy(vv2, vv)
h2[k] = vv2
}
return h2
}
func upgradeType(h http.Header) string {
if !httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade") {
return ""
}
return strings.ToLower(h.Get("Upgrade"))
}
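// singleJoiningSlash joins a and b with exactly one slash between them,
// regardless of whether a ends with, or b begins with, a slash.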
func singleJoiningSlash(a, b string) string {
aslash := strings.HasSuffix(a, "/")
bslash := strings.HasPrefix(b, "/")
switch {
case aslash && bslash:
return a + b[1:]
case !aslash && !bslash:
return a + "/" + b
}
return a + b
}
// removeConnectionHeaders removes hop-by-hop headers listed in the "Connection" header of h.
// See RFC 7230, section 6.1
func removeConnectionHeaders(h http.Header) {
if c := h.Get("Connection"); c != "" {
for _, f := range strings.Split(c, ",") {
if f = strings.TrimSpace(f); f != "" {
h.Del(f)
}
}
}
}
// LoadBalancing has parameters related to load balancing.
type LoadBalancing struct {
// A selection policy is how to choose an available backend.
// The default policy is random selection.
SelectionPolicyRaw json.RawMessage `json:"selection_policy,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
// How long to try selecting available backends for each request
// if the next available host is down. By default, this retry is
// disabled. Clients will wait for up to this long while the load
// balancer tries to find an available upstream host.
TryDuration caddy.Duration `json:"try_duration,omitempty"`
// How long to wait between selecting the next host from the pool. Default
// is 250ms. Only relevant when a request to an upstream host fails. Be
// aware that setting this to 0 with a non-zero try_duration can cause the
// CPU to spin if all backends are down and latency is very low.
TryInterval caddy.Duration `json:"try_interval,omitempty"`
// A list of matcher sets that restricts with which requests retries are
// allowed. A request must match any of the given matcher sets in order
// to be retried if the connection to the upstream succeeded but the
// subsequent round-trip failed. If the connection to the upstream failed,
// a retry is always allowed. If unspecified, only GET requests will be
// allowed to be retried. Note that a retry is done with the next available
// host according to the load balancing policy.
RetryMatchRaw caddyhttp.RawMatcherSets `json:"retry_match,omitempty" caddy:"namespace=http.matchers"`
SelectionPolicy Selector `json:"-"`
RetryMatch caddyhttp.MatcherSets `json:"-"`
}
// Selector selects an available upstream from the pool.
type Selector interface {
Select(UpstreamPool, *http.Request) *Upstream
}
// Hop-by-hop headers. These are removed when sent to the backend.
// As of RFC 7230, hop-by-hop headers are required to appear in the
// Connection header field. These are the headers defined by the
// obsoleted RFC 2616 (section 13.5.1) and are used for backward
// compatibility.
var hopHeaders = []string{
"Alt-Svc",
"Connection",
"Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google
"Keep-Alive",
"Proxy-Authenticate",
"Proxy-Authorization",
"Te", // canonicalized version of "TE"
"Trailer", // not Trailers per URL above; https://www.rfc-editor.org/errata_search.php?eid=4522
"Transfer-Encoding",
"Upgrade",
}
// DialError is an error that specifically occurs
// in a call to Dial or DialContext.
type DialError struct{ error }
// TLSTransport is implemented by transports
// that are capable of using TLS.
type TLSTransport interface {
// TLSEnabled returns true if the transport
// has TLS enabled, false otherwise.
TLSEnabled() bool
// EnableTLS enables TLS within the transport
// if it is not already, using the provided
// value as a basis for the TLS config.
EnableTLS(base *TLSConfig) error
}
// roundtripSucceeded is an error type that is returned if the
// roundtrip succeeded, but an error occurred after-the-fact.
type roundtripSucceeded struct{ error }
var bufPool = sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
// Interface guards
var (
_ caddy.Provisioner = (*Handler)(nil)
_ caddy.CleanerUpper = (*Handler)(nil)
_ caddyhttp.MiddlewareHandler = (*Handler)(nil)
)
| 1 | 15,506 | Since we're returning the error with the same content, we don't need to log it here; it will get bubbled up and emitted that way. | caddyserver-caddy | go |
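The review comment on this Caddy change (id 15,506) is about avoiding duplicate error reporting: when a function already returns the error unchanged, also logging it at that point means the same failure is reported twice once the caller handles it. The following is a minimal, hypothetical Python sketch of that principle only; the names (FlakyTransport, roundtrip, the logger) are illustrative and come from neither codebase.

import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("proxy")


class FlakyTransport:
    """Stand-in for an upstream round-tripper that always fails."""

    def send(self, request):
        raise ConnectionError("dial tcp 10.0.0.1:8080: connection refused")


def roundtrip_and_log(transport, request):
    # Anti-pattern: the failure is logged here and then reported again by the
    # caller that ultimately handles the raised error.
    try:
        return transport.send(request)
    except ConnectionError as err:
        log.error("upstream roundtrip failed: %s", err)
        raise


def roundtrip(transport, request):
    # Preferred: raise the error unchanged and let the caller report it once.
    return transport.send(request)


if __name__ == "__main__":
    try:
        roundtrip(FlakyTransport(), {"path": "/"})
    except ConnectionError as err:
        log.error("request failed: %s", err)  # the single, caller-owned report

Either variant surfaces the same error content; the only difference is how many times it shows up in the logs.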
@@ -0,0 +1,11 @@
+"""Tests for the use of typing.final whenever the py-version is set < 3.8"""
+# pylint: disable=missing-class-docstring, too-few-public-methods, missing-function-docstring, no-name-in-module
+
+from typing import final
+
+
+@final # [using-final-in-unsupported-version]
+class MyClass:
+ @final # [using-final-in-unsupported-version]
+ def my_method(self):
+ pass | 1 | 1 | 16,656 | What happens if someone import `typing` and thus uses `@typing.final` instead? | PyCQA-pylint | py |
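The reviewer's question (id 16,656) points at a form the new fixture does not exercise: `final` can also be reached as a module attribute after a plain `import typing`. A hypothetical companion fixture for that case is sketched below; whether `@typing.final` should trigger the same using-final-in-unsupported-version message is precisely what the question asks, so the expectation comments are an assumption rather than part of the actual test suite.

"""Hypothetical fixture: typing.final used via attribute access."""
# pylint: disable=missing-class-docstring, too-few-public-methods, missing-function-docstring

import typing


@typing.final  # presumably [using-final-in-unsupported-version] as well?
class MyOtherClass:
    @typing.final  # and here?
    def my_method(self):
        pass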
|
@@ -76,7 +76,7 @@ def get_test_dependencies():
# Test dependencies exposed as extras, based on:
# https://stackoverflow.com/questions/29870629
return [
- "pandas",
+ "pandas==0.20.0",
"pytest>=3.1",
"pytest-cov",
"pytest-benchmark>=3.1", | 1 | #!/usr/bin/env python3
# © H2O.ai 2018; -*- encoding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#-------------------------------------------------------------------------------
"""
Build script for the `datatable` module.
$ python setup.py sdist
$ python setup.py bdist_wheel
$ twine upload dist/*
"""
import os
import shutil
import sys
import re
import sysconfig
from functools import lru_cache as memoize
from setuptools import setup, find_packages, Extension
from sys import stderr
#-------------------------------------------------------------------------------
# Generic helpers
#-------------------------------------------------------------------------------
def get_version():
"""Determine the package version."""
version = None
with open("datatable/__version__.py", encoding="utf-8") as f:
rx = re.compile(r"""version\s*=\s*['"]([\d.]*)['"]\s*""")
for line in f:
mm = re.match(rx, line)
if mm is not None:
version = mm.group(1)
break
if version is None:
raise SystemExit("Could not detect project version from the "
"__version__.py file")
# Append build suffix if necessary
suffix = os.environ.get("CI_VERSION_SUFFIX")
if suffix:
# See https://www.python.org/dev/peps/pep-0440/ for valid versioning
# schemes.
mm = re.match(r"(?:master|dev)[.+_-]?(\d+)", suffix)
if mm:
suffix = "dev" + str(mm.group(1))
version += "." + suffix
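        # (illustrative example, not taken from CI itself: a suffix such as
        #  "master-123" is rewritten to "dev123", so a base version "0.8.0"
        #  would be published as "0.8.0.dev123", a valid PEP 440 dev release)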
return version
def get_c_sources(folder, include_headers=False):
"""Find all C/C++ source files in the `folder` directory."""
allowed_extensions = [".c", ".C", ".cc", ".cpp", ".cxx", ".c++"]
if include_headers:
allowed_extensions.extend([".h", ".hpp"])
sources = []
for root, dirs, files in os.walk(folder):
for name in files:
ext = os.path.splitext(name)[1]
if ext in allowed_extensions:
sources.append(os.path.join(root, name))
return sources
def get_py_sources():
"""Find python source directories."""
packages = find_packages(exclude=["tests", "tests.munging", "temp", "c"])
print("\nFound packages: %r\n" % packages, file=stderr)
return packages
def get_test_dependencies():
# Test dependencies exposed as extras, based on:
# https://stackoverflow.com/questions/29870629
return [
"pandas",
"pytest>=3.1",
"pytest-cov",
"pytest-benchmark>=3.1",
"pytest-ordering>=0.5",
]
#-------------------------------------------------------------------------------
# Determine compiler settings
#-------------------------------------------------------------------------------
@memoize()
def get_llvm(with_version=False):
curdir = os.path.dirname(os.path.abspath(__file__))
llvmdir = None
for LLVMX in ["LLVM4", "LLVM5", "LLVM6"]:
d = os.path.join(curdir, "datatable/" + LLVMX.lower())
if LLVMX in os.environ:
llvmdir = os.environ[LLVMX]
if llvmdir:
break
elif os.path.isdir(d):
llvmdir = d
break
if llvmdir and not os.path.isdir(llvmdir):
raise SystemExit("Environment variable %s = %r is not a directory"
% (LLVMX, llvmdir))
if not llvmdir:
raise SystemExit("Environment variables LLVM4, LLVM5 or LLVM6 are not "
"set. Please set one of these variables to the location"
" of the Clang+Llvm distribution, which you can "
" downloadfrom http://releases.llvm.org/download.html")
if with_version:
return llvmdir, LLVMX
return llvmdir
@memoize()
def get_rpath():
if sys.platform == "darwin":
return "@loader_path/."
else:
return "$ORIGIN/."
@memoize()
def get_cc(with_isystem=False):
cc = os.path.join(get_llvm(), "bin", "clang++")
if not os.path.exists(cc):
raise SystemExit("Cannot find CLang compiler at `%r`" % cc)
if with_isystem and sysconfig.get_config_var("CONFINCLUDEPY"):
cc += " -isystem " + sysconfig.get_config_var("CONFINCLUDEPY")
return cc
@memoize()
def get_default_compile_flags():
flags = sysconfig.get_config_var("PY_CFLAGS")
# remove -arch XXX flags, and add "-m64" to force 64-bit only builds
flags = re.sub(r"-arch \w+\s*", "", flags) + " -m64"
# remove -WXXX flags, because we set up all warnings manually afterwards
flags = re.sub(r"\s*-W[a-zA-Z\-]+\s*", " ", flags)
# remove -O3 flag since we'll be setting it manually to either -O0 or -O3
# depending on the debug mode
flags = re.sub(r"\s*-O\d\s*", " ", flags)
# remove -DNDEBUG so that the program can easier use asserts
flags = re.sub(r"\s*-DNDEBUG\s*", " ", flags)
return flags
@memoize()
def get_extra_compile_flags():
flags = []
if sysconfig.get_config_var("CONFINCLUDEPY"):
# Marking this directory as "isystem" prevents Clang from issuing
# warnings for those files
flags += ["-isystem " + sysconfig.get_config_var("CONFINCLUDEPY"),
"-I" + sysconfig.get_config_var("CONFINCLUDEPY")]
flags += ["-std=gnu++11", "-stdlib=libc++", "-x", "c++"]
# Path to source files / Python include files
flags += ["-Ic",
"-I" + os.path.join(sys.prefix, "include")]
# Include path to C++ header files
flags += ["-I" + get_llvm() + "/include/c++/v1",
"-I" + get_llvm() + "/include",
"-isystem " + get_llvm() + "/include/c++/v1"]
# Enable/disable OpenMP support
if "DTNOOPENMP" in os.environ:
flags.append("-DDTNOOMP")
flags.append("-Wno-source-uses-openmp")
else:
flags.insert(0, "-fopenmp")
if "DTDEBUG" in os.environ:
flags += ["-g", "-ggdb", "-O0"]
elif "DTASAN" in os.environ:
flags += ["-g", "-ggdb", "-O0",
"-fsanitize=address",
"-fsanitize-address-use-after-scope",
"-shared-libasan"]
elif "DTCOVERAGE" in os.environ:
flags += ["-g", "--coverage", "-O0"]
else:
flags += ["-O3"]
if "CI_EXTRA_COMPILE_ARGS" in os.environ:
flags += [os.environ["CI_EXTRA_COMPILE_ARGS"]]
if "-O0" in flags:
flags += ["-DDTDEBUG"]
# Ignored warnings:
# -Wcovered-switch-default: we add `default` statement to
# an exhaustive switch to guard against memory
# corruption and careless enum definition expansion.
# -Wfloat-equal: this warning is just plain wrong...
# Comparing x == 0 or x == 1 is always safe.
# -Wgnu-statement-expression: we use GNU statement-as-
# expression syntax in some macros...
# -Wswitch-enum: generates spurious warnings about missing
# cases even if `default` clause is present. -Wswitch
# does not suffer from this drawback.
# -Wdeprecated: warning about compiling .c files under C++
# mode... we should just rename those files at some point.
flags += [
"-Weverything",
"-Wno-covered-switch-default",
"-Wno-float-equal",
"-Wno-gnu-statement-expression",
"-Wno-switch-enum",
"-Wno-old-style-cast",
"-Wno-c++98-compat-pedantic",
"-Wno-nested-anon-types",
"-Wno-c99-extensions",
"-Wno-deprecated",
"-Werror=implicit-function-declaration",
"-Werror=incompatible-pointer-types",
"-Wno-weak-vtables", # TODO: Remove
"-Wno-weak-template-vtables",
]
return flags
@memoize()
def get_default_link_flags():
flags = sysconfig.get_config_var("LDSHARED")
# remove the name of the linker program
flags = re.sub(r"^\w+[\w.\-]+\s+", "", flags)
# remove -arch XXX flags, and add "-m64" to force 64-bit only builds
flags = re.sub(r"-arch \w+\s*", "", flags) + " -m64"
# Add "-isystem" path with system libraries
flags += " -isystem " + sysconfig.get_config_var("CONFINCLUDEPY")
return flags
@memoize()
def get_extra_link_args():
flags = ["-L%s" % os.path.join(get_llvm(), "lib"),
"-Wl,-rpath,%s" % get_rpath()]
if sys.platform == "linux":
flags += ["-lc++"]
if not("DTNOOPENMP" in os.environ):
flags += ["-fopenmp"]
if "DTASAN" in os.environ:
flags += ["-fsanitize=address", "-shared-libasan"]
if "DTCOVERAGE" in os.environ:
flags += ["--coverage", "-O0"]
    # On Linux we need to explicitly pass the -shared flag to the clang
    # linker, because it is not added automatically on that platform
if sys.platform == "linux":
flags += ["-shared"]
return flags
#-------------------------------------------------------------------------------
# Process extra commands
#-------------------------------------------------------------------------------
def process_args(cmd):
"""
Support for additional setup.py commands:
python setup.py get_CC
python setup.py get_CCFLAGS
python setup.py get_LDFLAGS
python setup.py get_EXTEXT
"""
if not cmd.startswith("get_"):
return
os.environ["DTDEBUG"] = "1" # Force debug flag
cmd = cmd[4:]
if cmd == "EXTEXT":
print(sysconfig.get_config_var("EXT_SUFFIX"))
elif cmd == "CC":
print(get_cc())
elif cmd == "CCFLAGS":
flags = [get_default_compile_flags()] + get_extra_compile_flags()
print(" ".join(flags))
elif cmd == "LDFLAGS":
flags = [get_default_link_flags()] + get_extra_link_args()
print(" ".join(flags))
else:
raise SystemExit("Unknown setup.py command '%s'" % cmd)
sys.exit(0)
argcmd = ""
if len(sys.argv) == 2:
argcmd = sys.argv[1]
process_args(argcmd)
#-------------------------------------------------------------------------------
# Prepare the environment
#-------------------------------------------------------------------------------
# Verify the LLVM4/LLVM5 installation directory
llvmx, llvmver = get_llvm(True)
llvm_config = os.path.join(llvmx, "bin", "llvm-config")
clang = os.path.join(llvmx, "bin", "clang++")
libsdir = os.path.join(llvmx, "lib")
includes = os.path.join(llvmx, "include")
llvmlite_req = (">=0.20.0,<0.21.0" if llvmver == "LLVM4" else
">=0.21.0,<0.23.0" if llvmver == "LLVM5" else
">=0.23.0 " if llvmver == "LLVM6" else None)
for ff in [llvm_config, clang, libsdir, includes]:
if not os.path.exists(ff):
raise SystemExit("Cannot find %s folder. "
"Is this a valid Llvm installation?" % ff)
# Compiler
os.environ["CC"] = os.environ["CXX"] = get_cc(True)
# Linker
# On Linux the default LDSHARED command does not pass the proper flags to
# clang, so for coverage builds we use clang itself as the linker
if "DTCOVERAGE" in os.environ and sys.platform == "linux":
os.environ["LDSHARED"] = clang
# Compute runtime libpath with respect to bundled LLVM libraries
if sys.platform == "darwin":
extra_libs = ["libomp.dylib"]
else:
extra_libs = ["libomp.so", "libc++.so.1", "libc++abi.so.1"]
# Copy system libraries into the datatable/lib folder, so that they can be
# packaged with the wheel
if not os.path.exists(os.path.join("datatable", "lib")):
os.mkdir(os.path.join("datatable", "lib"))
for libname in extra_libs:
srcfile = os.path.join(libsdir, libname)
tgtfile = os.path.join("datatable", "lib", libname)
if not os.path.exists(tgtfile):
print("Copying %s to %s" % (srcfile, tgtfile), file=stderr)
shutil.copy(srcfile, tgtfile)
# Force to build for a 64-bit platform only
os.environ["ARCHFLAGS"] = "-m64"
# If we need to install llvmlite, this would help
os.environ["LLVM_CONFIG"] = llvm_config
print("Setting environment variables:", file=stderr)
for n in ["CC", "CXX", "LDFLAGS", "ARCHFLAGS", "LLVM_CONFIG"]:
print(" %s = %s" % (n, os.environ.get(n, "")), file=stderr)
#-------------------------------------------------------------------------------
# Main setup
#-------------------------------------------------------------------------------
setup(
name="datatable",
version=get_version(),
description="Python library for fast multi-threaded data manipulation and "
"munging.",
long_description="""
This is a Python package for manipulating 2-dimensional tabular data
structures (aka data frames). It is close in spirit to pandas or SFrame;
however we put specific emphasis on speed and big data support. As the
name suggests, the package is closely related to R's data.table and
attempts to mimic its core algorithms and API.
See https://github.com/h2oai/datatable for more details.
""",
# The homepage
url="https://github.com/h2oai/datatable",
# Author details
author="Pasha Stetsenko",
author_email="pasha@h2o.ai",
license="Mozilla Public License v2.0",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Information Analysis",
],
keywords=["datatable", "data", "dataframe", "frame", "data.table",
"munging", "numpy", "pandas", "data processing", "ETL"],
packages=get_py_sources(),
# Runtime dependencies
install_requires=[
"typesentry>=0.2.4",
"blessed",
"llvmlite" + llvmlite_req,
],
python_requires=">=3.5",
tests_require=get_test_dependencies(),
extras_require={
"testing": get_test_dependencies()
},
zip_safe=True,
ext_modules=[
Extension(
"datatable/lib/_datatable",
include_dirs=["c"],
sources=get_c_sources("c"),
extra_compile_args=get_extra_compile_flags(),
extra_link_args=get_extra_link_args(),
language="c++",
),
],
package_dir={"datatable": "datatable"},
package_data={"datatable": ["lib/*.*"]},
)
| 1 | 10,923 | Better remove this dependency altogether | h2oai-datatable | py |
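The review on this datatable change (id 10,923) suggests dropping the pandas test dependency rather than pinning it to 0.20.0. A hedged sketch of what that could look like: pandas disappears from the extras, and the tests that exercise pandas interop skip themselves when it is not installed. The list mirrors the file above; the test function is purely illustrative and not part of the datatable test suite.

def get_test_dependencies():
    # pandas intentionally omitted: interop tests skip themselves when it is
    # absent instead of forcing a pinned version onto every contributor
    return [
        "pytest>=3.1",
        "pytest-cov",
        "pytest-benchmark>=3.1",
        "pytest-ordering>=0.5",
    ]


# In a test module that needs pandas:
import pytest

pd = pytest.importorskip("pandas")  # the whole module is skipped without pandas


def test_pandas_interop():
    df = pd.DataFrame({"A": [1, 2, 3]})
    assert df.shape == (3, 1)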
@@ -944,4 +944,7 @@ void h2o_cleanup_thread(void)
h2o_mem_clear_recycle(&h2o_mem_pool_allocator);
h2o_mem_clear_recycle(&h2o_http2_wbuf_buffer_prototype.allocator);
h2o_mem_clear_recycle(&h2o_socket_buffer_prototype.allocator);
+
+ extern __thread h2o_buffer_prototype_t h2o__http2client_wbuf_buffer_prototype;
+ h2o_mem_clear_recycle(&h2o__http2client_wbuf_buffer_prototype.allocator);
} | 1 | /*
* Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Satoh Hiroh
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include "h2o.h"
#include "h2o/http1.h"
#include "h2o/http2.h"
#include "h2o/hiredis_.h"
struct st_h2o_accept_data_t {
h2o_accept_ctx_t *ctx;
h2o_socket_t *sock;
h2o_timer_t timeout;
struct timeval connected_at;
};
struct st_h2o_memcached_resumption_accept_data_t {
struct st_h2o_accept_data_t super;
h2o_memcached_req_t *get_req;
};
struct st_h2o_redis_resumption_accept_data_t {
struct st_h2o_accept_data_t super;
h2o_redis_command_t *get_command;
};
static void on_accept_timeout(h2o_timer_t *entry);
static void on_redis_accept_timeout(h2o_timer_t *entry);
static void on_memcached_accept_timeout(h2o_timer_t *entry);
static struct {
struct {
h2o_memcached_context_t *ctx;
} memcached;
struct {
h2o_iovec_t host;
uint16_t port;
h2o_iovec_t prefix;
} redis;
unsigned expiration;
} async_resumption_context;
static struct st_h2o_accept_data_t *create_accept_data(h2o_accept_ctx_t *ctx, h2o_socket_t *sock, struct timeval connected_at,
h2o_timer_cb timeout_cb, size_t sz)
{
struct st_h2o_accept_data_t *data = h2o_mem_alloc(sz);
data->ctx = ctx;
data->sock = sock;
h2o_timer_init(&data->timeout, timeout_cb);
h2o_timer_link(ctx->ctx->loop, ctx->ctx->globalconf->handshake_timeout, &data->timeout);
data->connected_at = connected_at;
return data;
}
static struct st_h2o_accept_data_t *create_default_accept_data(h2o_accept_ctx_t *ctx, h2o_socket_t *sock,
struct timeval connected_at)
{
struct st_h2o_accept_data_t *data =
create_accept_data(ctx, sock, connected_at, on_accept_timeout, sizeof(struct st_h2o_accept_data_t));
return data;
}
static struct st_h2o_accept_data_t *create_redis_accept_data(h2o_accept_ctx_t *ctx, h2o_socket_t *sock, struct timeval connected_at)
{
struct st_h2o_redis_resumption_accept_data_t *data = (struct st_h2o_redis_resumption_accept_data_t *)create_accept_data(
ctx, sock, connected_at, on_redis_accept_timeout, sizeof(struct st_h2o_redis_resumption_accept_data_t));
data->get_command = NULL;
return &data->super;
}
static struct st_h2o_accept_data_t *create_memcached_accept_data(h2o_accept_ctx_t *ctx, h2o_socket_t *sock,
struct timeval connected_at)
{
struct st_h2o_memcached_resumption_accept_data_t *data = (struct st_h2o_memcached_resumption_accept_data_t *)create_accept_data(
ctx, sock, connected_at, on_memcached_accept_timeout, sizeof(struct st_h2o_memcached_resumption_accept_data_t));
data->get_req = NULL;
return &data->super;
}
static void destroy_accept_data(struct st_h2o_accept_data_t *data)
{
h2o_timer_unlink(&data->timeout);
free(data);
}
static void destroy_default_accept_data(struct st_h2o_accept_data_t *_accept_data)
{
destroy_accept_data(_accept_data);
}
static void destroy_redis_accept_data(struct st_h2o_accept_data_t *_accept_data)
{
struct st_h2o_redis_resumption_accept_data_t *accept_data = (struct st_h2o_redis_resumption_accept_data_t *)_accept_data;
assert(accept_data->get_command == NULL);
destroy_accept_data(&accept_data->super);
}
static void destroy_memcached_accept_data(struct st_h2o_accept_data_t *_accept_data)
{
struct st_h2o_memcached_resumption_accept_data_t *accept_data =
(struct st_h2o_memcached_resumption_accept_data_t *)_accept_data;
assert(accept_data->get_req == NULL);
destroy_accept_data(&accept_data->super);
}
static struct {
struct st_h2o_accept_data_t *(*create)(h2o_accept_ctx_t *ctx, h2o_socket_t *sock, struct timeval connected_at);
void (*destroy)(struct st_h2o_accept_data_t *accept_data);
} accept_data_callbacks = {
create_default_accept_data,
destroy_default_accept_data,
};
static void memcached_resumption_on_get(h2o_iovec_t session_data, void *_accept_data)
{
struct st_h2o_memcached_resumption_accept_data_t *accept_data = _accept_data;
accept_data->get_req = NULL;
h2o_socket_ssl_resume_server_handshake(accept_data->super.sock, session_data);
}
static void memcached_resumption_get(h2o_socket_t *sock, h2o_iovec_t session_id)
{
struct st_h2o_memcached_resumption_accept_data_t *data = sock->data;
data->get_req = h2o_memcached_get(async_resumption_context.memcached.ctx, data->super.ctx->libmemcached_receiver, session_id,
memcached_resumption_on_get, data, H2O_MEMCACHED_ENCODE_KEY | H2O_MEMCACHED_ENCODE_VALUE);
}
static void memcached_resumption_new(h2o_socket_t *sock, h2o_iovec_t session_id, h2o_iovec_t session_data)
{
h2o_memcached_set(async_resumption_context.memcached.ctx, session_id, session_data,
(uint32_t)time(NULL) + async_resumption_context.expiration,
H2O_MEMCACHED_ENCODE_KEY | H2O_MEMCACHED_ENCODE_VALUE);
}
void h2o_accept_setup_memcached_ssl_resumption(h2o_memcached_context_t *memc, unsigned expiration)
{
async_resumption_context.memcached.ctx = memc;
async_resumption_context.expiration = expiration;
h2o_socket_ssl_async_resumption_init(memcached_resumption_get, memcached_resumption_new);
accept_data_callbacks.create = create_memcached_accept_data;
accept_data_callbacks.destroy = destroy_memcached_accept_data;
}
static void on_redis_connect(void)
{
h2o_error_printf("connected to redis at %s:%" PRIu16 "\n", async_resumption_context.redis.host.base,
async_resumption_context.redis.port);
}
static void on_redis_close(const char *errstr)
{
if (errstr == NULL) {
h2o_error_printf("disconnected from redis at %s:%" PRIu16 "\n", async_resumption_context.redis.host.base,
async_resumption_context.redis.port);
} else {
h2o_error_printf("redis connection failure: %s\n", errstr);
}
}
static void dispose_redis_connection(void *client)
{
h2o_redis_free((h2o_redis_client_t *)client);
}
static h2o_redis_client_t *get_redis_client(h2o_context_t *ctx)
{
static size_t key = SIZE_MAX;
h2o_redis_client_t **client = (h2o_redis_client_t **)h2o_context_get_storage(ctx, &key, dispose_redis_connection);
if (*client == NULL) {
*client = h2o_redis_create_client(ctx->loop, sizeof(h2o_redis_client_t));
(*client)->on_connect = on_redis_connect;
(*client)->on_close = on_redis_close;
}
return *client;
}
#define BASE64_LENGTH(len) (((len) + 2) / 3 * 4 + 1)
static h2o_iovec_t build_redis_key(h2o_iovec_t session_id, h2o_iovec_t prefix)
{
h2o_iovec_t key;
key.base = h2o_mem_alloc(prefix.len + BASE64_LENGTH(session_id.len));
if (prefix.len != 0) {
memcpy(key.base, prefix.base, prefix.len);
}
key.len = prefix.len;
key.len += h2o_base64_encode(key.base + key.len, session_id.base, session_id.len, 1);
return key;
}
static h2o_iovec_t build_redis_value(h2o_iovec_t session_data)
{
h2o_iovec_t value;
value.base = h2o_mem_alloc(BASE64_LENGTH(session_data.len));
value.len = h2o_base64_encode(value.base, session_data.base, session_data.len, 1);
return value;
}
#undef BASE64_LENGTH
static void redis_resumption_on_get(redisReply *reply, void *_accept_data, const char *errstr)
{
struct st_h2o_redis_resumption_accept_data_t *accept_data = _accept_data;
accept_data->get_command = NULL;
h2o_iovec_t session_data;
if (reply != NULL && reply->type == REDIS_REPLY_STRING) {
session_data = h2o_decode_base64url(NULL, reply->str, reply->len);
} else {
session_data = h2o_iovec_init(NULL, 0);
}
h2o_socket_ssl_resume_server_handshake(accept_data->super.sock, session_data);
if (session_data.base != NULL)
free(session_data.base);
}
static void on_redis_resumption_get_failed(h2o_timer_t *timeout_entry)
{
struct st_h2o_redis_resumption_accept_data_t *accept_data =
H2O_STRUCT_FROM_MEMBER(struct st_h2o_redis_resumption_accept_data_t, super.timeout, timeout_entry);
accept_data->get_command = NULL;
h2o_socket_ssl_resume_server_handshake(accept_data->super.sock, h2o_iovec_init(NULL, 0));
h2o_timer_unlink(timeout_entry);
}
static void redis_resumption_get(h2o_socket_t *sock, h2o_iovec_t session_id)
{
struct st_h2o_redis_resumption_accept_data_t *accept_data = sock->data;
h2o_redis_client_t *client = get_redis_client(accept_data->super.ctx->ctx);
if (client->state == H2O_REDIS_CONNECTION_STATE_CONNECTED) {
h2o_iovec_t key = build_redis_key(session_id, async_resumption_context.redis.prefix);
accept_data->get_command = h2o_redis_command(client, redis_resumption_on_get, accept_data, "GET %s", key.base);
free(key.base);
} else {
if (client->state == H2O_REDIS_CONNECTION_STATE_CLOSED) {
// try to connect
h2o_redis_connect(client, async_resumption_context.redis.host.base, async_resumption_context.redis.port);
}
// abort resumption
h2o_timer_unlink(&accept_data->super.timeout);
accept_data->super.timeout.cb = on_redis_resumption_get_failed;
h2o_timer_link(accept_data->super.ctx->ctx->loop, 0, &accept_data->super.timeout);
}
}
static void redis_resumption_new(h2o_socket_t *sock, h2o_iovec_t session_id, h2o_iovec_t session_data)
{
struct st_h2o_redis_resumption_accept_data_t *accept_data = sock->data;
h2o_redis_client_t *client = get_redis_client(accept_data->super.ctx->ctx);
if (client->state == H2O_REDIS_CONNECTION_STATE_CLOSED) {
// try to connect
h2o_redis_connect(client, async_resumption_context.redis.host.base, async_resumption_context.redis.port);
}
h2o_iovec_t key = build_redis_key(session_id, async_resumption_context.redis.prefix);
h2o_iovec_t value = build_redis_value(session_data);
h2o_redis_command(client, NULL, NULL, "SETEX %s %d %s", key.base, async_resumption_context.expiration * 10, value.base);
free(key.base);
free(value.base);
}
void h2o_accept_setup_redis_ssl_resumption(const char *host, uint16_t port, unsigned expiration, const char *prefix)
{
async_resumption_context.redis.host = h2o_strdup(NULL, host, SIZE_MAX);
async_resumption_context.redis.port = port;
async_resumption_context.redis.prefix = h2o_strdup(NULL, prefix, SIZE_MAX);
async_resumption_context.expiration = expiration;
h2o_socket_ssl_async_resumption_init(redis_resumption_get, redis_resumption_new);
accept_data_callbacks.create = create_redis_accept_data;
accept_data_callbacks.destroy = destroy_redis_accept_data;
}
static void accept_timeout(struct st_h2o_accept_data_t *data)
{
/* TODO log */
h2o_socket_t *sock = data->sock;
accept_data_callbacks.destroy(data);
h2o_socket_close(sock);
}
static void on_accept_timeout(h2o_timer_t *entry)
{
struct st_h2o_accept_data_t *data = H2O_STRUCT_FROM_MEMBER(struct st_h2o_accept_data_t, timeout, entry);
accept_timeout(data);
}
static void on_redis_accept_timeout(h2o_timer_t *entry)
{
struct st_h2o_redis_resumption_accept_data_t *data =
H2O_STRUCT_FROM_MEMBER(struct st_h2o_redis_resumption_accept_data_t, super.timeout, entry);
if (data->get_command != NULL) {
data->get_command->cb = NULL;
data->get_command = NULL;
}
accept_timeout(&data->super);
}
static void on_memcached_accept_timeout(h2o_timer_t *entry)
{
struct st_h2o_memcached_resumption_accept_data_t *data =
H2O_STRUCT_FROM_MEMBER(struct st_h2o_memcached_resumption_accept_data_t, super.timeout, entry);
if (data->get_req != NULL) {
h2o_memcached_cancel_get(async_resumption_context.memcached.ctx, data->get_req);
data->get_req = NULL;
}
accept_timeout(&data->super);
}
static void on_ssl_handshake_complete(h2o_socket_t *sock, const char *err)
{
struct st_h2o_accept_data_t *data = sock->data;
sock->data = NULL;
if (err != NULL) {
++data->ctx->ctx->ssl.errors;
h2o_socket_close(sock);
goto Exit;
}
/* stats for handshake */
struct timeval handshake_completed_at = h2o_gettimeofday(data->ctx->ctx->loop);
int64_t handshake_time = h2o_timeval_subtract(&data->connected_at, &handshake_completed_at);
if (h2o_socket_get_ssl_session_reused(sock)) {
++data->ctx->ctx->ssl.handshake_resume;
data->ctx->ctx->ssl.handshake_accum_time_resume += handshake_time;
} else {
++data->ctx->ctx->ssl.handshake_full;
data->ctx->ctx->ssl.handshake_accum_time_full += handshake_time;
}
h2o_iovec_t proto = h2o_socket_ssl_get_selected_protocol(sock);
const h2o_iovec_t *ident;
for (ident = h2o_http2_alpn_protocols; ident->len != 0; ++ident) {
if (proto.len == ident->len && memcmp(proto.base, ident->base, proto.len) == 0) {
/* connect as http2 */
++data->ctx->ctx->ssl.alpn_h2;
h2o_http2_accept(data->ctx, sock, data->connected_at);
goto Exit;
}
}
/* connect as http1 */
if (proto.len != 0)
++data->ctx->ctx->ssl.alpn_h1;
h2o_http1_accept(data->ctx, sock, data->connected_at);
Exit:
accept_data_callbacks.destroy(data);
}
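/* Parses a PROXY protocol v1 header line as sent by load balancers such as
 * HAProxy, e.g. "PROXY TCP4 192.0.2.1 198.51.100.7 56324 443\r\n" (the address
 * values here are illustrative). Returns the number of bytes consumed, -1 if
 * the line is malformed, or -2 if more input is needed. */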
static ssize_t parse_proxy_line(char *src, size_t len, struct sockaddr *sa, socklen_t *salen)
{
#define CHECK_EOF() \
if (p == end) \
return -2
#define EXPECT_CHAR(ch) \
do { \
CHECK_EOF(); \
if (*p++ != ch) \
return -1; \
} while (0)
#define SKIP_TO_WS() \
do { \
do { \
CHECK_EOF(); \
} while (*p++ != ' '); \
--p; \
} while (0)
char *p = src, *end = p + len;
void *addr;
in_port_t *port;
/* "PROXY "*/
EXPECT_CHAR('P');
EXPECT_CHAR('R');
EXPECT_CHAR('O');
EXPECT_CHAR('X');
EXPECT_CHAR('Y');
EXPECT_CHAR(' ');
/* "TCP[46] " */
CHECK_EOF();
if (*p++ != 'T') {
*salen = 0; /* indicate that no data has been obtained */
goto SkipToEOL;
}
EXPECT_CHAR('C');
EXPECT_CHAR('P');
CHECK_EOF();
switch (*p++) {
case '4':
*salen = sizeof(struct sockaddr_in);
memset(sa, 0, sizeof(struct sockaddr_in));
sa->sa_family = AF_INET;
addr = &((struct sockaddr_in *)sa)->sin_addr;
port = &((struct sockaddr_in *)sa)->sin_port;
break;
case '6':
*salen = sizeof(struct sockaddr_in6);
memset(sa, 0, sizeof(struct sockaddr_in6));
sa->sa_family = AF_INET6;
addr = &((struct sockaddr_in6 *)sa)->sin6_addr;
port = &((struct sockaddr_in6 *)sa)->sin6_port;
break;
default:
return -1;
}
EXPECT_CHAR(' ');
/* parse peer address */
char *addr_start = p;
SKIP_TO_WS();
*p = '\0';
if (inet_pton(sa->sa_family, addr_start, addr) != 1)
return -1;
*p++ = ' ';
/* skip local address */
SKIP_TO_WS();
++p;
/* parse peer port */
char *port_start = p;
SKIP_TO_WS();
*p = '\0';
unsigned short usval;
if (sscanf(port_start, "%hu", &usval) != 1)
return -1;
*port = htons(usval);
*p++ = ' ';
SkipToEOL:
do {
CHECK_EOF();
} while (*p++ != '\r');
CHECK_EOF();
if (*p++ != '\n')
return -2;
return p - src;
#undef CHECK_EOF
#undef EXPECT_CHAR
#undef SKIP_TO_WS
}
static void on_read_proxy_line(h2o_socket_t *sock, const char *err)
{
struct st_h2o_accept_data_t *data = sock->data;
if (err != NULL) {
accept_data_callbacks.destroy(data);
h2o_socket_close(sock);
return;
}
struct sockaddr_storage addr;
socklen_t addrlen;
ssize_t r = parse_proxy_line(sock->input->bytes, sock->input->size, (void *)&addr, &addrlen);
switch (r) {
case -1: /* error, just pass the input to the next handler */
break;
case -2: /* incomplete */
return;
default:
h2o_buffer_consume(&sock->input, r);
if (addrlen != 0)
h2o_socket_setpeername(sock, (void *)&addr, addrlen);
break;
}
if (data->ctx->ssl_ctx != NULL) {
h2o_socket_ssl_handshake(sock, data->ctx->ssl_ctx, NULL, h2o_iovec_init(NULL, 0), on_ssl_handshake_complete);
} else {
struct st_h2o_accept_data_t *data = sock->data;
sock->data = NULL;
h2o_http1_accept(data->ctx, sock, data->connected_at);
accept_data_callbacks.destroy(data);
}
}
void h2o_accept(h2o_accept_ctx_t *ctx, h2o_socket_t *sock)
{
struct timeval connected_at = h2o_gettimeofday(ctx->ctx->loop);
if (ctx->expect_proxy_line || ctx->ssl_ctx != NULL) {
sock->data = accept_data_callbacks.create(ctx, sock, connected_at);
if (ctx->expect_proxy_line) {
h2o_socket_read_start(sock, on_read_proxy_line);
} else {
h2o_socket_ssl_handshake(sock, ctx->ssl_ctx, NULL, h2o_iovec_init(NULL, 0), on_ssl_handshake_complete);
}
} else {
h2o_http1_accept(ctx, sock, connected_at);
}
}
size_t h2o_stringify_protocol_version(char *dst, int version)
{
char *p = dst;
if (version < 0x200) {
assert(version <= 0x109);
#define PREFIX "HTTP/1."
memcpy(p, PREFIX, sizeof(PREFIX) - 1);
p += sizeof(PREFIX) - 1;
#undef PREFIX
*p++ = '0' + (version & 0xff);
} else {
#define PREFIX "HTTP/"
memcpy(p, PREFIX, sizeof(PREFIX) - 1);
p += sizeof(PREFIX) - 1;
#undef PREFIX
*p++ = (version >> 8) + '0';
}
*p = '\0';
return p - dst;
}
size_t h2o_stringify_proxy_header(h2o_conn_t *conn, char *buf)
{
struct sockaddr_storage ss;
socklen_t sslen;
size_t strlen;
uint16_t peerport;
char *dst = buf;
if ((sslen = conn->callbacks->get_peername(conn, (void *)&ss)) == 0)
goto Unknown;
switch (ss.ss_family) {
case AF_INET:
memcpy(dst, "PROXY TCP4 ", 11);
dst += 11;
break;
case AF_INET6:
memcpy(dst, "PROXY TCP6 ", 11);
dst += 11;
break;
default:
goto Unknown;
}
if ((strlen = h2o_socket_getnumerichost((void *)&ss, sslen, dst)) == SIZE_MAX)
goto Unknown;
dst += strlen;
*dst++ = ' ';
peerport = h2o_socket_getport((void *)&ss);
if ((sslen = conn->callbacks->get_sockname(conn, (void *)&ss)) == 0)
goto Unknown;
if ((strlen = h2o_socket_getnumerichost((void *)&ss, sslen, dst)) == SIZE_MAX)
goto Unknown;
dst += strlen;
*dst++ = ' ';
dst += sprintf(dst, "%" PRIu16 " %" PRIu16 "\r\n", peerport, (uint16_t)h2o_socket_getport((void *)&ss));
return dst - buf;
Unknown:
memcpy(buf, "PROXY UNKNOWN\r\n", 15);
return 15;
}
static h2o_iovec_t to_push_path(h2o_mem_pool_t *pool, h2o_iovec_t url, h2o_iovec_t base_path, const h2o_url_scheme_t *input_scheme,
h2o_iovec_t input_authority, const h2o_url_scheme_t *base_scheme, h2o_iovec_t *base_authority,
int allow_cross_origin_push)
{
h2o_url_t parsed, resolved;
/* check the authority, and extract absolute path */
if (h2o_url_parse_relative(url.base, url.len, &parsed) != 0)
goto Invalid;
/* fast-path for abspath form */
if (base_scheme == NULL && parsed.scheme == NULL && parsed.authority.base == NULL && url.len != 0 && url.base[0] == '/') {
return h2o_strdup(pool, url.base, url.len);
}
/* check scheme and authority if given URL contains either of the two, or if base is specified */
h2o_url_t base = {input_scheme, input_authority, {NULL}, base_path, 65535};
if (base_scheme != NULL) {
base.scheme = base_scheme;
base.authority = *base_authority;
}
h2o_url_resolve(pool, &base, &parsed, &resolved);
if (input_scheme != resolved.scheme)
goto Invalid;
if (!allow_cross_origin_push &&
!h2o_lcstris(input_authority.base, input_authority.len, resolved.authority.base, resolved.authority.len))
goto Invalid;
return resolved.path;
Invalid:
return h2o_iovec_init(NULL, 0);
}
void h2o_extract_push_path_from_link_header(h2o_mem_pool_t *pool, const char *value, size_t value_len, h2o_iovec_t base_path,
const h2o_url_scheme_t *input_scheme, h2o_iovec_t input_authority,
const h2o_url_scheme_t *base_scheme, h2o_iovec_t *base_authority,
void (*cb)(void *ctx, const char *path, size_t path_len, int is_critical), void *cb_ctx,
h2o_iovec_t *filtered_value, int allow_cross_origin_push)
{
h2o_iovec_t iter = h2o_iovec_init(value, value_len), token_value;
const char *token;
size_t token_len;
*filtered_value = h2o_iovec_init(NULL, 0);
#define PUSH_FILTERED_VALUE(s, e) \
do { \
if (filtered_value->len != 0) { \
memcpy(filtered_value->base + filtered_value->len, ", ", 2); \
filtered_value->len += 2; \
} \
memcpy(filtered_value->base + filtered_value->len, (s), (e) - (s)); \
filtered_value->len += (e) - (s); \
} while (0)
/* extract URL values from Link: </pushed.css>; rel=preload */
do {
if ((token = h2o_next_token(&iter, ';', &token_len, NULL)) == NULL)
break;
/* first element should be <URL> */
if (!(token_len >= 2 && token[0] == '<' && token[token_len - 1] == '>'))
break;
h2o_iovec_t url_with_brackets = h2o_iovec_init(token, token_len);
/* find rel=preload */
int preload = 0, nopush = 0, push_only = 0, critical = 0;
while ((token = h2o_next_token(&iter, ';', &token_len, &token_value)) != NULL &&
!h2o_memis(token, token_len, H2O_STRLIT(","))) {
if (h2o_lcstris(token, token_len, H2O_STRLIT("rel")) &&
h2o_lcstris(token_value.base, token_value.len, H2O_STRLIT("preload"))) {
preload = 1;
} else if (h2o_lcstris(token, token_len, H2O_STRLIT("nopush"))) {
nopush = 1;
} else if (h2o_lcstris(token, token_len, H2O_STRLIT("x-http2-push-only"))) {
push_only = 1;
} else if (h2o_lcstris(token, token_len, H2O_STRLIT("critical"))) {
critical = 1;
}
}
/* push the path */
if (!nopush && preload) {
h2o_iovec_t path = to_push_path(pool, h2o_iovec_init(url_with_brackets.base + 1, url_with_brackets.len - 2), base_path,
input_scheme, input_authority, base_scheme, base_authority, allow_cross_origin_push);
if (path.len != 0)
(*cb)(cb_ctx, path.base, path.len, critical);
}
/* store the elements that needs to be preserved to filtered_value */
if (push_only) {
if (filtered_value->base == NULL) {
/* the max. size of filtered_value would be x2 in the worst case, when "," is converted to ", " */
filtered_value->base = h2o_mem_alloc_pool(pool, char, value_len * 2);
const char *prev_comma = h2o_memrchr(value, ',', url_with_brackets.base - value);
if (prev_comma != NULL)
PUSH_FILTERED_VALUE(value, prev_comma);
}
} else if (filtered_value->base != NULL) {
PUSH_FILTERED_VALUE(url_with_brackets.base, token != NULL ? token : value + value_len);
}
} while (token != NULL);
if (filtered_value->base != NULL) {
if (token != NULL)
PUSH_FILTERED_VALUE(token, value + value_len);
} else {
*filtered_value = h2o_iovec_init(value, value_len);
}
#undef PUSH_FILTERED_VALUE
}
int h2o_get_compressible_types(const h2o_headers_t *headers)
{
size_t header_index;
int compressible_types = 0;
for (header_index = 0; header_index != headers->size; ++header_index) {
const h2o_header_t *header = headers->entries + header_index;
if (H2O_UNLIKELY(header->name == &H2O_TOKEN_ACCEPT_ENCODING->buf)) {
h2o_iovec_t iter = h2o_iovec_init(header->value.base, header->value.len);
const char *token = NULL;
size_t token_len = 0;
while ((token = h2o_next_token(&iter, ',', &token_len, NULL)) != NULL) {
if (h2o_lcstris(token, token_len, H2O_STRLIT("gzip")))
compressible_types |= H2O_COMPRESSIBLE_GZIP;
else if (h2o_lcstris(token, token_len, H2O_STRLIT("br")))
compressible_types |= H2O_COMPRESSIBLE_BROTLI;
}
}
}
return compressible_types;
}
h2o_iovec_t h2o_build_destination(h2o_req_t *req, const char *prefix, size_t prefix_len, int use_path_normalized)
{
h2o_iovec_t parts[4];
size_t num_parts = 0;
int conf_ends_with_slash = req->pathconf->path.base[req->pathconf->path.len - 1] == '/', prefix_ends_with_slash;
/* destination starts with given prefix, if any */
if (prefix_len != 0) {
parts[num_parts++] = h2o_iovec_init(prefix, prefix_len);
prefix_ends_with_slash = prefix[prefix_len - 1] == '/';
} else {
prefix_ends_with_slash = 0;
}
/* make adjustments depending on the trailing slashes */
if (conf_ends_with_slash != prefix_ends_with_slash) {
if (conf_ends_with_slash) {
parts[num_parts++] = h2o_iovec_init(H2O_STRLIT("/"));
} else {
if (req->path_normalized.len != req->pathconf->path.len)
parts[num_parts - 1].len -= 1;
}
}
/* append suffix path and query */
if (use_path_normalized) {
parts[num_parts++] = h2o_uri_escape(&req->pool, req->path_normalized.base + req->pathconf->path.len,
req->path_normalized.len - req->pathconf->path.len, "/@:");
if (req->query_at != SIZE_MAX) {
parts[num_parts++] = h2o_iovec_init(req->path.base + req->query_at, req->path.len - req->query_at);
}
} else {
if (req->path.len > 1) {
/*
* When proxying, we want to modify the input URL as little
* as possible. We use norm_indexes to find the start of
* the path we want to forward.
*/
size_t next_unnormalized;
if (req->norm_indexes && req->pathconf->path.len > 1) {
next_unnormalized = req->norm_indexes[req->pathconf->path.len - 1];
} else {
next_unnormalized = req->pathconf->path.len;
}
/*
* Special case: the input path didn't have any '/' including the first,
* so the first character is actually found at '0'
*/
if (req->path.base[0] != '/' && next_unnormalized == 1) {
next_unnormalized = 0;
}
parts[num_parts++] = (h2o_iovec_t){req->path.base + next_unnormalized, req->path.len - next_unnormalized};
}
}
return h2o_concat_list(&req->pool, parts, num_parts);
}
#define SERVER_TIMING_DURATION_LONGEST_STR "dur=" H2O_INT32_LONGEST_STR ".000"
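/* Renders a Server-Timing duration in milliseconds with up to three fractional
 * digits, e.g. an input of 1234567 microseconds becomes "dur=1234.567";
 * trailing zeros of the fraction are omitted. */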
size_t stringify_duration(char *buf, int64_t usec)
{
int32_t msec = (int32_t)(usec / 1000);
usec -= ((int64_t)msec * 1000);
char *pos = buf;
pos += sprintf(pos, "dur=%" PRId32, msec);
if (usec != 0) {
*pos++ = '.';
int denom;
for (denom = 100; denom != 0; denom /= 10) {
int d = (int)usec / denom;
*pos++ = '0' + d;
usec -= d * denom;
if (usec == 0)
break;
}
}
return pos - buf;
}
#define DELIMITER ", "
#define ELEMENT_LONGEST_STR(name) name "; " SERVER_TIMING_DURATION_LONGEST_STR
static void emit_server_timing_element(h2o_req_t *req, h2o_iovec_t *dst, const char *name,
int (*compute_func)(h2o_req_t *, int64_t *), size_t max_len)
{
int64_t usec;
if (compute_func(req, &usec) == 0)
return;
if (dst->len == 0) {
if (max_len != SIZE_MAX)
dst->base = h2o_mem_alloc_pool(&req->pool, *dst->base, max_len);
} else {
dst->base[dst->len++] = ',';
dst->base[dst->len++] = ' ';
}
size_t name_len = strlen(name);
memcpy(dst->base + dst->len, name, name_len);
dst->len += name_len;
dst->base[dst->len++] = ';';
dst->base[dst->len++] = ' ';
dst->len += stringify_duration(dst->base + dst->len, usec);
}
void h2o_add_server_timing_header(h2o_req_t *req, int uses_trailer)
{
/* caller needs to make sure that trailers can be used */
if (0x101 <= req->version && req->version < 0x200)
assert(req->content_length == SIZE_MAX);
/* emit timings */
h2o_iovec_t dst = {NULL};
#define LONGEST_STR \
ELEMENT_LONGEST_STR("connect") \
DELIMITER ELEMENT_LONGEST_STR("request-header") DELIMITER ELEMENT_LONGEST_STR("request-body") \
DELIMITER ELEMENT_LONGEST_STR("request-total") DELIMITER ELEMENT_LONGEST_STR("process") \
DELIMITER ELEMENT_LONGEST_STR("proxy.idle") DELIMITER ELEMENT_LONGEST_STR("proxy.connect") \
DELIMITER ELEMENT_LONGEST_STR("proxy.request") DELIMITER ELEMENT_LONGEST_STR("proxy.process")
size_t max_len = sizeof(LONGEST_STR) - 1;
if ((req->send_server_timing & H2O_SEND_SERVER_TIMING_BASIC) != 0) {
emit_server_timing_element(req, &dst, "connect", h2o_time_compute_connect_time, max_len);
emit_server_timing_element(req, &dst, "request-header", h2o_time_compute_header_time, max_len);
emit_server_timing_element(req, &dst, "request-body", h2o_time_compute_body_time, max_len);
emit_server_timing_element(req, &dst, "request-total", h2o_time_compute_request_total_time, max_len);
emit_server_timing_element(req, &dst, "process", h2o_time_compute_process_time, max_len);
}
if ((req->send_server_timing & H2O_SEND_SERVER_TIMING_PROXY) != 0) {
emit_server_timing_element(req, &dst, "proxy.idle", h2o_time_compute_proxy_idle_time, max_len);
emit_server_timing_element(req, &dst, "proxy.connect", h2o_time_compute_proxy_connect_time, max_len);
emit_server_timing_element(req, &dst, "proxy.request", h2o_time_compute_proxy_request_time, max_len);
emit_server_timing_element(req, &dst, "proxy.process", h2o_time_compute_proxy_process_time, max_len);
}
#undef LONGEST_STR
if (uses_trailer)
h2o_add_header_by_str(&req->pool, &req->res.headers, H2O_STRLIT("trailer"), 0, NULL, H2O_STRLIT("server-timing"));
if (dst.len != 0)
h2o_add_header_by_str(&req->pool, &req->res.headers, H2O_STRLIT("server-timing"), 0, NULL, dst.base, dst.len);
}
h2o_iovec_t h2o_build_server_timing_trailer(h2o_req_t *req, const char *prefix, size_t prefix_len, const char *suffix,
size_t suffix_len)
{
h2o_iovec_t value;
#define LONGEST_STR \
ELEMENT_LONGEST_STR("response") \
DELIMITER ELEMENT_LONGEST_STR("total") DELIMITER ELEMENT_LONGEST_STR("proxy.response") \
DELIMITER ELEMENT_LONGEST_STR("proxy.total")
value.base = h2o_mem_alloc_pool(&req->pool, *value.base, prefix_len + suffix_len + sizeof(LONGEST_STR) - 1);
value.len = 0;
if (prefix_len != 0) {
memcpy(value.base + value.len, prefix, prefix_len);
value.len += prefix_len;
}
h2o_iovec_t dst = h2o_iovec_init(value.base + value.len, 0);
if ((req->send_server_timing & H2O_SEND_SERVER_TIMING_BASIC) != 0) {
emit_server_timing_element(req, &dst, "response", h2o_time_compute_response_time, SIZE_MAX);
emit_server_timing_element(req, &dst, "total", h2o_time_compute_total_time, SIZE_MAX);
}
if ((req->send_server_timing & H2O_SEND_SERVER_TIMING_PROXY) != 0) {
emit_server_timing_element(req, &dst, "proxy.response", h2o_time_compute_proxy_response_time, SIZE_MAX);
emit_server_timing_element(req, &dst, "proxy.total", h2o_time_compute_proxy_total_time, SIZE_MAX);
}
if (dst.len == 0)
return h2o_iovec_init(NULL, 0);
value.len += dst.len;
if (suffix_len != 0) {
memcpy(value.base + value.len, suffix, suffix_len);
value.len += suffix_len;
}
return value;
#undef LONGEST_STR
}
#undef ELEMENT_LONGEST_STR
#undef DELIMITER
/* h2-14 and h2-16 are kept for backwards compatibility, as they are often used */
#define ALPN_ENTRY(s) \
{ \
H2O_STRLIT(s) \
}
#define ALPN_PROTOCOLS_CORE ALPN_ENTRY("h2"), ALPN_ENTRY("h2-16"), ALPN_ENTRY("h2-14")
#define NPN_PROTOCOLS_CORE \
"\x02" \
"h2" \
"\x05" \
"h2-16" \
"\x05" \
"h2-14"
const h2o_iovec_t h2o_http2_alpn_protocols[] = {ALPN_PROTOCOLS_CORE, {NULL}};
const h2o_iovec_t h2o_alpn_protocols[] = {ALPN_PROTOCOLS_CORE, ALPN_ENTRY("http/1.1"), {NULL}};
const char h2o_http2_npn_protocols[] = NPN_PROTOCOLS_CORE;
const char h2o_npn_protocols[] = NPN_PROTOCOLS_CORE "\x08"
"http/1.1";
uint64_t h2o_connection_id = 0;
void h2o_cleanup_thread(void)
{
h2o_mem_clear_recycle(&h2o_mem_pool_allocator);
h2o_mem_clear_recycle(&h2o_http2_wbuf_buffer_prototype.allocator);
h2o_mem_clear_recycle(&h2o_socket_buffer_prototype.allocator);
}
| 1 | 13,876 | Could you please move the declaration to httpclient.h? I think we have a consistent pattern in declaring global variables in header files. | h2o-h2o | c |
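A minimal sketch of the header-declaration pattern the reviewer is asking for. The variable name and guard are placeholders (the actual identifier and header from this patch are not shown here): the header carries an `extern` declaration, and exactly one .c file provides the definition.

```c
/* httpclient.h -- declaration only; `h2o_httpclient_example_var` is a placeholder name */
#ifndef h2o__httpclient_example_h
#define h2o__httpclient_example_h

extern int h2o_httpclient_example_var; /* every file that includes this header sees the declaration */

#endif

/* httpclient.c -- exactly one translation unit provides the definition and storage */
#include "httpclient.h"

int h2o_httpclient_example_var = 0;
```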
@@ -61,11 +61,12 @@ public interface DataFile extends ContentFile<DataFile> {
"Splittable offsets");
Types.NestedField EQUALITY_IDS = optional(135, "equality_ids", ListType.ofRequired(136, IntegerType.get()),
"Equality comparison field IDs");
+ Types.NestedField SORT_ORDER_ID = optional(140, "sort_order_id", IntegerType.get(), "Sort order ID");
int PARTITION_ID = 102;
String PARTITION_NAME = "partition";
String PARTITION_DOC = "Partition data tuple, schema based on the partition spec";
- // NEXT ID TO ASSIGN: 140
+ // NEXT ID TO ASSIGN: 141
static StructType getType(StructType partitionType) {
// IDs start at 100 to leave room for changes to ManifestEntry | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.List;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.types.Types.BinaryType;
import org.apache.iceberg.types.Types.IntegerType;
import org.apache.iceberg.types.Types.ListType;
import org.apache.iceberg.types.Types.LongType;
import org.apache.iceberg.types.Types.MapType;
import org.apache.iceberg.types.Types.StringType;
import org.apache.iceberg.types.Types.StructType;
import static org.apache.iceberg.types.Types.NestedField.optional;
import static org.apache.iceberg.types.Types.NestedField.required;
/**
* Interface for data files listed in a table manifest.
*/
public interface DataFile extends ContentFile<DataFile> {
// fields for adding delete data files
Types.NestedField CONTENT = optional(134, "content", IntegerType.get(),
"Contents of the file: 0=data, 1=position deletes, 2=equality deletes");
Types.NestedField FILE_PATH = required(100, "file_path", StringType.get(), "Location URI with FS scheme");
Types.NestedField FILE_FORMAT = required(101, "file_format", StringType.get(),
"File format name: avro, orc, or parquet");
Types.NestedField RECORD_COUNT = required(103, "record_count", LongType.get(), "Number of records in the file");
Types.NestedField FILE_SIZE = required(104, "file_size_in_bytes", LongType.get(), "Total file size in bytes");
Types.NestedField COLUMN_SIZES = optional(108, "column_sizes", MapType.ofRequired(117, 118,
IntegerType.get(), LongType.get()), "Map of column id to total size on disk");
Types.NestedField VALUE_COUNTS = optional(109, "value_counts", MapType.ofRequired(119, 120,
IntegerType.get(), LongType.get()), "Map of column id to total count, including null and NaN");
Types.NestedField NULL_VALUE_COUNTS = optional(110, "null_value_counts", MapType.ofRequired(121, 122,
IntegerType.get(), LongType.get()), "Map of column id to null value count");
Types.NestedField NAN_VALUE_COUNTS = optional(137, "nan_value_counts", MapType.ofRequired(138, 139,
IntegerType.get(), LongType.get()), "Map of column id to number of NaN values in the column");
Types.NestedField LOWER_BOUNDS = optional(125, "lower_bounds", MapType.ofRequired(126, 127,
IntegerType.get(), BinaryType.get()), "Map of column id to lower bound");
Types.NestedField UPPER_BOUNDS = optional(128, "upper_bounds", MapType.ofRequired(129, 130,
IntegerType.get(), BinaryType.get()), "Map of column id to upper bound");
Types.NestedField KEY_METADATA = optional(131, "key_metadata", BinaryType.get(), "Encryption key metadata blob");
Types.NestedField SPLIT_OFFSETS = optional(132, "split_offsets", ListType.ofRequired(133, LongType.get()),
"Splittable offsets");
Types.NestedField EQUALITY_IDS = optional(135, "equality_ids", ListType.ofRequired(136, IntegerType.get()),
"Equality comparison field IDs");
int PARTITION_ID = 102;
String PARTITION_NAME = "partition";
String PARTITION_DOC = "Partition data tuple, schema based on the partition spec";
// NEXT ID TO ASSIGN: 140
static StructType getType(StructType partitionType) {
// IDs start at 100 to leave room for changes to ManifestEntry
return StructType.of(
CONTENT,
FILE_PATH,
FILE_FORMAT,
required(PARTITION_ID, PARTITION_NAME, partitionType, PARTITION_DOC),
RECORD_COUNT,
FILE_SIZE,
COLUMN_SIZES,
VALUE_COUNTS,
NULL_VALUE_COUNTS,
NAN_VALUE_COUNTS,
LOWER_BOUNDS,
UPPER_BOUNDS,
KEY_METADATA,
SPLIT_OFFSETS,
EQUALITY_IDS
);
}
/**
* @return the content stored in the file; one of DATA, POSITION_DELETES, or EQUALITY_DELETES
*/
@Override
default FileContent content() {
return FileContent.DATA;
}
@Override
default List<Integer> equalityFieldIds() {
return null;
}
}
| 1 | 30,844 | We will also need to add this to the spec. | apache-iceberg | java |
@@ -68,15 +68,8 @@ func makeRoundCowState(b roundCowParent, hdr bookkeeping.BlockHeader, prevTimest
lookupParent: b,
commitParent: nil,
proto: config.Consensus[hdr.CurrentProtocol],
- mods: ledgercore.StateDelta{
- Accts: make(map[basics.Address]basics.AccountData),
- Txids: make(map[transactions.Txid]basics.Round),
- Txleases: make(map[ledgercore.Txlease]basics.Round),
- Creatables: make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable),
- Hdr: &hdr,
- PrevTimestamp: prevTimestamp,
- },
- sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta),
+ mods: ledgercore.MakeStateDelta(&hdr, prevTimestamp),
+ sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta),
}
}
| 1 | // Copyright (C) 2019-2021 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package ledger
import (
"fmt"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
)
// ___________________
// < cow = Copy On Write >
// -------------------
// \ ^__^
// \ (oo)\_______
// (__)\ )\/\
// ||----w |
// || ||
type roundCowParent interface {
lookup(basics.Address) (basics.AccountData, error)
checkDup(basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error
txnCounter() uint64
getCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error)
compactCertNext() basics.Round
blockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error)
getStorageCounts(addr basics.Address, aidx basics.AppIndex, global bool) (basics.StateSchema, error)
// note: getStorageLimits is redundant with the other methods
// and is provided to optimize state schema lookups
getStorageLimits(addr basics.Address, aidx basics.AppIndex, global bool) (basics.StateSchema, error)
allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error)
getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string) (basics.TealValue, bool, error)
}
type roundCowState struct {
lookupParent roundCowParent
commitParent *roundCowState
proto config.ConsensusParams
mods ledgercore.StateDelta
// storage deltas populated as side effects of AppCall transaction
// 1. Opt-in/Close actions (see Allocate/Deallocate)
// 2. Stateful TEAL evaluation (see SetKey/DelKey)
// must be incorporated into mods.accts before passing deltas forward
sdeltas map[basics.Address]map[storagePtr]*storageDelta
}
func makeRoundCowState(b roundCowParent, hdr bookkeeping.BlockHeader, prevTimestamp int64) *roundCowState {
return &roundCowState{
lookupParent: b,
commitParent: nil,
proto: config.Consensus[hdr.CurrentProtocol],
mods: ledgercore.StateDelta{
Accts: make(map[basics.Address]basics.AccountData),
Txids: make(map[transactions.Txid]basics.Round),
Txleases: make(map[ledgercore.Txlease]basics.Round),
Creatables: make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable),
Hdr: &hdr,
PrevTimestamp: prevTimestamp,
},
sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta),
}
}
func (cb *roundCowState) deltas() ledgercore.StateDelta {
var err error
if len(cb.sdeltas) == 0 {
return cb.mods
}
// Apply storage deltas to account deltas
// 1. Ensure all addresses from sdeltas have entries in accts because
// SetKey/DelKey work only with sdeltas, so need to pull missing accounts
// 2. Call applyStorageDelta for every delta per account
for addr, smap := range cb.sdeltas {
var delta basics.AccountData
var exist bool
if delta, exist = cb.mods.Accts[addr]; !exist {
ad, err := cb.lookup(addr)
if err != nil {
panic(fmt.Sprintf("fetching account data failed for addr %s: %s", addr.String(), err.Error()))
}
delta = ad
}
for aapp, storeDelta := range smap {
if delta, err = applyStorageDelta(delta, aapp, storeDelta); err != nil {
panic(fmt.Sprintf("applying storage delta failed for addr %s app %d: %s", addr.String(), aapp.aidx, err.Error()))
}
}
cb.mods.Accts[addr] = delta
}
return cb.mods
}
func (cb *roundCowState) rewardsLevel() uint64 {
return cb.mods.Hdr.RewardsLevel
}
func (cb *roundCowState) round() basics.Round {
return cb.mods.Hdr.Round
}
func (cb *roundCowState) prevTimestamp() int64 {
return cb.mods.PrevTimestamp
}
func (cb *roundCowState) getCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
delta, ok := cb.mods.Creatables[cidx]
if ok {
if delta.Created && delta.Ctype == ctype {
return delta.Creator, true, nil
}
return basics.Address{}, false, nil
}
return cb.lookupParent.getCreator(cidx, ctype)
}
func (cb *roundCowState) lookup(addr basics.Address) (data basics.AccountData, err error) {
d, ok := cb.mods.Accts[addr]
if ok {
return d, nil
}
return cb.lookupParent.lookup(addr)
}
func (cb *roundCowState) checkDup(firstValid, lastValid basics.Round, txid transactions.Txid, txl ledgercore.Txlease) error {
_, present := cb.mods.Txids[txid]
if present {
return &ledgercore.TransactionInLedgerError{Txid: txid}
}
if cb.proto.SupportTransactionLeases && (txl.Lease != [32]byte{}) {
expires, ok := cb.mods.Txleases[txl]
if ok && cb.mods.Hdr.Round <= expires {
return ledgercore.MakeLeaseInLedgerError(txid, txl)
}
}
return cb.lookupParent.checkDup(firstValid, lastValid, txid, txl)
}
func (cb *roundCowState) txnCounter() uint64 {
return cb.lookupParent.txnCounter() + uint64(len(cb.mods.Txids))
}
func (cb *roundCowState) compactCertNext() basics.Round {
if cb.mods.CompactCertNext != 0 {
return cb.mods.CompactCertNext
}
return cb.lookupParent.compactCertNext()
}
func (cb *roundCowState) blockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
return cb.lookupParent.blockHdr(r)
}
func (cb *roundCowState) put(addr basics.Address, new basics.AccountData, newCreatable *basics.CreatableLocator, deletedCreatable *basics.CreatableLocator) {
cb.mods.Accts[addr] = new
if newCreatable != nil {
cb.mods.Creatables[newCreatable.Index] = ledgercore.ModifiedCreatable{
Ctype: newCreatable.Type,
Creator: newCreatable.Creator,
Created: true,
}
}
if deletedCreatable != nil {
cb.mods.Creatables[deletedCreatable.Index] = ledgercore.ModifiedCreatable{
Ctype: deletedCreatable.Type,
Creator: deletedCreatable.Creator,
Created: false,
}
}
}
func (cb *roundCowState) addTx(txn transactions.Transaction, txid transactions.Txid) {
cb.mods.Txids[txid] = txn.LastValid
cb.mods.Txleases[ledgercore.Txlease{Sender: txn.Sender, Lease: txn.Lease}] = txn.LastValid
}
func (cb *roundCowState) setCompactCertNext(rnd basics.Round) {
cb.mods.CompactCertNext = rnd
}
func (cb *roundCowState) child() *roundCowState {
return &roundCowState{
lookupParent: cb,
commitParent: cb,
proto: cb.proto,
mods: ledgercore.StateDelta{
Accts: make(map[basics.Address]basics.AccountData),
Txids: make(map[transactions.Txid]basics.Round),
Txleases: make(map[ledgercore.Txlease]basics.Round),
Creatables: make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable),
Hdr: cb.mods.Hdr,
PrevTimestamp: cb.mods.PrevTimestamp,
},
sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta),
}
}
func (cb *roundCowState) commitToParent() {
for addr, delta := range cb.mods.Accts {
cb.commitParent.mods.Accts[addr] = delta
}
for txid, lv := range cb.mods.Txids {
cb.commitParent.mods.Txids[txid] = lv
}
for txl, expires := range cb.mods.Txleases {
cb.commitParent.mods.Txleases[txl] = expires
}
for cidx, delta := range cb.mods.Creatables {
cb.commitParent.mods.Creatables[cidx] = delta
}
for addr, smod := range cb.sdeltas {
for aapp, nsd := range smod {
lsd, ok := cb.commitParent.sdeltas[addr][aapp]
if ok {
lsd.applyChild(nsd)
} else {
_, ok = cb.commitParent.sdeltas[addr]
if !ok {
cb.commitParent.sdeltas[addr] = make(map[storagePtr]*storageDelta)
}
cb.commitParent.sdeltas[addr][aapp] = nsd
}
}
}
cb.commitParent.mods.CompactCertNext = cb.mods.CompactCertNext
}
func (cb *roundCowState) modifiedAccounts() []basics.Address {
res := make([]basics.Address, len(cb.mods.Accts))
i := 0
for addr := range cb.mods.Accts {
res[i] = addr
i++
}
return res
}
| 1 | 41,465 | In startEvaluator, we have the paysetHint; we should use it to preinitialize the state delta allocation. The number of transactions and the deltas typically grow linearly, so preallocating twice as much in the initializer would be a good idea. | algorand-go-algorand | go |
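A hedged sketch of what the reviewer is suggesting: thread a size hint (for example the payset hint known in startEvaluator) into the StateDelta constructor so the maps are preallocated. The extra `hint` parameter and the 2x factor are assumptions for illustration; the real `ledgercore.MakeStateDelta` signature may differ.

```go
package ledgercore

import (
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/data/bookkeeping"
	"github.com/algorand/go-algorand/data/transactions"
)

// MakeStateDelta builds an empty StateDelta whose maps are sized from a
// caller-provided hint, so that typical rounds avoid repeated map growth.
// The hint parameter and the doubling for Accts mirror the review comment
// and are illustrative, not the actual implementation.
func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int) StateDelta {
	return StateDelta{
		Accts:         make(map[basics.Address]basics.AccountData, hint*2),
		Txids:         make(map[transactions.Txid]basics.Round, hint),
		Txleases:      make(map[Txlease]basics.Round, hint),
		Creatables:    make(map[basics.CreatableIndex]ModifiedCreatable, hint),
		Hdr:           hdr,
		PrevTimestamp: prevTimestamp,
	}
}
```

Callers such as makeRoundCowState could then pass the hint through instead of relying on Go's default map growth.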
@@ -44,6 +44,10 @@ import java.util.List;
* </code>
*/
public abstract class By {
+ static {
+ WebDriverException.scheduleIpHostResolving();
+ }
+
/**
* @param id The value of the "id" attribute to search for
* @return a By which locates elements by the value of the "id" attribute. | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium;
import org.openqa.selenium.internal.FindsByClassName;
import org.openqa.selenium.internal.FindsByCssSelector;
import org.openqa.selenium.internal.FindsById;
import org.openqa.selenium.internal.FindsByLinkText;
import org.openqa.selenium.internal.FindsByName;
import org.openqa.selenium.internal.FindsByTagName;
import org.openqa.selenium.internal.FindsByXPath;
import java.io.Serializable;
import java.util.List;
/**
* Mechanism used to locate elements within a document. In order to create your own locating
* mechanisms, it is possible to subclass this class and override the protected methods as required,
* though it is expected that that all subclasses rely on the basic finding mechanisms provided
* through static methods of this class:
*
* <code>
* public WebElement findElement(WebDriver driver) {
* WebElement element = driver.findElement(By.id(getSelector()));
* if (element == null)
* element = driver.findElement(By.name(getSelector());
* return element;
* }
* </code>
*/
public abstract class By {
/**
* @param id The value of the "id" attribute to search for
* @return a By which locates elements by the value of the "id" attribute.
*/
public static By id(final String id) {
if (id == null)
throw new IllegalArgumentException(
"Cannot find elements with a null id attribute.");
return new ById(id);
}
/**
* @param linkText The exact text to match against
* @return a By which locates A elements by the exact text it displays
*/
public static By linkText(final String linkText) {
if (linkText == null)
throw new IllegalArgumentException(
"Cannot find elements when link text is null.");
return new ByLinkText(linkText);
}
/**
* @param linkText The text to match against
* @return a By which locates A elements that contain the given link text
*/
public static By partialLinkText(final String linkText) {
if (linkText == null)
throw new IllegalArgumentException(
"Cannot find elements when link text is null.");
return new ByPartialLinkText(linkText);
}
/**
* @param name The value of the "name" attribute to search for
* @return a By which locates elements by the value of the "name" attribute.
*/
public static By name(final String name) {
if (name == null)
throw new IllegalArgumentException(
"Cannot find elements when name text is null.");
return new ByName(name);
}
/**
* @param name The element's tagName
* @return a By which locates elements by their tag name
*/
public static By tagName(final String name) {
if (name == null)
throw new IllegalArgumentException(
"Cannot find elements when name tag name is null.");
return new ByTagName(name);
}
/**
* @param xpathExpression The xpath to use
* @return a By which locates elements via XPath
*/
public static By xpath(final String xpathExpression) {
if (xpathExpression == null)
throw new IllegalArgumentException(
"Cannot find elements when the XPath expression is null.");
return new ByXPath(xpathExpression);
}
/**
* Finds elements based on the value of the "class" attribute. If an element has many classes then
* this will match against each of them. For example if the value is "one two onone", then the
* following "className"s will match: "one" and "two"
*
* @param className The value of the "class" attribute to search for
* @return a By which locates elements by the value of the "class" attribute.
*/
public static By className(final String className) {
if (className == null)
throw new IllegalArgumentException(
"Cannot find elements when the class name expression is null.");
return new ByClassName(className);
}
/**
* Finds elements via the driver's underlying W3 Selector engine. If the browser does not
* implement the Selector API, a best effort is made to emulate the API. In this case, we strive
* for at least CSS2 support, but offer no guarantees.
*/
public static By cssSelector(final String selector) {
if (selector == null)
throw new IllegalArgumentException(
"Cannot find elements when the selector is null");
return new ByCssSelector(selector);
}
/**
* Find a single element. Override this method if necessary.
*
* @param context A context to use to find the element
* @return The WebElement that matches the selector
*/
public WebElement findElement(SearchContext context) {
List<WebElement> allElements = findElements(context);
if (allElements == null || allElements.isEmpty())
throw new NoSuchElementException("Cannot locate an element using "
+ toString());
return allElements.get(0);
}
/**
* Find many elements.
*
* @param context A context to use to find the element
* @return A list of WebElements matching the selector
*/
public abstract List<WebElement> findElements(SearchContext context);
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
By by = (By) o;
return toString().equals(by.toString());
}
@Override
public int hashCode() {
return toString().hashCode();
}
@Override
public String toString() {
// A stub to prevent endless recursion in hashCode()
return "[unknown locator]";
}
public static class ById extends By implements Serializable {
private static final long serialVersionUID = 5341968046120372169L;
private final String id;
public ById(String id) {
this.id = id;
}
@Override
public List<WebElement> findElements(SearchContext context) {
if (context instanceof FindsById)
return ((FindsById) context).findElementsById(id);
return ((FindsByXPath) context).findElementsByXPath(".//*[@id = '" + id
+ "']");
}
@Override
public WebElement findElement(SearchContext context) {
if (context instanceof FindsById)
return ((FindsById) context).findElementById(id);
return ((FindsByXPath) context).findElementByXPath(".//*[@id = '" + id
+ "']");
}
@Override
public String toString() {
return "By.id: " + id;
}
}
public static class ByLinkText extends By implements Serializable {
private static final long serialVersionUID = 1967414585359739708L;
private final String linkText;
public ByLinkText(String linkText) {
this.linkText = linkText;
}
@Override
public List<WebElement> findElements(SearchContext context) {
return ((FindsByLinkText) context).findElementsByLinkText(linkText);
}
@Override
public WebElement findElement(SearchContext context) {
return ((FindsByLinkText) context).findElementByLinkText(linkText);
}
@Override
public String toString() {
return "By.linkText: " + linkText;
}
}
public static class ByPartialLinkText extends By implements Serializable {
private static final long serialVersionUID = 1163955344140679054L;
private final String linkText;
public ByPartialLinkText(String linkText) {
this.linkText = linkText;
}
@Override
public List<WebElement> findElements(SearchContext context) {
return ((FindsByLinkText) context)
.findElementsByPartialLinkText(linkText);
}
@Override
public WebElement findElement(SearchContext context) {
return ((FindsByLinkText) context).findElementByPartialLinkText(linkText);
}
@Override
public String toString() {
return "By.partialLinkText: " + linkText;
}
}
public static class ByName extends By implements Serializable {
private static final long serialVersionUID = 376317282960469555L;
private final String name;
public ByName(String name) {
this.name = name;
}
@Override
public List<WebElement> findElements(SearchContext context) {
if (context instanceof FindsByName)
return ((FindsByName) context).findElementsByName(name);
return ((FindsByXPath) context).findElementsByXPath(".//*[@name = '"
+ name + "']");
}
@Override
public WebElement findElement(SearchContext context) {
if (context instanceof FindsByName)
return ((FindsByName) context).findElementByName(name);
return ((FindsByXPath) context).findElementByXPath(".//*[@name = '"
+ name + "']");
}
@Override
public String toString() {
return "By.name: " + name;
}
}
public static class ByTagName extends By implements Serializable {
private static final long serialVersionUID = 4699295846984948351L;
private final String name;
public ByTagName(String name) {
this.name = name;
}
@Override
public List<WebElement> findElements(SearchContext context) {
if (context instanceof FindsByTagName)
return ((FindsByTagName) context).findElementsByTagName(name);
return ((FindsByXPath) context).findElementsByXPath(".//" + name);
}
@Override
public WebElement findElement(SearchContext context) {
if (context instanceof FindsByTagName)
return ((FindsByTagName) context).findElementByTagName(name);
return ((FindsByXPath) context).findElementByXPath(".//" + name);
}
@Override
public String toString() {
return "By.tagName: " + name;
}
}
public static class ByXPath extends By implements Serializable {
private static final long serialVersionUID = -6727228887685051584L;
private final String xpathExpression;
public ByXPath(String xpathExpression) {
this.xpathExpression = xpathExpression;
}
@Override
public List<WebElement> findElements(SearchContext context) {
return ((FindsByXPath) context).findElementsByXPath(xpathExpression);
}
@Override
public WebElement findElement(SearchContext context) {
return ((FindsByXPath) context).findElementByXPath(xpathExpression);
}
@Override
public String toString() {
return "By.xpath: " + xpathExpression;
}
}
public static class ByClassName extends By implements Serializable {
private static final long serialVersionUID = -8737882849130394673L;
private final String className;
public ByClassName(String className) {
this.className = className;
}
@Override
public List<WebElement> findElements(SearchContext context) {
if (context instanceof FindsByClassName)
return ((FindsByClassName) context).findElementsByClassName(className);
return ((FindsByXPath) context).findElementsByXPath(".//*["
+ containingWord("class", className) + "]");
}
@Override
public WebElement findElement(SearchContext context) {
if (context instanceof FindsByClassName)
return ((FindsByClassName) context).findElementByClassName(className);
return ((FindsByXPath) context).findElementByXPath(".//*["
+ containingWord("class", className) + "]");
}
/**
* Generates a partial xpath expression that matches an element whose specified attribute
* contains the given CSS word. So to match <div class='foo bar'> you would say "//div[" +
* containingWord("class", "foo") + "]".
*
* @param attribute name
* @param word name
* @return XPath fragment
*/
private String containingWord(String attribute, String word) {
return "contains(concat(' ',normalize-space(@" + attribute + "),' '),' "
+ word + " ')";
}
@Override
public String toString() {
return "By.className: " + className;
}
}
public static class ByCssSelector extends By implements Serializable {
private static final long serialVersionUID = -3910258723099459239L;
private final String selector;
public ByCssSelector(String selector) {
this.selector = selector;
}
@Override
public WebElement findElement(SearchContext context) {
if (context instanceof FindsByCssSelector) {
return ((FindsByCssSelector) context)
.findElementByCssSelector(selector);
}
throw new WebDriverException(
"Driver does not support finding an element by selector: " + selector);
}
@Override
public List<WebElement> findElements(SearchContext context) {
if (context instanceof FindsByCssSelector) {
return ((FindsByCssSelector) context)
.findElementsByCssSelector(selector);
}
throw new WebDriverException(
"Driver does not support finding elements by selector: " + selector);
}
@Override
public String toString() {
return "By.cssSelector: " + selector;
}
}
}
| 1 | 12,103 | we shouldn't add a bunch of static initializers 'everywhere' in the code. Probably just one would be good, during the construction of the 'RemoteWebDriver' class. | SeleniumHQ-selenium | js |
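A hedged sketch of the reviewer's alternative: trigger the resolution once from RemoteWebDriver rather than adding static blocks to By and other classes. `WebDriverException.scheduleIpHostResolving()` is the method this patch introduces; placing it in a static initializer of RemoteWebDriver is the reviewer's suggestion, not the change that actually landed, and the class skeleton below elides everything else.

```java
package org.openqa.selenium.remote;

import org.openqa.selenium.WebDriverException;

// Sketch only: one static initializer, run when the remote driver class loads,
// instead of a static block on By (and potentially other locator classes).
public class RemoteWebDriver {

  static {
    // schedule the background host/IP lookup used to enrich exception messages
    WebDriverException.scheduleIpHostResolving();
  }

  // ... rest of the existing class body unchanged
}
```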
@@ -54,6 +54,10 @@ namespace System.Text.Json.Serialization.Tests
await JsonSerializer.SerializeAsync(_memoryStream, _value);
}
+ [BenchmarkCategory(Categories.CoreFX, Categories.JSON)]
+ [Benchmark]
+ public string SerializeObjectProperty() => JsonSerializer.Serialize(new { Prop = (object)_value });
+
[GlobalCleanup]
public void Cleanup() => _memoryStream.Dispose();
} | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using BenchmarkDotNet.Attributes;
using MicroBenchmarks;
using MicroBenchmarks.Serializers;
using System.Collections;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.IO;
using System.Threading.Tasks;
namespace System.Text.Json.Serialization.Tests
{
[GenericTypeArguments(typeof(LoginViewModel))]
[GenericTypeArguments(typeof(Location))]
[GenericTypeArguments(typeof(IndexViewModel))]
[GenericTypeArguments(typeof(MyEventsListerViewModel))]
[GenericTypeArguments(typeof(BinaryData))]
[GenericTypeArguments(typeof(Dictionary<string, string>))]
[GenericTypeArguments(typeof(ImmutableDictionary<string, string>))]
[GenericTypeArguments(typeof(ImmutableSortedDictionary<string, string>))]
[GenericTypeArguments(typeof(HashSet<string>))]
[GenericTypeArguments(typeof(ArrayList))]
[GenericTypeArguments(typeof(Hashtable))]
public class WriteJson<T>
{
private T _value;
private MemoryStream _memoryStream;
[GlobalSetup]
public async Task Setup()
{
_value = DataGenerator.Generate<T>();
_memoryStream = new MemoryStream(capacity: short.MaxValue);
await JsonSerializer.SerializeAsync(_memoryStream, _value);
}
[BenchmarkCategory(Categories.CoreFX, Categories.JSON)]
[Benchmark]
public string SerializeToString() => JsonSerializer.Serialize(_value);
[BenchmarkCategory(Categories.CoreFX, Categories.JSON)]
[Benchmark]
public byte[] SerializeToUtf8Bytes() => JsonSerializer.SerializeToUtf8Bytes(_value);
[BenchmarkCategory(Categories.CoreFX, Categories.JSON)]
[Benchmark]
public async Task SerializeToStream()
{
_memoryStream.Position = 0;
await JsonSerializer.SerializeAsync(_memoryStream, _value);
}
[GlobalCleanup]
public void Cleanup() => _memoryStream.Dispose();
}
}
| 1 | 10,278 | Would it be possible to move the creation of the `new { Prop = (object)_value }` out of the benchmark? In the current version, the benchmark includes the cost of creating and serializing the object, while if possible it should be the cost of serialization only. | dotnet-performance | .cs |
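A hedged sketch of the change the reviewer is asking for: build the anonymous wrapper once in GlobalSetup so the benchmark measures serialization only. The `_objectWrapper` field name is illustrative, and serializing it through an `object`-typed field is a simplification of the original call shape.

```csharp
// Sketch: hoist the anonymous-object allocation out of the measured code path.
private object _objectWrapper;

[GlobalSetup]
public async Task Setup()
{
    _value = DataGenerator.Generate<T>();
    _objectWrapper = new { Prop = (object)_value };   // created once, outside the benchmark
    _memoryStream = new MemoryStream(capacity: short.MaxValue);
    await JsonSerializer.SerializeAsync(_memoryStream, _value);
}

[BenchmarkCategory(Categories.CoreFX, Categories.JSON)]
[Benchmark]
public string SerializeObjectProperty() => JsonSerializer.Serialize(_objectWrapper);
```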
@@ -23,4 +23,6 @@ return [
Knp\Bundle\MenuBundle\KnpMenuBundle::class => ['all' => true],
Translation\Bundle\TranslationBundle::class => ['all' => true],
Symfony\WebpackEncoreBundle\WebpackEncoreBundle::class => ['all' => true],
+ Translation\PlatformAdapter\Loco\Bridge\Symfony\TranslationAdapterLocoBundle::class => ['all' => true],
+ Http\HttplugBundle\HttplugBundle::class => ['all' => true],
]; | 1 | <?php
return [
ApiPlatform\Core\Bridge\Symfony\Bundle\ApiPlatformBundle::class => ['all' => true],
DAMA\DoctrineTestBundle\DAMADoctrineTestBundle::class => ['test' => true],
Doctrine\Bundle\DoctrineBundle\DoctrineBundle::class => ['all' => true],
Doctrine\Bundle\DoctrineCacheBundle\DoctrineCacheBundle::class => ['all' => true],
Doctrine\Bundle\FixturesBundle\DoctrineFixturesBundle::class => ['dev' => true, 'test' => true, 'local' => true],
Doctrine\Bundle\MigrationsBundle\DoctrineMigrationsBundle::class => ['all' => true],
Nelmio\CorsBundle\NelmioCorsBundle::class => ['all' => true],
Sensio\Bundle\FrameworkExtraBundle\SensioFrameworkExtraBundle::class => ['all' => true],
Stof\DoctrineExtensionsBundle\StofDoctrineExtensionsBundle::class => ['all' => true],
Symfony\Bundle\DebugBundle\DebugBundle::class => ['dev' => true, 'test' => true, 'local' => true],
Symfony\Bundle\FrameworkBundle\FrameworkBundle::class => ['all' => true],
Symfony\Bundle\MakerBundle\MakerBundle::class => ['dev' => true, 'local' => true],
Symfony\Bundle\MonologBundle\MonologBundle::class => ['all' => true],
Symfony\Bundle\SecurityBundle\SecurityBundle::class => ['all' => true],
Symfony\Bundle\SwiftmailerBundle\SwiftmailerBundle::class => ['all' => true],
Symfony\Bundle\TwigBundle\TwigBundle::class => ['all' => true],
Symfony\Bundle\WebProfilerBundle\WebProfilerBundle::class => ['dev' => true, 'test' => true, 'local' => true],
Symfony\Bundle\WebServerBundle\WebServerBundle::class => ['dev' => true, 'test' => true, 'local' => true],
WhiteOctober\PagerfantaBundle\WhiteOctoberPagerfantaBundle::class => ['all' => true],
Knp\Bundle\MenuBundle\KnpMenuBundle::class => ['all' => true],
Translation\Bundle\TranslationBundle::class => ['all' => true],
Symfony\WebpackEncoreBundle\WebpackEncoreBundle::class => ['all' => true],
];
| 1 | 10,915 | only dev and local | bolt-core | php |
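A hedged sketch of what "only dev and local" would look like for the two new entries, mirroring the environment flags already used elsewhere in this file:

```php
Translation\PlatformAdapter\Loco\Bridge\Symfony\TranslationAdapterLocoBundle::class => ['dev' => true, 'local' => true],
Http\HttplugBundle\HttplugBundle::class => ['dev' => true, 'local' => true],
```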
@@ -30,7 +30,7 @@ class DbJobsFailedJobsUpdate extends Migration
public function down()
{
Schema::table($this->getTableName(), function (Blueprint $table) {
- $table->tinyInteger('reserved')->unsigned();
+ $table->tinyInteger('reserved')->unsigned()->nullable();
$table->dropIndex('jobs_queue_reserved_at_index');
});
| 1 | <?php
use October\Rain\Database\Schema\Blueprint;
use October\Rain\Database\Updates\Migration;
class DbJobsFailedJobsUpdate extends Migration
{
/**
* Run the migrations.
*
* @return void
*/
public function up()
{
Schema::table($this->getTableName(), function (Blueprint $table) {
$table->dropColumn('reserved');
$table->index(['queue', 'reserved_at']);
});
Schema::table($this->getFailedTableName(), function (Blueprint $table) {
$table->longText('exception')->nullable()->after('payload');
});
}
/**
* Reverse the migrations.
*
* @return void
*/
public function down()
{
Schema::table($this->getTableName(), function (Blueprint $table) {
$table->tinyInteger('reserved')->unsigned();
$table->dropIndex('jobs_queue_reserved_at_index');
});
Schema::table($this->getFailedTableName(), function (Blueprint $table) {
$table->dropColumn('exception');
});
}
protected function getTableName()
{
return Config::get('queue.connections.database.table', 'jobs');
}
protected function getFailedTableName()
{
return Config::get('queue.failed.table', 'failed_jobs');
}
}
| 1 | 12,718 | This should not be nullable; instead it should be `->default(0);` | octobercms-october | php |
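A hedged sketch of the down() migration with the reviewer's suggestion applied — restoring `reserved` as a NOT NULL column with a default of 0 rather than making it nullable:

```php
public function down()
{
    Schema::table($this->getTableName(), function (Blueprint $table) {
        // recreate the column as NOT NULL with a sensible default instead of nullable
        $table->tinyInteger('reserved')->unsigned()->default(0);
        $table->dropIndex('jobs_queue_reserved_at_index');
    });

    Schema::table($this->getFailedTableName(), function (Blueprint $table) {
        $table->dropColumn('exception');
    });
}
```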
@@ -65,8 +65,8 @@ public class IcebergGenerics {
return this;
}
- public ScanBuilder select(String... columns) {
- this.columns = ImmutableList.copyOf(columns);
+ public ScanBuilder select(String... selectColumns) {
+ this.columns = ImmutableList.copyOf(selectColumns);
return this;
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.data;
import com.google.common.collect.ImmutableList;
import java.util.List;
import org.apache.iceberg.Table;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
public class IcebergGenerics {
private IcebergGenerics() {
}
/**
* Returns a builder to configure a read of the given table that produces generic records.
*
* @param table an Iceberg table
* @return a builder to configure the scan
*/
public static ScanBuilder read(Table table) {
return new ScanBuilder(table);
}
public static class ScanBuilder {
private final Table table;
private Expression where = Expressions.alwaysTrue();
private List<String> columns = ImmutableList.of("*");
private boolean reuseContainers = false;
private boolean caseSensitive = true;
public ScanBuilder(Table table) {
this.table = table;
}
public ScanBuilder reuseContainers() {
this.reuseContainers = true;
return this;
}
public ScanBuilder where(Expression rowFilter) {
this.where = Expressions.and(where, rowFilter);
return this;
}
public ScanBuilder caseInsensitive() {
this.caseSensitive = false;
return this;
}
public ScanBuilder select(String... columns) {
this.columns = ImmutableList.copyOf(columns);
return this;
}
public Iterable<Record> build() {
return new TableScanIterable(
table
.newScan()
.filter(where)
.caseSensitive(caseSensitive)
.select(columns),
reuseContainers
);
}
}
}
| 1 | 13,500 | What about `selectedColumns`? I believe it is frequently used throughout the project. | apache-iceberg | java |
@@ -43,6 +43,7 @@ import org.apache.solr.uninverting.UninvertingReader.Type;
public class TextField extends FieldType {
protected boolean autoGeneratePhraseQueries;
protected boolean enableGraphQueries;
+ protected boolean synonymBoostByPayload;
protected SolrQueryParserBase.SynonymQueryStyle synonymQueryStyle;
/** | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.schema;
import java.io.IOException;
import java.util.Locale;
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.SortedSetFieldSource;
import org.apache.lucene.search.*;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.QueryBuilder;
import org.apache.solr.common.SolrException;
import org.apache.solr.parser.SolrQueryParserBase;
import org.apache.solr.query.SolrRangeQuery;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.uninverting.UninvertingReader.Type;
/** <code>TextField</code> is the basic type for configurable text analysis.
* Analyzers for field types using this implementation should be defined in the schema.
*
*/
public class TextField extends FieldType {
protected boolean autoGeneratePhraseQueries;
protected boolean enableGraphQueries;
protected SolrQueryParserBase.SynonymQueryStyle synonymQueryStyle;
/**
* Analyzer set by schema for text types to use when searching fields
* of this type, subclasses can set analyzer themselves or override
* getIndexAnalyzer()
* This analyzer is used to process wildcard, prefix, regex and other multiterm queries. It
* assembles a list of tokenizer +filters that "make sense" for this, primarily accent folding and
* lowercasing filters, and charfilters.
*
* @see #getMultiTermAnalyzer
* @see #setMultiTermAnalyzer
*/
protected Analyzer multiTermAnalyzer=null;
private boolean isExplicitMultiTermAnalyzer = false;
@Override
protected void init(IndexSchema schema, Map<String,String> args) {
properties |= TOKENIZED;
if (schema.getVersion() > 1.1F &&
// only override if it's not explicitly true
0 == (trueProperties & OMIT_TF_POSITIONS)) {
properties &= ~OMIT_TF_POSITIONS;
}
if (schema.getVersion() > 1.3F) {
autoGeneratePhraseQueries = false;
} else {
autoGeneratePhraseQueries = true;
}
String autoGeneratePhraseQueriesStr = args.remove(AUTO_GENERATE_PHRASE_QUERIES);
if (autoGeneratePhraseQueriesStr != null)
autoGeneratePhraseQueries = Boolean.parseBoolean(autoGeneratePhraseQueriesStr);
synonymQueryStyle = SolrQueryParserBase.SynonymQueryStyle.AS_SAME_TERM;
String synonymQueryStyle = args.remove(SYNONYM_QUERY_STYLE);
if (synonymQueryStyle != null) {
this.synonymQueryStyle = SolrQueryParserBase.SynonymQueryStyle.valueOf(synonymQueryStyle.toUpperCase(Locale.ROOT));
}
enableGraphQueries = true;
String enableGraphQueriesStr = args.remove(ENABLE_GRAPH_QUERIES);
if (enableGraphQueriesStr != null)
enableGraphQueries = Boolean.parseBoolean(enableGraphQueriesStr);
super.init(schema, args);
}
/**
* Returns the Analyzer to be used when searching fields of this type when mult-term queries are specified.
* <p>
* This method may be called many times, at any time.
* </p>
* @see #getIndexAnalyzer
*/
public Analyzer getMultiTermAnalyzer() {
return multiTermAnalyzer;
}
public void setMultiTermAnalyzer(Analyzer analyzer) {
this.multiTermAnalyzer = analyzer;
}
public boolean getAutoGeneratePhraseQueries() {
return autoGeneratePhraseQueries;
}
public boolean getEnableGraphQueries() {
return enableGraphQueries;
}
public SolrQueryParserBase.SynonymQueryStyle getSynonymQueryStyle() {return synonymQueryStyle;}
@Override
public SortField getSortField(SchemaField field, boolean reverse) {
/* :TODO: maybe warn if isTokenized(), but doesn't use LimitTokenCountFilter in its chain? */
return getSortedSetSortField(field,
// historical behavior based on how the early versions of the FieldCache
// would deal with multiple indexed terms in a singled valued field...
//
// Always use the 'min' value from the (Uninverted) "psuedo doc values"
SortedSetSelector.Type.MIN,
reverse, SortField.STRING_FIRST, SortField.STRING_LAST);
}
@Override
public ValueSource getValueSource(SchemaField field, QParser parser) {
return new SortedSetFieldSource(field.getName());
}
@Override
public Type getUninversionType(SchemaField sf) {
return Type.SORTED_SET_BINARY;
}
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
writer.writeStr(name, f.stringValue(), true);
}
@Override
public Query getFieldQuery(QParser parser, SchemaField field, String externalVal) {
return parseFieldQuery(parser, getQueryAnalyzer(), field.getName(), externalVal);
}
@Override
public Object toObject(SchemaField sf, BytesRef term) {
return term.utf8ToString();
}
@Override
protected boolean supportsAnalyzers() {
return true;
}
@Override
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
Analyzer multiAnalyzer = getMultiTermAnalyzer();
BytesRef lower = analyzeMultiTerm(field.getName(), part1, multiAnalyzer);
BytesRef upper = analyzeMultiTerm(field.getName(), part2, multiAnalyzer);
return new SolrRangeQuery(field.getName(), lower, upper, minInclusive, maxInclusive);
}
public static BytesRef analyzeMultiTerm(String field, String part, Analyzer analyzerIn) {
if (part == null || analyzerIn == null) return null;
try (TokenStream source = analyzerIn.tokenStream(field, part)){
source.reset();
TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
if (!source.incrementToken())
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"analyzer returned no terms for multiTerm term: " + part);
BytesRef bytes = BytesRef.deepCopyOf(termAtt.getBytesRef());
if (source.incrementToken())
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"analyzer returned too many terms for multiTerm term: " + part);
source.end();
return bytes;
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"error analyzing range part: " + part, e);
}
}
static Query parseFieldQuery(QParser parser, Analyzer analyzer, String field, String queryText) {
// note, this method always worked this way (but nothing calls it?) because it has no idea of quotes...
return new QueryBuilder(analyzer).createPhraseQuery(field, queryText);
}
public void setIsExplicitMultiTermAnalyzer(boolean isExplicitMultiTermAnalyzer) {
this.isExplicitMultiTermAnalyzer = isExplicitMultiTermAnalyzer;
}
public boolean isExplicitMultiTermAnalyzer() {
return isExplicitMultiTermAnalyzer;
}
@Override
public Object marshalSortValue(Object value) {
return marshalStringSortValue(value);
}
@Override
public Object unmarshalSortValue(Object value) {
return unmarshalStringSortValue(value);
}
}
| 1 | 26,933 | I thought we switched the approach from a payload to a boost attribute? Besides, it's not clear we need this toggle at all, since the user could arrange for this behavior simply by having the new DelimitedBoost filter in the analysis chain. | apache-lucene-solr | java |
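A hedged sketch of the alternative the reviewer describes: letting the analysis chain carry the synonym boosts instead of adding a field-type toggle. The factory class name, delimiter, and synonyms file below are assumptions — check the actual filter shipped with the DelimitedBoost work before copying this.

```xml
<!-- Sketch: boost-carrying synonyms handled entirely by the query analyzer,
     with no extra TextField option required. -->
<fieldType name="text_boosted_syn" class="solr.TextField" positionIncrementGap="100">
  <analyzer type="query">
    <tokenizer class="solr.StandardTokenizerFactory"/>
    <filter class="solr.SynonymGraphFilterFactory" synonyms="boosted-synonyms.txt"/>
    <filter class="solr.DelimitedBoostTokenFilterFactory" delimiter="|"/>
  </analyzer>
</fieldType>
```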
@@ -233,15 +233,8 @@ module Unix::Exec
def environment_string env
return '' if env.empty?
env_array = self.environment_variable_string_pair_array( env )
-
environment_string = env_array.join(' ')
- command = 'env'
- punctuation = ''
- if self[:platform] =~ /cisco-5/
- command = 'export'
- punctuation = ';'
- end
- "#{command} #{environment_string}#{punctuation}"
+ "env #{environment_string}"
end
def environment_variable_string_pair_array env | 1 | module Unix::Exec
include Beaker::CommandFactory
def reboot
if self['platform'] =~ /solaris/
exec(Beaker::Command.new("reboot"), :expect_connection_failure => true)
else
exec(Beaker::Command.new("/sbin/shutdown -r now"), :expect_connection_failure => true)
end
sleep(10) #if we attempt a reconnect too quickly we end up blocking ¯\_(ツ)_/¯
end
def echo(msg, abs=true)
(abs ? '/bin/echo' : 'echo') + " #{msg}"
end
def touch(file, abs=true)
(abs ? '/bin/touch' : 'touch') + " #{file}"
end
def path
'/bin:/usr/bin'
end
def get_ip
if self['platform'].include?('solaris') || self['platform'].include?('osx')
execute("ifconfig -a inet| awk '/broadcast/ {print $2}' | cut -d/ -f1 | head -1").strip
else
execute("ip a|awk '/global/{print$2}' | cut -d/ -f1 | head -1").strip
end
end
# Create the provided directory structure on the host
# @param [String] dir The directory structure to create on the host
# @return [Boolean] True, if directory construction succeeded, otherwise False
def mkdir_p dir
cmd = "mkdir -p #{dir}"
result = exec(Beaker::Command.new(cmd), :acceptable_exit_codes => [0, 1])
result.exit_code == 0
end
# Recursively remove the path provided
# @param [String] path The path to remove
def rm_rf path
execute("rm -rf #{path}")
end
# Move the origin to destination. The destination is removed prior to moving.
# @param [String] orig The origin path
# @param [String] dest the destination path
# @param [Boolean] rm Remove the destination prior to move
def mv orig, dest, rm=true
rm_rf dest unless !rm
execute("mv #{orig} #{dest}")
end
# Attempt to ping the provided target hostname
# @param [String] target The hostname to ping
# @param [Integer] attempts Amount of times to attempt ping before giving up
# @return [Boolean] true if ping successful, otherwise false
def ping target, attempts=5
try = 0
while try < attempts do
result = exec(Beaker::Command.new("ping -c 1 #{target}"), :accept_all_exit_codes => true)
if result.exit_code == 0
return true
end
try+=1
end
result.exit_code == 0
end
# Converts the provided environment file to a new shell script in /etc/profile.d, then sources that file.
# This is for sles and debian based hosts.
# @param [String] env_file The ssh environment file to read from
def mirror_env_to_profile_d env_file
if self[:platform] =~ /sles-|debian/
@logger.debug("mirroring environment to /etc/profile.d on sles platform host")
cur_env = exec(Beaker::Command.new("cat #{env_file}")).stdout
shell_env = ''
cur_env.each_line do |env_line|
shell_env << "export #{env_line}"
end
#here doc it over
exec(Beaker::Command.new("cat << EOF > #{self[:profile_d_env_file]}\n#{shell_env}EOF"))
#set permissions
exec(Beaker::Command.new("chmod +x #{self[:profile_d_env_file]}"))
#keep it current
exec(Beaker::Command.new("source #{self[:profile_d_env_file]}"))
else
#noop
@logger.debug("will not mirror environment to /etc/profile.d on non-sles/debian platform host")
end
end
#Add the provided key/val to the current ssh environment
#@param [String] key The key to add the value to
#@param [String] val The value for the key
#@example
# host.add_env_var('PATH', '/usr/bin:PATH')
def add_env_var key, val
key = key.to_s.upcase
env_file = self[:ssh_env_file]
escaped_val = Regexp.escape(val).gsub('/', '\/').gsub(';', '\;')
#see if the key/value pair already exists
if exec(Beaker::Command.new("grep ^#{key}=.*#{escaped_val} #{env_file}"), :accept_all_exit_codes => true ).exit_code == 0
return #nothing to do here, key value pair already exists
#see if the key already exists
elsif exec(Beaker::Command.new("grep ^#{key} #{env_file}"), :accept_all_exit_codes => true ).exit_code == 0
exec(Beaker::SedCommand.new(self['platform'], "s/^#{key}=/#{key}=#{escaped_val}:/", env_file))
else
exec(Beaker::Command.new("echo \"#{key}=#{val}\" >> #{env_file}"))
end
#update the profile.d to current state
#match it to the contents of ssh_env_file
mirror_env_to_profile_d(env_file)
end
#Delete the provided key/val from the current ssh environment
#@param [String] key The key to delete the value from
#@param [String] val The value to delete for the key
#@example
# host.delete_env_var('PATH', '/usr/bin:PATH')
def delete_env_var key, val
key = key.to_s.upcase
env_file = self[:ssh_env_file]
val = Regexp.escape(val).gsub('/', '\/').gsub(';', '\;')
#if the key only has that single value remove the entire line
exec(Beaker::SedCommand.new(self['platform'], "/#{key}=#{val}$/d", env_file))
#value in middle of list
exec(Beaker::SedCommand.new(self['platform'], "s/#{key}=\\(.*\\)[;:]#{val}/#{key}=\\1/", env_file))
#value in start of list
exec(Beaker::SedCommand.new(self['platform'], "s/#{key}=#{val}[;:]/#{key}=/", env_file))
#update the profile.d to current state
#match it to the contents of ssh_env_file
mirror_env_to_profile_d(env_file)
end
#Return the value of a specific env var
#@param [String] key The key to look for
#@example
# host.get_env_var('path')
def get_env_var key
key = key.to_s.upcase
exec(Beaker::Command.new("env | grep #{key}"), :accept_all_exit_codes => true).stdout.chomp
end
#Delete the environment variable from the current ssh environment
#@param [String] key The key to delete
#@example
# host.clear_env_var('PATH')
def clear_env_var key
key = key.to_s.upcase
env_file = self[:ssh_env_file]
#remove entire line
exec(Beaker::SedCommand.new(self['platform'], "/#{key}=.*$/d", env_file))
#update the profile.d to current state
#match it to the contents of ssh_env_file
mirror_env_to_profile_d(env_file)
end
# Restarts the SSH service.
#
# @return [Result] result of restarting the SSH service
def ssh_service_restart
case self['platform']
when /debian|ubuntu|cumulus/
exec(Beaker::Command.new("service ssh restart"))
when /el-7|centos-7|redhat-7|oracle-7|scientific-7|eos-7/
exec(Beaker::Command.new("systemctl restart sshd.service"))
when /el-|centos|fedora|redhat|oracle|scientific|eos/
exec(Beaker::Command.new("/sbin/service sshd restart"))
when /sles/
exec(Beaker::Command.new("rcsshd restart"))
when /solaris/
exec(Beaker::Command.new("svcadm restart svc:/network/ssh:default"))
when /(free|open)bsd/
exec(Beaker::Command.new("sudo /etc/rc.d/sshd restart"))
else
raise ArgumentError, "Unsupported Platform: '#{self['platform']}'"
end
end
# Sets the PermitUserEnvironment setting & restarts the SSH service.
#
# @api private
# @return [Result] result of the command restarting the SSH service
# (from {#ssh_service_restart}).
def ssh_permit_user_environment
case self['platform']
when /debian|ubuntu|cumulus/
directory = create_tmpdir_on(self)
exec(Beaker::Command.new("echo 'PermitUserEnvironment yes' | cat - /etc/ssh/sshd_config > #{directory}/sshd_config.permit"))
exec(Beaker::Command.new("mv #{directory}/sshd_config.permit /etc/ssh/sshd_config"))
when /el-7|centos-7|redhat-7|oracle-7|scientific-7|eos-7/
directory = create_tmpdir_on(self)
exec(Beaker::Command.new("echo 'PermitUserEnvironment yes' | cat - /etc/ssh/sshd_config > #{directory}/sshd_config.permit"))
exec(Beaker::Command.new("mv #{directory}/sshd_config.permit /etc/ssh/sshd_config"))
when /el-|centos|fedora|redhat|oracle|scientific|eos/
directory = create_tmpdir_on(self)
exec(Beaker::Command.new("echo 'PermitUserEnvironment yes' | cat - /etc/ssh/sshd_config > #{directory}/sshd_config.permit"))
exec(Beaker::Command.new("mv #{directory}/sshd_config.permit /etc/ssh/sshd_config"))
when /sles/
directory = create_tmpdir_on(self)
exec(Beaker::Command.new("echo 'PermitUserEnvironment yes' | cat - /etc/ssh/sshd_config > #{directory}/sshd_config.permit"))
exec(Beaker::Command.new("mv #{directory}/sshd_config.permit /etc/ssh/sshd_config"))
when /solaris/
# kept solaris here because refactoring it into its own Host module
# conflicts with the solaris hypervisor that already exists
directory = create_tmpdir_on(self)
exec(Beaker::Command.new("echo 'PermitUserEnvironment yes' | cat - /etc/ssh/sshd_config > #{directory}/sshd_config.permit"))
exec(Beaker::Command.new("mv #{directory}/sshd_config.permit /etc/ssh/sshd_config"))
when /(free|open)bsd/
exec(Beaker::Command.new("sudo perl -pi -e 's/^#?PermitUserEnvironment no/PermitUserEnvironment yes/' /etc/ssh/sshd_config"), {:pty => true} )
else
raise ArgumentError, "Unsupported Platform: '#{self['platform']}'"
end
ssh_service_restart()
end
# Construct the environment string for this command
#
# @param [Hash{String=>String}] env An optional Hash containing
# key-value pairs to be treated
# as environment variables that
# should be set for the duration
# of the puppet command.
#
# @return [String] Returns a string containing command line arguments that
# will ensure the environment is correctly set for the
# given host.
def environment_string env
return '' if env.empty?
env_array = self.environment_variable_string_pair_array( env )
environment_string = env_array.join(' ')
command = 'env'
punctuation = ''
if self[:platform] =~ /cisco-5/
command = 'export'
punctuation = ';'
end
"#{command} #{environment_string}#{punctuation}"
end
def environment_variable_string_pair_array env
env_array = []
env.each_key do |key|
val = env[key]
if val.is_a?(Array)
val = val.join(':')
else
val = val.to_s
end
env_array << "#{key.to_s.upcase}=\"#{val}\""
end
env_array
end
# Gets the specific prepend commands as needed for this host
#
# @param [String] command Command to be executed
# @param [String] user_pc List of user-specified commands to prepend
# @param [Hash] opts optional parameters
#
# @return [String] Command string as needed for this host
def prepend_commands(command = '', user_pc = '', opts = {})
if self[:platform] =~ /cisco-5/
return user_pc unless command.index('vsh').nil?
prepend_cmds = 'source /etc/profile; sudo ip netns exec '
prepend_cmds << ( self[:vrf] ? self[:vrf] : '' )
return prepend_cmds
end
user_pc
end
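  # Illustrative note (not from the original source): on a cisco-5 host whose
  # self[:vrf] is, say, 'management', the returned prefix would be
  #   source /etc/profile; sudo ip netns exec management
  # Commands that invoke vsh, and all other platforms, simply get the
  # user-specified prepend commands back unchanged.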
# Fills the user SSH environment file.
#
# @param [Hash{String=>String}] env Environment variables to set on the system,
# in the form of a hash of String variable
# names to their corresponding String values.
#
# @api private
# @return nil
def ssh_set_user_environment(env)
#ensure that ~/.ssh/environment exists
ssh_env_file_dir = Pathname.new(self[:ssh_env_file]).dirname
mkdir_p(ssh_env_file_dir)
exec(Beaker::Command.new("chmod 0600 #{ssh_env_file_dir}"))
exec(Beaker::Command.new("touch #{self[:ssh_env_file]}"))
#add the constructed env vars to this host
add_env_var('PATH', '$PATH')
# FIXME
if self['platform'] =~ /openbsd-(\d)\.?(\d)-(.+)/
version = "#{$1}.#{$2}"
arch = $3
arch = 'amd64' if ['x64', 'x86_64'].include?(arch)
add_env_var('PKG_PATH', "http://ftp.openbsd.org/pub/OpenBSD/#{version}/packages/#{arch}/")
elsif self['platform'] =~ /solaris-10/
add_env_var('PATH', '/opt/csw/bin')
end
#add the env var set to this test host
env.each_pair do |var, value|
add_env_var(var, value)
end
end
end
| 1 | 12,273 | the previous code used 'export' when platform is cisco-5, presumably because 'env' cannot be used to load variables? I have no idea if that's the case but this env vs export logic is not here | voxpupuli-beaker | rb |
@@ -104,11 +104,15 @@ type CloudInit struct {
// +optional
EnableSecureSecretsManager bool `json:"enableSecureSecretsManager,omitempty"`
- // SecretARN is the Amazon Resource Name of the secret. This is stored
+ // SecretCount is the number of secrets used to form the complete secret
+ // +optional
+ SecretCount int32 `json:"secretCount,omitempty"`
+
+ // SecretPrefix is the prefix for the secret name. This is stored
// temporarily, and deleted when the machine registers as a node against
// the workload cluster.
// +optional
- SecretARN string `json:"secretARN,omitempty"`
+ SecretPrefix string `json:"secretPrefix,omitempty"`
}
// AWSMachineStatus defines the observed state of AWSMachine | 1 | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/cluster-api/errors"
)
const (
// MachineFinalizer allows ReconcileAWSMachine to clean up AWS resources associated with AWSMachine before
// removing it from the apiserver.
MachineFinalizer = "awsmachine.infrastructure.cluster.x-k8s.io"
)
// AWSMachineSpec defines the desired state of AWSMachine
type AWSMachineSpec struct {
// ProviderID is the unique identifier as specified by the cloud provider.
ProviderID *string `json:"providerID,omitempty"`
// AMI is the reference to the AMI from which to create the machine instance.
AMI AWSResourceReference `json:"ami,omitempty"`
// ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
ImageLookupOrg string `json:"imageLookupOrg,omitempty"`
// InstanceType is the type of instance to create. Example: m4.xlarge
InstanceType string `json:"instanceType,omitempty"`
// AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
// AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
// AWSMachine's value takes precedence.
// +optional
AdditionalTags Tags `json:"additionalTags,omitempty"`
// IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
// +optional
IAMInstanceProfile string `json:"iamInstanceProfile,omitempty"`
// PublicIP specifies whether the instance should get a public IP.
// Precedence for this setting is as follows:
// 1. This field if set
// 2. Cluster/flavor setting
// 3. Subnet default
// +optional
PublicIP *bool `json:"publicIP,omitempty"`
// AdditionalSecurityGroups is an array of references to security groups that should be applied to the
// instance. These security groups would be set in addition to any security groups defined
// at the cluster level or in the actuator.
// +optional
AdditionalSecurityGroups []AWSResourceReference `json:"additionalSecurityGroups,omitempty"`
	// AvailabilityZone references the AWS availability zone to use for this instance.
	// If multiple subnets are matched for the availability zone, the first one returned is picked.
// +optional
AvailabilityZone *string `json:"availabilityZone,omitempty"`
// Subnet is a reference to the subnet to use for this instance. If not specified,
// the cluster subnet will be used.
// +optional
Subnet *AWSResourceReference `json:"subnet,omitempty"`
// SSHKeyName is the name of the ssh key to attach to the instance.
SSHKeyName string `json:"sshKeyName,omitempty"`
	// RootDeviceSize is the size of the root volume in gigabytes (GB).
// +optional
RootDeviceSize int64 `json:"rootDeviceSize,omitempty"`
// NetworkInterfaces is a list of ENIs to associate with the instance.
// A maximum of 2 may be specified.
// +optional
// +kubebuilder:validation:MaxItems=2
NetworkInterfaces []string `json:"networkInterfaces,omitempty"`
// CloudInit defines options related to the bootstrapping systems where
// CloudInit is used.
// +optional
CloudInit *CloudInit `json:"cloudInit,omitempty"`
}
// CloudInit defines options related to the bootstrapping systems where
// CloudInit is used.
type CloudInit struct {
	// enableSecureSecretsManager, when set to true, will use AWS Secrets Manager to ensure
// userdata privacy. A cloud-init boothook shell script is prepended to download
// the userdata from Secrets Manager and additionally delete the secret.
// +optional
EnableSecureSecretsManager bool `json:"enableSecureSecretsManager,omitempty"`
// SecretARN is the Amazon Resource Name of the secret. This is stored
// temporarily, and deleted when the machine registers as a node against
// the workload cluster.
// +optional
SecretARN string `json:"secretARN,omitempty"`
}
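// Illustrative note (not part of the original file): with the secure secrets
// flow enabled, the relevant AWSMachine spec fragment would look roughly like
//
//	cloudInit:
//	  enableSecureSecretsManager: true
//
// in which case the bootstrap userdata is stored in AWS Secrets Manager and
// its ARN is kept in secretARN only until the machine registers as a node.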
// AWSMachineStatus defines the observed state of AWSMachine
type AWSMachineStatus struct {
// Ready is true when the provider resource is ready.
// +optional
Ready bool `json:"ready"`
// Addresses contains the AWS instance associated addresses.
Addresses []v1.NodeAddress `json:"addresses,omitempty"`
// InstanceState is the state of the AWS instance for this machine.
// +optional
InstanceState *InstanceState `json:"instanceState,omitempty"`
// ErrorReason will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a succinct value suitable
// for machine interpretation.
//
// This field should not be set for transitive errors that a controller
// faces that are expected to be fixed automatically over
// time (like service outages), but instead indicate that something is
// fundamentally wrong with the Machine's spec or the configuration of
// the controller, and that manual intervention is required. Examples
// of terminal errors would be invalid combinations of settings in the
// spec, values that are unsupported by the controller, or the
// responsible controller itself being critically misconfigured.
//
// Any transient errors that occur during the reconciliation of Machines
// can be added as events to the Machine object and/or logged in the
// controller's output.
// +optional
ErrorReason *errors.MachineStatusError `json:"errorReason,omitempty"`
// ErrorMessage will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a more verbose string suitable
// for logging and human consumption.
//
// This field should not be set for transitive errors that a controller
// faces that are expected to be fixed automatically over
// time (like service outages), but instead indicate that something is
// fundamentally wrong with the Machine's spec or the configuration of
// the controller, and that manual intervention is required. Examples
// of terminal errors would be invalid combinations of settings in the
// spec, values that are unsupported by the controller, or the
// responsible controller itself being critically misconfigured.
//
// Any transient errors that occur during the reconciliation of Machines
// can be added as events to the Machine object and/or logged in the
// controller's output.
// +optional
ErrorMessage *string `json:"errorMessage,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsmachines,scope=Namespaced,categories=cluster-api
// +kubebuilder:subresource:status
// AWSMachine is the Schema for the awsmachines API
type AWSMachine struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec AWSMachineSpec `json:"spec,omitempty"`
Status AWSMachineStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// AWSMachineList contains a list of AWSMachine
type AWSMachineList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []AWSMachine `json:"items"`
}
func init() {
SchemeBuilder.Register(&AWSMachine{}, &AWSMachineList{})
}
| 1 | 13,624 | I'm wondering if we need to add some type of validation here that the SecretCount != 0... Or do we think it would be valid to have a SecretCount of 0? | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -3752,7 +3752,7 @@ describe('Cursor', function () {
db.s.topology,
db.s.namespace,
{},
- { limit: 0, skip: 0, slaveOk: false, readPreference: 42 }
+ { limit: 0, skip: 0, secondaryOk: false, readPreference: 42 }
);
cursor.hasNext(err => { | 1 | 'use strict';
const { assert: test, filterForCommands, withClient, withMonitoredClient } = require('./shared');
const { setupDatabase } = require('./shared');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { expect } = require('chai');
const BSON = require('bson');
const sinon = require('sinon');
const { Writable } = require('stream');
const { ReadPreference } = require('../../src/read_preference');
const { ServerType } = require('../../src/sdam/common');
const { formatSort } = require('../../src/sort');
const { FindCursor } = require('../../src/cursor/find_cursor');
describe('Cursor', function () {
before(function () {
return setupDatabase(this.configuration, [
'cursorkilltest1',
'cursor_session_tests',
'cursor_session_tests2'
]);
});
it('cursorShouldBeAbleToResetOnToArrayRunningQueryAgain', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_to_a', (err, collection) => {
expect(err).to.not.exist;
collection.insert({ a: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
const cursor = collection.find({});
this.defer(() => cursor.close());
cursor.toArray(err => {
expect(err).to.not.exist;
// Should fail if called again (cursor should be closed)
cursor.toArray(err => {
expect(err).to.not.exist;
// Should fail if called again (cursor should be closed)
cursor.forEach(
() => {},
err => {
expect(err).to.not.exist;
done();
}
);
});
});
});
});
});
}
});
it('cursor should close after first next operation', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('close_on_next', (err, collection) => {
expect(err).to.not.exist;
collection.insert(
[{ a: 1 }, { a: 1 }, { a: 1 }],
configuration.writeConcernMax(),
err => {
expect(err).to.not.exist;
var cursor = collection.find({});
this.defer(() => cursor.close());
cursor.batchSize(2);
cursor.next(err => {
expect(err).to.not.exist;
done();
});
}
);
});
});
}
});
it('cursor should trigger getMore', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('trigger_get_more', (err, collection) => {
expect(err).to.not.exist;
collection.insert(
[{ a: 1 }, { a: 1 }, { a: 1 }],
configuration.writeConcernMax(),
err => {
expect(err).to.not.exist;
const cursor = collection.find({}).batchSize(2);
this.defer(() => cursor.close());
cursor.toArray(err => {
expect(err).to.not.exist;
done();
});
}
);
});
});
}
});
it('shouldCorrectlyExecuteCursorExplain', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_explain', (err, collection) => {
expect(err).to.not.exist;
collection.insert({ a: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
collection.find({ a: 1 }).explain((err, explanation) => {
expect(err).to.not.exist;
expect(explanation).to.exist;
done();
});
});
});
});
}
});
it('shouldCorrectlyExecuteCursorCount', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_count', (err, collection) => {
expect(err).to.not.exist;
collection.find().count(err => {
expect(err).to.not.exist;
function insert(callback) {
var total = 10;
for (var i = 0; i < 10; i++) {
collection.insert({ x: i }, configuration.writeConcernMax(), e => {
expect(e).to.not.exist;
total = total - 1;
if (total === 0) callback();
});
}
}
function finished() {
collection.find().count((err, count) => {
expect(err).to.not.exist;
test.equal(10, count);
test.ok(count.constructor === Number);
collection.find({}, { limit: 5 }).count((err, count) => {
expect(err).to.not.exist;
test.equal(5, count);
collection.find({}, { skip: 5 }).count((err, count) => {
expect(err).to.not.exist;
test.equal(5, count);
db.collection('acollectionthatdoesn').count((err, count) => {
expect(err).to.not.exist;
test.equal(0, count);
var cursor = collection.find();
cursor.count((err, count) => {
expect(err).to.not.exist;
test.equal(10, count);
cursor.forEach(
() => {},
err => {
expect(err).to.not.exist;
cursor.count((err, count2) => {
expect(err).to.not.exist;
expect(count2).to.equal(10);
expect(count2).to.equal(count);
done();
});
}
);
});
});
});
});
});
}
insert(function () {
finished();
});
});
});
});
}
});
it('Should correctly execute cursor count with secondary readPreference', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: 'replicaset' }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), {
maxPoolSize: 1,
monitorCommands: true
});
const bag = [];
client.on('commandStarted', filterForCommands(['count'], bag));
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
const cursor = db.collection('countTEST').find({ qty: { $gt: 4 } });
cursor.count({ readPreference: ReadPreference.SECONDARY }, err => {
expect(err).to.not.exist;
const selectedServerAddress = bag[0].address.replace('127.0.0.1', 'localhost');
const selectedServer = client.topology.description.servers.get(selectedServerAddress);
expect(selectedServer).property('type').to.equal(ServerType.RSSecondary);
done();
});
});
}
});
it('shouldCorrectlyExecuteCursorCountWithDottedCollectionName', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_count.ext', (err, collection) => {
expect(err).to.not.exist;
collection.find().count(err => {
expect(err).to.not.exist;
function insert(callback) {
var total = 10;
for (var i = 0; i < 10; i++) {
collection.insert({ x: i }, configuration.writeConcernMax(), e => {
expect(e).to.not.exist;
total = total - 1;
if (total === 0) callback();
});
}
}
function finished() {
collection.find().count((err, count) => {
expect(err).to.not.exist;
test.equal(10, count);
test.ok(count.constructor === Number);
collection.find({}, { limit: 5 }).count((err, count) => {
expect(err).to.not.exist;
test.equal(5, count);
collection.find({}, { skip: 5 }).count((err, count) => {
expect(err).to.not.exist;
test.equal(5, count);
db.collection('acollectionthatdoesn').count((err, count) => {
expect(err).to.not.exist;
test.equal(0, count);
var cursor = collection.find();
cursor.count((err, count) => {
expect(err).to.not.exist;
test.equal(10, count);
cursor.forEach(
() => {},
err => {
expect(err).to.not.exist;
cursor.count((err, count2) => {
expect(err).to.not.exist;
expect(count2).to.equal(10);
expect(count2).to.equal(count);
done();
});
}
);
});
});
});
});
});
}
insert(function () {
finished();
});
});
});
});
}
});
it('shouldThrowErrorOnEachWhenMissingCallback', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_each', (err, collection) => {
expect(err).to.not.exist;
function insert(callback) {
var total = 10;
for (var i = 0; i < 10; i++) {
collection.insert({ x: i }, configuration.writeConcernMax(), e => {
expect(e).to.not.exist;
total = total - 1;
if (total === 0) callback();
});
}
}
function finished() {
const cursor = collection.find();
test.throws(function () {
cursor.forEach();
});
done();
}
insert(function () {
finished();
});
});
});
}
});
it('shouldCorrectlyHandleLimitOnCursor', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_cursor_limit', (err, collection) => {
function insert(callback) {
var total = 10;
for (var i = 0; i < 10; i++) {
collection.insert({ x: i }, configuration.writeConcernMax(), e => {
expect(e).to.not.exist;
total = total - 1;
if (total === 0) callback();
});
}
}
function finished() {
collection
.find()
.limit(5)
.toArray((err, items) => {
test.equal(5, items.length);
// Let's close the db
expect(err).to.not.exist;
done();
});
}
insert(function () {
finished();
});
});
});
}
});
it('shouldCorrectlyHandleNegativeOneLimitOnCursor', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_cursor_negative_one_limit', (err, collection) => {
expect(err).to.not.exist;
function insert(callback) {
var total = 10;
for (var i = 0; i < 10; i++) {
collection.insert({ x: i }, configuration.writeConcernMax(), e => {
expect(e).to.not.exist;
total = total - 1;
if (total === 0) callback();
});
}
}
function finished() {
collection
.find()
.limit(-1)
.toArray((err, items) => {
expect(err).to.not.exist;
test.equal(1, items.length);
// Let's close the db
done();
});
}
insert(function () {
finished();
});
});
});
}
});
it('shouldCorrectlyHandleAnyNegativeLimitOnCursor', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_cursor_any_negative_limit', (err, collection) => {
expect(err).to.not.exist;
function insert(callback) {
var total = 10;
for (var i = 0; i < 10; i++) {
collection.insert({ x: i }, configuration.writeConcernMax(), e => {
expect(e).to.not.exist;
total = total - 1;
if (total === 0) callback();
});
}
}
function finished() {
collection
.find()
.limit(-5)
.toArray((err, items) => {
expect(err).to.not.exist;
test.equal(5, items.length);
// Let's close the db
done();
});
}
insert(function () {
finished();
});
});
});
}
});
it('shouldCorrectlyReturnErrorsOnIllegalLimitValuesNotAnInt', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_limit_exceptions_2', (err, collection) => {
expect(err).to.not.exist;
collection.insert({ a: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
const cursor = collection.find();
this.defer(() => cursor.close());
try {
cursor.limit('not-an-integer');
} catch (err) {
test.equal('Operation "limit" requires an integer', err.message);
}
done();
});
});
});
}
});
it('shouldCorrectlyReturnErrorsOnIllegalLimitValuesIsClosedWithinNext', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_limit_exceptions', (err, collection) => {
expect(err).to.not.exist;
collection.insert({ a: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
const cursor = collection.find();
this.defer(() => cursor.close());
cursor.next(err => {
expect(err).to.not.exist;
expect(() => {
cursor.limit(1);
}).to.throw(/Cursor is already initialized/);
done();
});
});
});
});
}
});
// NOTE: who cares what you set when the cursor is closed?
it.skip('shouldCorrectlyReturnErrorsOnIllegalLimitValuesIsClosedWithinClose', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_limit_exceptions_1', (err, collection) => {
expect(err).to.not.exist;
collection.insert({ a: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
const cursor = collection.find();
cursor.close(err => {
expect(err).to.not.exist;
expect(() => {
cursor.limit(1);
}).to.throw(/not extensible/);
done();
});
});
});
});
}
});
it('shouldCorrectlySkipRecordsOnCursor', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_skip', (err, collection) => {
expect(err).to.not.exist;
const insert = callback => {
var total = 10;
for (var i = 0; i < 10; i++) {
collection.insert({ x: i }, configuration.writeConcernMax(), e => {
expect(e).to.not.exist;
total = total - 1;
if (total === 0) callback();
});
}
};
insert(() => {
const cursor = collection.find();
this.defer(() => cursor.close());
cursor.count((err, count) => {
expect(err).to.not.exist;
test.equal(10, count);
});
const cursor2 = collection.find();
this.defer(() => cursor2.close());
cursor2.toArray((err, items) => {
expect(err).to.not.exist;
test.equal(10, items.length);
collection
.find()
.skip(2)
.toArray((err, items2) => {
expect(err).to.not.exist;
test.equal(8, items2.length);
// Check that we have the same elements
var numberEqual = 0;
var sliced = items.slice(2, 10);
for (var i = 0; i < sliced.length; i++) {
if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1;
}
test.equal(8, numberEqual);
done();
});
});
});
});
});
}
});
it('shouldCorrectlyReturnErrorsOnIllegalSkipValues', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_skip_exceptions', (err, collection) => {
expect(err).to.not.exist;
collection.insert({ a: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
try {
collection.find().skip('not-an-integer');
} catch (err) {
test.equal('Operation "skip" requires an integer', err.message);
}
const cursor = collection.find();
cursor.next(err => {
expect(err).to.not.exist;
// NOTE: who cares what you set when closed, if not initialized
// expect(() => {
// cursor.skip(1);
// }).to.throw(/not extensible/);
const cursor2 = collection.find();
cursor2.close(err => {
expect(err).to.not.exist;
// NOTE: who cares what you set when closed, if not initialized
// expect(() => {
// cursor2.skip(1);
// }).to.throw(/not extensible/);
done();
});
});
});
});
});
}
});
it('shouldReturnErrorsOnIllegalBatchSizes', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_batchSize_exceptions', (err, collection) => {
expect(err).to.not.exist;
collection.insert({ a: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
let cursor = collection.find();
try {
cursor.batchSize('not-an-integer');
test.ok(false);
} catch (err) {
test.equal('Operation "batchSize" requires an integer', err.message);
}
cursor = collection.find();
cursor.next(err => {
expect(err).to.not.exist;
cursor.next(err => {
expect(err).to.not.exist;
// NOTE: who cares what you set when closed, if not initialized
// expect(() => {
// cursor.batchSize(1);
// }).to.throw(/not extensible/);
const cursor2 = collection.find();
cursor2.close(err => {
expect(err).to.not.exist;
// NOTE: who cares what you set when closed, if not initialized
// expect(() => {
// cursor2.batchSize(1);
// }).to.throw(/not extensible/);
done();
});
});
});
});
});
});
}
});
it('shouldCorrectlyHandleBatchSize', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_multiple_batch_size', (err, collection) => {
expect(err).to.not.exist;
//test with the last batch that is a multiple of batchSize
var records = 4;
var batchSize = 2;
var docs = [];
for (var i = 0; i < records; i++) {
docs.push({ a: i });
}
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
const cursor = collection.find({}, { batchSize: batchSize });
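            // With 4 docs and batchSize 2, each batch returns 2 documents, so
            // bufferedCount is expected to alternate 1, 0, 1, 0 across the
            // next() calls below.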
//1st
cursor.next((err, items) => {
expect(err).to.not.exist;
test.equal(1, cursor.bufferedCount());
test.ok(items != null);
//2nd
cursor.next((err, items) => {
expect(err).to.not.exist;
test.equal(0, cursor.bufferedCount());
test.ok(items != null);
//3rd
cursor.next((err, items) => {
expect(err).to.not.exist;
test.equal(1, cursor.bufferedCount());
test.ok(items != null);
//4th
cursor.next((err, items) => {
expect(err).to.not.exist;
test.equal(0, cursor.bufferedCount());
test.ok(items != null);
//No more
cursor.next((err, items) => {
expect(err).to.not.exist;
test.ok(items == null);
test.ok(cursor.closed);
done();
});
});
});
});
});
});
});
});
}
});
it('shouldHandleWhenLimitBiggerThanBatchSize', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_limit_greater_than_batch_size', (err, collection) => {
expect(err).to.not.exist;
var limit = 4;
var records = 10;
var batchSize = 3;
var docs = [];
for (var i = 0; i < records; i++) {
docs.push({ a: i });
}
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
var cursor = collection.find({}, { batchSize: batchSize, limit: limit });
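            // batchSize 3 with limit 4: the first batch returns 3 documents and
            // the follow-up getMore returns only the single remaining document
            // allowed by the limit, so bufferedCount counts down 2, 1, 0.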
//1st
cursor.next(err => {
expect(err).to.not.exist;
test.equal(2, cursor.bufferedCount());
//2nd
cursor.next(err => {
expect(err).to.not.exist;
test.equal(1, cursor.bufferedCount());
//3rd
cursor.next(err => {
expect(err).to.not.exist;
test.equal(0, cursor.bufferedCount());
//4th
cursor.next(err => {
expect(err).to.not.exist;
//No more
cursor.next((err, items) => {
expect(err).to.not.exist;
test.ok(items == null);
test.ok(cursor.closed);
done();
});
});
});
});
});
});
});
});
}
});
it('shouldHandleLimitLessThanBatchSize', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_limit_less_than_batch_size', (err, collection) => {
expect(err).to.not.exist;
var limit = 2;
var records = 10;
var batchSize = 4;
var docs = [];
for (var i = 0; i < records; i++) {
docs.push({ a: i });
}
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
var cursor = collection.find({}, { batchSize: batchSize, limit: limit });
//1st
cursor.next(err => {
expect(err).to.not.exist;
test.equal(1, cursor.bufferedCount());
//2nd
cursor.next(err => {
expect(err).to.not.exist;
test.equal(0, cursor.bufferedCount());
//No more
cursor.next((err, items) => {
expect(err).to.not.exist;
test.ok(items == null);
test.ok(cursor.closed);
done();
});
});
});
});
});
});
}
});
it('shouldHandleSkipLimitChaining', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
var collection = db.collection('shouldHandleSkipLimitChaining');
function insert(callback) {
var total = 10;
for (var i = 0; i < 10; i++) {
collection.insert({ x: i }, configuration.writeConcernMax(), e => {
expect(e).to.not.exist;
total = total - 1;
if (total === 0) callback();
});
}
}
function finished() {
collection.find().toArray((err, items) => {
expect(err).to.not.exist;
test.equal(10, items.length);
collection
.find()
.limit(5)
.skip(3)
.toArray(function (err, items2) {
expect(err).to.not.exist;
test.equal(5, items2.length);
// Check that we have the same elements
var numberEqual = 0;
var sliced = items.slice(3, 8);
for (var i = 0; i < sliced.length; i++) {
if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1;
}
test.equal(5, numberEqual);
done();
});
});
}
insert(function () {
finished();
});
});
}
});
it('shouldCorrectlyHandleLimitSkipChainingInline', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_limit_skip_chaining_inline', (err, collection) => {
expect(err).to.not.exist;
function insert(callback) {
var total = 10;
for (var i = 0; i < 10; i++) {
collection.insert({ x: i }, configuration.writeConcernMax(), e => {
expect(e).to.not.exist;
total = total - 1;
if (total === 0) callback();
});
}
}
function finished() {
collection.find().toArray((err, items) => {
expect(err).to.not.exist;
test.equal(10, items.length);
collection
.find()
.limit(5)
.skip(3)
.toArray(function (err, items2) {
expect(err).to.not.exist;
test.equal(5, items2.length);
// Check that we have the same elements
var numberEqual = 0;
var sliced = items.slice(3, 8);
for (var i = 0; i < sliced.length; i++) {
if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1;
}
test.equal(5, numberEqual);
done();
});
});
}
insert(function () {
finished();
});
});
});
}
});
it('shouldCloseCursorNoQuerySent', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_close_no_query_sent', (err, collection) => {
expect(err).to.not.exist;
const cursor = collection.find();
cursor.close(err => {
expect(err).to.not.exist;
test.equal(true, cursor.closed);
done();
});
});
});
}
});
it('shouldCorrectlyRefillViaGetMoreCommand', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var COUNT = 1000;
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_refill_via_get_more', (err, collection) => {
expect(err).to.not.exist;
function insert(callback) {
var docs = [];
for (var i = 0; i < COUNT; i++) {
docs.push({ a: i });
}
collection.insertMany(docs, configuration.writeConcernMax(), callback);
}
function finished() {
collection.count((err, count) => {
expect(err).to.not.exist;
test.equal(COUNT, count);
});
var total = 0;
collection.find({}, {}).forEach(
item => {
total = total + item.a;
},
err => {
expect(err).to.not.exist;
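                // 499500 is the sum of the inserted `a` values 0 + 1 + ... + 999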
test.equal(499500, total);
collection.count((err, count) => {
expect(err).to.not.exist;
test.equal(COUNT, count);
});
collection.count((err, count) => {
expect(err).to.not.exist;
test.equal(COUNT, count);
var total2 = 0;
collection.find().forEach(
item => {
total2 = total2 + item.a;
},
err => {
expect(err).to.not.exist;
test.equal(499500, total2);
collection.count((err, count) => {
expect(err).to.not.exist;
test.equal(COUNT, count);
test.equal(total, total2);
done();
});
}
);
});
}
);
}
insert(function () {
finished();
});
});
});
}
});
it('shouldCorrectlyRefillViaGetMoreAlternativeCollection', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_refill_via_get_more_alt_coll', (err, collection) => {
expect(err).to.not.exist;
var COUNT = 1000;
function insert(callback) {
var docs = [];
for (var i = 0; i < COUNT; i++) {
docs.push({ a: i });
}
collection.insertMany(docs, configuration.writeConcernMax(), callback);
}
function finished() {
collection.count((err, count) => {
expect(err).to.not.exist;
test.equal(1000, count);
});
var total = 0;
collection.find().forEach(
doc => {
total = total + doc.a;
},
err => {
expect(err).to.not.exist;
test.equal(499500, total);
collection.count((err, count) => {
expect(err).to.not.exist;
test.equal(1000, count);
});
collection.count((err, count) => {
expect(err).to.not.exist;
test.equal(1000, count);
var total2 = 0;
collection.find().forEach(
doc => {
total2 = total2 + doc.a;
},
err => {
expect(err).to.not.exist;
expect(total2).to.equal(499500);
collection.count((err, count) => {
expect(err).to.not.exist;
expect(count).to.equal(1000);
expect(total2).to.equal(total);
done();
});
}
);
});
}
);
}
insert(function () {
finished();
});
});
});
}
});
it('shouldCloseCursorAfterQueryHasBeenSent', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_close_after_query_sent', (err, collection) => {
expect(err).to.not.exist;
collection.insert({ a: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
const cursor = collection.find({ a: 1 });
cursor.next(err => {
expect(err).to.not.exist;
cursor.close(err => {
expect(err).to.not.exist;
test.equal(true, cursor.closed);
done();
});
});
});
});
});
}
});
it('shouldCorrectlyExecuteCursorCountWithFields', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_count_with_fields', (err, collection) => {
expect(err).to.not.exist;
collection.insertOne({ x: 1, a: 2 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
collection
.find({})
.project({ a: 1 })
.toArray((err, items) => {
expect(err).to.not.exist;
test.equal(1, items.length);
test.equal(2, items[0].a);
expect(items[0].x).to.not.exist;
done();
});
});
});
});
}
});
it('shouldCorrectlyCountWithFieldsUsingExclude', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('test_count_with_fields_using_exclude', (err, collection) => {
expect(err).to.not.exist;
collection.insertOne({ x: 1, a: 2 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
collection.find({}, { projection: { x: 0 } }).toArray((err, items) => {
expect(err).to.not.exist;
test.equal(1, items.length);
test.equal(2, items[0].a);
expect(items[0].x).to.not.exist;
done();
});
});
});
});
}
});
it('Should correctly execute count on cursor', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var docs = [];
for (var i = 0; i < 1000; i++) {
var d = new Date().getTime() + i * 1000;
docs[i] = { a: i, createdAt: new Date(d) };
}
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('Should_correctly_execute_count_on_cursor_1', (err, collection) => {
expect(err).to.not.exist;
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
let total = 0;
// Create a cursor for the content
const cursor = collection.find({});
this.defer(() => cursor.close());
cursor.count(err => {
expect(err).to.not.exist;
// Ensure each returns all documents
cursor.forEach(
() => {
total++;
},
err => {
expect(err).to.not.exist;
cursor.count((err, c) => {
expect(err).to.not.exist;
expect(c).to.equal(1000);
expect(total).to.equal(1000);
done();
});
}
);
});
});
});
});
}
});
it('does not auto destroy streams', function (done) {
const docs = [];
for (var i = 0; i < 10; i++) {
docs.push({ a: i + 1 });
}
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
const db = client.db(configuration.db);
db.createCollection('does_not_autodestroy_streams', (err, collection) => {
expect(err).to.not.exist;
collection.insertMany(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
const cursor = collection.find();
const stream = cursor.stream();
stream.on('close', () => {
expect.fail('extra close event must not be called');
});
stream.on('end', () => {
client.close();
done();
});
stream.on('data', doc => {
expect(doc).to.exist;
});
stream.resume();
});
});
});
});
it('should be able to stream documents', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var docs = [];
for (var i = 0; i < 1000; i++) {
docs[i] = { a: i + 1 };
}
var count = 0;
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('Should_be_able_to_stream_documents', (err, collection) => {
expect(err).to.not.exist;
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
var paused = 0,
closed = 0,
resumed = 0,
i = 0;
const cursor = collection.find();
const stream = cursor.stream();
stream.on('data', function (doc) {
test.equal(true, !!doc);
test.equal(true, !!doc.a);
count = count + 1;
if (paused > 0 && 0 === resumed) {
err = new Error('data emitted during pause');
return testDone();
}
if (++i === 3) {
stream.pause();
paused++;
setTimeout(function () {
stream.resume();
resumed++;
}, 20);
}
});
stream.once('error', function (er) {
err = er;
testDone();
});
stream.once('end', function () {
closed++;
testDone();
});
function testDone() {
expect(err).to.not.exist;
test.equal(i, docs.length);
test.equal(1, closed);
test.equal(1, paused);
test.equal(1, resumed);
test.strictEqual(cursor.closed, true);
done();
}
});
});
});
}
});
it('immediately destroying a stream prevents the query from executing', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var i = 0,
docs = [{ b: 2 }, { b: 3 }],
doneCalled = 0;
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection(
'immediately_destroying_a_stream_prevents_the_query_from_executing',
(err, collection) => {
expect(err).to.not.exist;
// insert all docs
collection.insertMany(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
const cursor = collection.find();
const stream = cursor.stream();
stream.on('data', function () {
i++;
});
cursor.once('close', testDone('close'));
stream.once('error', testDone('error'));
stream.destroy();
function testDone() {
return err => {
++doneCalled;
if (doneCalled === 1) {
expect(err).to.not.exist;
test.strictEqual(0, i);
test.strictEqual(true, cursor.closed);
done();
}
};
}
});
}
);
});
}
});
  it('removes session when cloning a find cursor', function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
const db = client.db(configuration.db);
db.createCollection('clone_find_cursor_session', (err, collection) => {
expect(err).to.not.exist;
collection.insertOne({ a: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
const cursor = collection.find();
const clonedCursor = cursor.clone();
cursor.toArray(err => {
expect(err).to.not.exist;
clonedCursor.toArray(err => {
expect(err).to.not.exist;
client.close();
done();
});
});
});
});
});
});
  it('removes session when cloning an aggregation cursor', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
const db = client.db(configuration.db);
db.createCollection('clone_aggregation_cursor_session', (err, collection) => {
expect(err).to.not.exist;
collection.insertOne({ a: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
const cursor = collection.aggregate([{ $match: { a: 1 } }]);
const clonedCursor = cursor.clone();
cursor.toArray(err => {
expect(err).to.not.exist;
clonedCursor.toArray(err => {
expect(err).to.not.exist;
client.close();
done();
});
});
});
});
});
}
});
it('destroying a stream stops it', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('destroying_a_stream_stops_it', (err, collection) => {
expect(err).to.not.exist;
var docs = [];
for (var ii = 0; ii < 10; ++ii) docs.push({ b: ii + 1 });
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
var finished = 0,
i = 0;
const cursor = collection.find();
const stream = cursor.stream();
test.strictEqual(false, cursor.closed);
stream.on('data', function () {
if (++i === 5) {
stream.destroy();
}
});
cursor.once('close', testDone);
stream.once('error', testDone);
stream.once('end', testDone);
function testDone(err) {
++finished;
if (finished === 2) {
test.strictEqual(undefined, err);
test.strictEqual(5, i);
test.strictEqual(2, finished);
test.strictEqual(true, cursor.closed);
done();
}
}
});
});
});
}
});
// NOTE: skipped for use of topology manager
it.skip('cursor stream errors', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { topology: ['single'] } },
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('cursor_stream_errors', (err, collection) => {
expect(err).to.not.exist;
var docs = [];
for (var ii = 0; ii < 10; ++ii) docs.push({ b: ii + 1 });
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
var finished = 0,
i = 0;
const cursor = collection.find({}, { batchSize: 5 });
const stream = cursor.stream();
stream.on('data', function () {
if (++i === 4) {
// Force restart
configuration.manager.stop(9);
}
});
stream.once('close', testDone('close'));
stream.once('error', testDone('error'));
function testDone() {
return function () {
++finished;
if (finished === 2) {
setTimeout(function () {
test.equal(5, i);
test.equal(true, cursor.closed);
client.close();
configuration.manager.start().then(function () {
done();
});
}, 150);
}
};
}
});
});
});
}
});
it('cursor stream pipe', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('cursor_stream_pipe', (err, collection) => {
expect(err).to.not.exist;
var docs = [];
'Aaden Aaron Adrian Aditya Bob Joe'.split(' ').forEach(function (name) {
docs.push({ name: name });
});
// insert all docs
collection.insertMany(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
const filename = path.join(os.tmpdir(), '_nodemongodbnative_stream_out.txt');
const out = fs.createWriteStream(filename);
const stream = collection.find().stream({
transform: doc => JSON.stringify(doc)
});
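            // The transform above serializes each document to a JSON string
            // before it is piped into the temporary output file.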
stream.pipe(out);
// Wait for output stream to close
out.on('close', testDone);
function testDone(err) {
// Object.prototype.toString = toString;
test.strictEqual(undefined, err);
var contents = fs.readFileSync(filename, 'utf8');
test.ok(/Aaden/.test(contents));
test.ok(/Aaron/.test(contents));
test.ok(/Adrian/.test(contents));
test.ok(/Aditya/.test(contents));
test.ok(/Bob/.test(contents));
test.ok(/Joe/.test(contents));
fs.unlinkSync(filename);
done();
}
});
});
});
}
});
it('should close dead tailable cursors', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] },
sessions: { skipLeakTests: true },
os: '!win32' // NODE-2943: timeout on windows
},
test: function (done) {
// http://www.mongodb.org/display/DOCS/Tailable+Cursors
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
const options = { capped: true, size: 10000000 };
db.createCollection(
'test_if_dead_tailable_cursors_close',
options,
function (err, collection) {
expect(err).to.not.exist;
let closeCount = 0;
const docs = Array.from({ length: 100 }).map(() => ({ a: 1 }));
collection.insertMany(docs, { w: 'majority', wtimeoutMS: 5000 }, err => {
expect(err).to.not.exist;
const cursor = collection.find({}, { tailable: true, awaitData: true });
const stream = cursor.stream();
stream.resume();
var validator = () => {
closeCount++;
if (closeCount === 2) {
done();
}
};
// we validate that the stream "ends" either cleanly or with an error
stream.on('end', validator);
stream.on('error', validator);
cursor.on('close', validator);
const docs = Array.from({ length: 100 }).map(() => ({ a: 1 }));
collection.insertMany(docs, err => {
expect(err).to.not.exist;
setTimeout(() => client.close());
});
});
}
);
});
}
});
it('shouldAwaitData', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
// http://www.mongodb.org/display/DOCS/Tailable+Cursors
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
const options = { capped: true, size: 8 };
db.createCollection(
'should_await_data_retry_tailable_cursor',
options,
(err, collection) => {
expect(err).to.not.exist;
collection.insert({ a: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
// Create cursor with awaitData, and timeout after the period specified
const cursor = collection.find({}, { tailable: true, awaitData: true });
this.defer(() => cursor.close());
// Execute each
cursor.forEach(
() => cursor.close(),
() => {
// Even though cursor is exhausted, should not close session
// unless cursor is manually closed, due to awaitData / tailable
done();
}
);
});
}
);
});
}
});
it('shouldAwaitDataWithDocumentsAvailable', function (done) {
// http://www.mongodb.org/display/DOCS/Tailable+Cursors
const configuration = this.configuration;
const client = configuration.newClient({ maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
const options = { capped: true, size: 8 };
db.createCollection('should_await_data_no_docs', options, (err, collection) => {
expect(err).to.not.exist;
// Create cursor with awaitData, and timeout after the period specified
const cursor = collection.find({}, { tailable: true, awaitData: true });
this.defer(() => cursor.close());
cursor.forEach(
() => {},
err => {
expect(err).to.not.exist;
done();
}
);
});
});
});
// NOTE: should we continue to let users explicitly `kill` a cursor?
it.skip('Should correctly retry tailable cursor connection', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
// http://www.mongodb.org/display/DOCS/Tailable+Cursors
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
const options = { capped: true, size: 8 };
db.createCollection('should_await_data', options, (err, collection) => {
expect(err).to.not.exist;
collection.insert({ a: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
// Create cursor with awaitData, and timeout after the period specified
var cursor = collection.find({}, { tailable: true, awaitData: true });
cursor.forEach(
() => cursor.kill(),
() => {
// kill cursor b/c cursor is tailable / awaitable
cursor.close(done);
}
);
});
});
});
}
});
it('shouldCorrectExecuteExplainHonoringLimit', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var docs = [];
docs[0] = {
_keywords: [
'compact',
'ii2gd',
'led',
'24-48v',
'presse-etoupe',
'bexbgl1d24483',
'flash',
'48v',
'eexd',
'feu',
'presse',
'compris',
'rouge',
'etoupe',
'iic',
'ii2gdeexdiict5',
'red',
'aet'
]
};
docs[1] = {
_keywords: [
'reducteur',
'06212',
'd20/16',
'manch',
'd20',
'manchon',
'ard',
'sable',
'irl',
'red'
]
};
docs[2] = {
_keywords: [
'reducteur',
'06214',
'manch',
'd25/20',
'd25',
'manchon',
'ard',
'sable',
'irl',
'red'
]
};
docs[3] = {
_keywords: [
'bar',
'rac',
'boite',
'6790178',
'50-240/4-35',
'240',
'branch',
'coulee',
'ddc',
'red',
'ip2x'
]
};
docs[4] = {
_keywords: [
'bar',
'ip2x',
'boite',
'6790158',
'ddi',
'240',
'branch',
'injectee',
'50-240/4-35?',
'red'
]
};
docs[5] = {
_keywords: [
'bar',
'ip2x',
'boite',
'6790179',
'coulee',
'240',
'branch',
'sdc',
'50-240/4-35?',
'red',
'rac'
]
};
docs[6] = {
_keywords: [
'bar',
'ip2x',
'boite',
'6790159',
'240',
'branch',
'injectee',
'50-240/4-35?',
'sdi',
'red'
]
};
docs[7] = {
_keywords: [
'6000',
'r-6000',
'resin',
'high',
'739680',
'red',
'performance',
'brd',
'with',
'ribbon',
'flanges'
]
};
docs[8] = { _keywords: ['804320', 'for', 'paint', 'roads', 'brd', 'red'] };
docs[9] = { _keywords: ['38mm', 'padlock', 'safety', '813594', 'brd', 'red'] };
docs[10] = { _keywords: ['114551', 'r6900', 'for', 'red', 'bmp71', 'brd', 'ribbon'] };
docs[11] = {
_keywords: ['catena', 'diameter', '621482', 'rings', 'brd', 'legend', 'red', '2mm']
};
docs[12] = {
_keywords: ['catena', 'diameter', '621491', 'rings', '5mm', 'brd', 'legend', 'red']
};
docs[13] = {
_keywords: ['catena', 'diameter', '621499', 'rings', '3mm', 'brd', 'legend', 'red']
};
docs[14] = {
_keywords: ['catena', 'diameter', '621508', 'rings', '5mm', 'brd', 'legend', 'red']
};
docs[15] = {
_keywords: [
'insert',
'for',
'cable',
'3mm',
'carrier',
'621540',
'blank',
'brd',
'ademark',
'red'
]
};
docs[16] = {
_keywords: [
'insert',
'for',
'cable',
'621544',
'3mm',
'carrier',
'brd',
'ademark',
'legend',
'red'
]
};
docs[17] = {
_keywords: ['catena', 'diameter', '6mm', '621518', 'rings', 'brd', 'legend', 'red']
};
docs[18] = {
_keywords: ['catena', 'diameter', '621455', '8mm', 'rings', 'brd', 'legend', 'red']
};
docs[19] = {
_keywords: ['catena', 'diameter', '621464', 'rings', '5mm', 'brd', 'legend', 'red']
};
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
// Insert all the docs
var collection = db.collection('shouldCorrectExecuteExplainHonoringLimit');
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
collection.createIndex({ _keywords: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
collection
.find({ _keywords: 'red' })
.limit(10)
.toArray(function (err, result) {
expect(err).to.not.exist;
test.ok(result != null);
collection
.find({ _keywords: 'red' }, {})
.limit(10)
.explain(function (err, result) {
expect(err).to.not.exist;
test.ok(result != null);
done();
});
});
});
});
});
}
});
it('shouldNotExplainWhenFalse', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var doc = { name: 'camera', _keywords: ['compact', 'ii2gd', 'led', 'red', 'aet'] };
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
var collection = db.collection('shouldNotExplainWhenFalse');
collection.insert(doc, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
collection
.find({ _keywords: 'red' })
.limit(10)
.toArray(function (err, result) {
expect(err).to.not.exist;
test.equal('camera', result[0].name);
done();
});
});
});
}
});
it('shouldFailToSetReadPreferenceOnCursor', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
try {
db.collection('shouldFailToSetReadPreferenceOnCursor')
.find()
.withReadPreference('notsecondary');
test.ok(false);
} catch (err) {} // eslint-disable-line
db.collection('shouldFailToSetReadPreferenceOnCursor')
.find()
.withReadPreference('secondary');
done();
});
}
});
it('should allow setting the cursors readConcern through a builder', {
metadata: { requires: { mongodb: '>=3.2' } },
test: withMonitoredClient(['find'], function (client, events, done) {
const db = client.db(this.configuration.db);
const cursor = db.collection('foo').find().withReadConcern('local');
expect(cursor).property('readConcern').to.have.property('level').equal('local');
cursor.toArray(err => {
expect(err).to.not.exist;
expect(events).to.have.length(1);
const findCommand = events[0];
expect(findCommand).nested.property('command.readConcern').to.eql({ level: 'local' });
done();
});
})
});
it('shouldNotFailDueToStackOverflowEach', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('shouldNotFailDueToStackOverflowEach', (err, collection) => {
expect(err).to.not.exist;
var docs = [];
var total = 0;
for (var i = 0; i < 30000; i++) docs.push({ a: i });
var allDocs = [];
var left = 0;
while (docs.length > 0) {
allDocs.push(docs.splice(0, 1000));
}
// Get all batches we must insert
left = allDocs.length;
var totalI = 0;
// Execute inserts
for (i = 0; i < left; i++) {
collection.insert(allDocs.shift(), configuration.writeConcernMax(), function (err, d) {
expect(err).to.not.exist;
left = left - 1;
totalI = totalI + d.length;
if (left === 0) {
collection.find({}).forEach(
() => {
total++;
},
err => {
expect(err).to.not.exist;
expect(total).to.equal(30000);
done();
}
);
}
});
}
});
});
}
});
it('shouldNotFailDueToStackOverflowToArray', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('shouldNotFailDueToStackOverflowToArray', (err, collection) => {
expect(err).to.not.exist;
var docs = [];
for (var i = 0; i < 30000; i++) docs.push({ a: i });
var allDocs = [];
var left = 0;
while (docs.length > 0) {
allDocs.push(docs.splice(0, 1000));
}
// Get all batches we must insert
left = allDocs.length;
var totalI = 0;
var timeout = 0;
// Execute inserts
for (i = 0; i < left; i++) {
setTimeout(function () {
collection.insert(
allDocs.shift(),
configuration.writeConcernMax(),
function (err, d) {
expect(err).to.not.exist;
left = left - 1;
totalI = totalI + d.length;
if (left === 0) {
collection.find({}).toArray((err, items) => {
expect(err).to.not.exist;
test.equal(30000, items.length);
done();
});
}
}
);
}, timeout);
timeout = timeout + 100;
}
});
});
}
});
it('shouldCorrectlySkipAndLimit', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
var collection = db.collection('shouldCorrectlySkipAndLimit');
var docs = [];
for (var i = 0; i < 100; i++) docs.push({ a: i, OrderNumber: i });
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
collection
.find({}, { OrderNumber: 1 })
.skip(10)
.limit(10)
.toArray((err, items) => {
expect(err).to.not.exist;
test.equal(10, items[0].OrderNumber);
collection
.find({}, { OrderNumber: 1 })
.skip(10)
.limit(10)
.count((err, count) => {
expect(err).to.not.exist;
test.equal(10, count);
done();
});
});
});
});
}
});
it('shouldFailToTailANormalCollection', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
var collection = db.collection('shouldFailToTailANormalCollection');
var docs = [];
for (var i = 0; i < 100; i++) docs.push({ a: i, OrderNumber: i });
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
const cursor = collection.find({}, { tailable: true });
cursor.forEach(
() => {},
err => {
test.ok(err instanceof Error);
test.ok(typeof err.code === 'number');
// Close cursor b/c we did not exhaust cursor
cursor.close();
done();
}
);
});
});
}
});
it('shouldCorrectlyUseFindAndCursorCount', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
// DOC_LINE var client = new MongoClient(new Server('localhost', 27017));
// DOC_START
// Establish connection to db
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
// Create a lot of documents to insert
var docs = [];
for (var i = 0; i < 100; i++) {
docs.push({ a: i });
}
// Create a collection
db.createCollection('test_close_function_on_cursor_2', (err, collection) => {
expect(err).to.not.exist;
// Insert documents into collection
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
const cursor = collection.find({});
cursor.count((err, count) => {
expect(err).to.not.exist;
test.equal(100, count);
done();
});
});
});
});
// DOC_END
}
});
it('should correctly apply hint to count command for cursor', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: {
topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'],
mongodb: '>2.5.5'
}
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
// DOC_LINE var client = new MongoClient(new Server('localhost', 27017));
// DOC_START
// Establish connection to db
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
var col = db.collection('count_hint');
col.insert([{ i: 1 }, { i: 2 }], { writeConcern: { w: 1 } }, err => {
expect(err).to.not.exist;
col.createIndex({ i: 1 }, err => {
expect(err).to.not.exist;
col.find({ i: 1 }, { hint: '_id_' }).count((err, count) => {
expect(err).to.not.exist;
test.equal(1, count);
col.find({}, { hint: '_id_' }).count((err, count) => {
expect(err).to.not.exist;
test.equal(2, count);
col.find({ i: 1 }, { hint: 'BAD HINT' }).count(err => {
test.ok(err != null);
col.createIndex({ x: 1 }, { sparse: true }, err => {
expect(err).to.not.exist;
col.find({ i: 1 }, { hint: 'x_1' }).count((err, count) => {
expect(err).to.not.exist;
test.equal(0, count);
col.find({}, { hint: 'i_1' }).count((err, count) => {
expect(err).to.not.exist;
test.equal(2, count);
done();
});
});
});
});
});
});
});
});
});
// DOC_END
}
});
it('Terminate each after first document by returning false', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
// Create a lot of documents to insert
var docs = [];
for (var i = 0; i < 100; i++) {
docs.push({ a: i });
}
// Create a collection
db.createCollection('terminate_each_returning_false', (err, collection) => {
expect(err).to.not.exist;
// Insert documents into collection
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
var finished = false;
collection.find({}).forEach(
doc => {
expect(doc).to.exist;
test.equal(finished, false);
finished = true;
done();
return false;
},
err => {
expect(err).to.not.exist;
}
);
});
});
});
}
});
it('Should correctly handle maxTimeMS as part of findOne options', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
var donkey = {
color: 'brown'
};
db.collection('donkies').insertOne(donkey, function (err, result) {
expect(err).to.not.exist;
var query = { _id: result.insertedId };
var options = { maxTimeMS: 1000 };
db.collection('donkies').findOne(query, options, function (err, doc) {
expect(err).to.not.exist;
test.equal('brown', doc.color);
done();
});
});
});
}
});
it('Should correctly handle batchSize of 2', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
const collectionName = 'should_correctly_handle_batchSize_2';
db.collection(collectionName).insert([{ x: 1 }, { x: 2 }, { x: 3 }], err => {
expect(err).to.not.exist;
const cursor = db.collection(collectionName).find({}, { batchSize: 2 });
this.defer(() => cursor.close());
cursor.next(err => {
expect(err).to.not.exist;
cursor.next(err => {
expect(err).to.not.exist;
cursor.next(err => {
expect(err).to.not.exist;
done();
});
});
});
});
});
}
});
it('Should report database name and collection name', {
metadata: { requires: { topology: ['single'] } },
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
const cursor = db.collection('myCollection').find({});
test.equal('myCollection', cursor.namespace.collection);
test.equal('integration_tests', cursor.namespace.db);
done();
});
}
});
it('Should correctly execute count on cursor with maxTimeMS', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var docs = [];
for (var i = 0; i < 1000; i++) {
var d = new Date().getTime() + i * 1000;
docs[i] = { a: i, createdAt: new Date(d) };
}
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection(
'Should_correctly_execute_count_on_cursor_2',
function (err, collection) {
expect(err).to.not.exist;
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
// Create a cursor for the content
var cursor = collection.find({});
cursor.limit(100);
cursor.skip(10);
cursor.count({ maxTimeMS: 1000 }, err => {
expect(err).to.not.exist;
// Create a cursor for the content
var cursor = collection.find({});
cursor.limit(100);
cursor.skip(10);
cursor.maxTimeMS(100);
cursor.count(err => {
expect(err).to.not.exist;
done();
});
});
});
}
);
});
}
});
it('Should correctly execute count on cursor with maxTimeMS set using legacy method', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var docs = [];
for (var i = 0; i < 1000; i++) {
var d = new Date().getTime() + i * 1000;
docs[i] = { a: i, createdAt: new Date(d) };
}
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection(
'Should_correctly_execute_count_on_cursor_3',
function (err, collection) {
expect(err).to.not.exist;
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
// Create a cursor for the content
var cursor = collection.find({}, { maxTimeMS: 100 });
cursor.toArray(err => {
expect(err).to.not.exist;
done();
});
});
}
);
});
}
});
it('Should correctly apply map to toArray', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var docs = [];
for (var i = 0; i < 1000; i++) {
var d = new Date().getTime() + i * 1000;
docs[i] = { a: i, createdAt: new Date(d) };
}
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
var collection = db.collection('map_toArray');
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
// Create a cursor for the content
var cursor = collection
.find({})
.map(function () {
return { a: 1 };
})
.batchSize(5)
.limit(10);
cursor.toArray(function (err, docs) {
expect(err).to.not.exist;
test.equal(10, docs.length);
          // Ensure all docs were mapped
docs.forEach(doc => {
expect(doc).property('a').to.equal(1);
});
done();
});
});
});
}
});
it('Should correctly apply map to next', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const docs = [];
for (var i = 0; i < 1000; i++) {
const d = new Date().getTime() + i * 1000;
docs[i] = { a: i, createdAt: new Date(d) };
}
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
const collection = db.collection('map_next');
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
// Create a cursor for the content
const cursor = collection
.find({})
.map(function () {
return { a: 1 };
})
.batchSize(5)
.limit(10);
this.defer(() => cursor.close());
cursor.next((err, doc) => {
expect(err).to.not.exist;
test.equal(1, doc.a);
done();
});
});
});
}
});
it('Should correctly apply map to each', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var docs = [];
for (var i = 0; i < 1000; i++) {
var d = new Date().getTime() + i * 1000;
docs[i] = { a: i, createdAt: new Date(d) };
}
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
const collection = db.collection('map_each');
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
// Create a cursor for the content
var cursor = collection
.find({})
.map(function () {
return { a: 1 };
})
.batchSize(5)
.limit(10);
cursor.forEach(
doc => {
test.equal(1, doc.a);
},
err => {
expect(err).to.not.exist;
done();
}
);
});
});
}
});
it('Should correctly apply map to forEach', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var docs = [];
for (var i = 0; i < 1000; i++) {
var d = new Date().getTime() + i * 1000;
docs[i] = { a: i, createdAt: new Date(d) };
}
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
var collection = db.collection('map_forEach');
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
// Create a cursor for the content
var cursor = collection
.find({})
.map(function () {
return { a: 2 };
})
.map(function (x) {
return { a: x.a * x.a };
})
.batchSize(5)
.limit(10);
cursor.forEach(
doc => {
test.equal(4, doc.a);
},
err => {
expect(err).to.not.exist;
done();
}
);
});
});
}
});
it('Should correctly apply multiple uses of map and apply forEach', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var docs = [];
for (var i = 0; i < 1000; i++) {
var d = new Date().getTime() + i * 1000;
docs[i] = { a: i, createdAt: new Date(d) };
}
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
var collection = db.collection('map_mapmapforEach');
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
// Create a cursor for the content
var cursor = collection
.find({})
.map(function () {
return { a: 1 };
})
.batchSize(5)
.limit(10);
cursor.forEach(
doc => {
expect(doc).property('a').to.equal(1);
},
err => {
expect(err).to.not.exist;
done();
}
);
});
});
}
});
it('Should correctly apply skip and limit to large set of documents', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
var collection = db.collection('cursor_limit_skip_correctly');
// Insert x number of docs
var ordered = collection.initializeUnorderedBulkOp();
for (var i = 0; i < 6000; i++) {
ordered.insert({ a: i });
}
ordered.execute({ writeConcern: { w: 1 } }, err => {
expect(err).to.not.exist;
// Let's attempt to skip and limit
collection
.find({})
.limit(2016)
.skip(2016)
.toArray(function (err, docs) {
expect(err).to.not.exist;
test.equal(2016, docs.length);
done();
});
});
});
}
});
it('should tail cursor using maxAwaitTimeMS for 3.2 or higher', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { topology: ['single'], mongodb: '>3.1.9' } },
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
var options = { capped: true, size: 8 };
db.createCollection(
'should_await_data_max_awaittime_ms',
options,
function (err, collection) {
expect(err).to.not.exist;
collection.insert({ a: 1 }, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
// Create cursor with awaitData, and timeout after the period specified
var cursor = collection
.find({})
.addCursorFlag('tailable', true)
.addCursorFlag('awaitData', true)
.maxAwaitTimeMS(500);
const s = new Date();
cursor.forEach(
() => {
setTimeout(() => cursor.close(), 300);
},
() => {
test.ok(new Date().getTime() - s.getTime() >= 500);
done();
}
);
});
}
);
});
}
});
it('Should not emit any events after close event emitted due to cursor killed', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
var collection = db.collection('cursor_limit_skip_correctly');
// Insert x number of docs
var ordered = collection.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
ordered.insert({ a: i });
}
ordered.execute({ writeConcern: { w: 1 } }, err => {
expect(err).to.not.exist;
// Let's attempt to skip and limit
var cursor = collection.find({}).batchSize(10);
const stream = cursor.stream();
stream.on('data', function () {
stream.destroy();
});
cursor.on('close', function () {
done();
});
});
});
}
});
it('shouldCorrectlyExecuteEnsureIndexWithNoCallback', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var docs = [];
for (var i = 0; i < 1; i++) {
var d = new Date().getTime() + i * 1000;
docs[i] = { createdAt: new Date(d) };
}
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection(
'shouldCorrectlyExecuteEnsureIndexWithNoCallback',
function (err, collection) {
expect(err).to.not.exist;
            // ensure an index on createdAt
collection.createIndex({ createdAt: 1 }, err => {
expect(err).to.not.exist;
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
// Find with sort
collection
.find()
.sort(['createdAt', 'asc'])
.toArray((err, items) => {
expect(err).to.not.exist;
test.equal(1, items.length);
done();
});
});
});
}
);
});
}
});
it('Should correctly execute count on cursor with limit and skip', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var docs = [];
for (var i = 0; i < 50; i++) {
var d = new Date().getTime() + i * 1000;
docs[i] = { a: i, createdAt: new Date(d) };
}
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('negative_batch_size_and_limit_set', (err, collection) => {
expect(err).to.not.exist;
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
// Create a cursor for the content
var cursor = collection.find({});
cursor
.limit(100)
.skip(0)
.count(function (err, c) {
expect(err).to.not.exist;
test.equal(50, c);
var cursor = collection.find({});
cursor
.limit(100)
.skip(0)
                .toArray((err, items) => {
                  expect(err).to.not.exist;
                  test.equal(50, items.length);
done();
});
});
});
});
});
}
});
it('Should correctly handle negative batchSize and set the limit', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var docs = [];
const configuration = this.configuration;
for (var i = 0; i < 50; i++) {
var d = new Date().getTime() + i * 1000;
docs[i] = { a: i, createdAt: new Date(d) };
}
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection(
'Should_correctly_execute_count_on_cursor_1_',
function (err, collection) {
expect(err).to.not.exist;
// insert all docs
collection.insert(docs, configuration.writeConcernMax(), err => {
expect(err).to.not.exist;
// Create a cursor for the content
var cursor = collection.find({});
cursor.batchSize(-10).next(err => {
expect(err).to.not.exist;
test.ok(cursor.id.equals(BSON.Long.ZERO));
done();
});
});
}
);
});
}
});
it('Correctly decorate the cursor count command with skip, limit, hint, readConcern', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var started = [];
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), {
maxPoolSize: 1,
monitorCommands: true
});
client.on('commandStarted', function (event) {
if (event.commandName === 'count') started.push(event);
});
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.collection('cursor_count_test', { readConcern: { level: 'local' } })
.find({ project: '123' })
.limit(5)
.skip(5)
.hint({ project: 1 })
.count(err => {
expect(err).to.not.exist;
test.equal(1, started.length);
if (started[0].command.readConcern)
test.deepEqual({ level: 'local' }, started[0].command.readConcern);
test.deepEqual({ project: 1 }, started[0].command.hint);
test.equal(5, started[0].command.skip);
test.equal(5, started[0].command.limit);
done();
});
});
}
});
it.skip('Correctly decorate the collection count command with skip, limit, hint, readConcern', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var started = [];
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.on('commandStarted', function (event) {
if (event.commandName === 'count') started.push(event);
});
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.collection('cursor_count_test1', { readConcern: { level: 'local' } }).count(
{
project: '123'
},
{
readConcern: { level: 'local' },
limit: 5,
skip: 5,
hint: { project: 1 }
},
err => {
expect(err).to.not.exist;
test.equal(1, started.length);
if (started[0].command.readConcern)
test.deepEqual({ level: 'local' }, started[0].command.readConcern);
test.deepEqual({ project: 1 }, started[0].command.hint);
test.equal(5, started[0].command.skip);
test.equal(5, started[0].command.limit);
done();
}
);
});
}
});
// NOTE: should we allow users to explicitly `kill` a cursor anymore?
it.skip('Should properly kill a cursor', {
metadata: {
requires: {
topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'],
mongodb: '>=3.2.0'
}
},
test: function () {
// Load up the documents
const docs = [];
for (let i = 0; i < 1000; i += 1) {
docs.push({
a: i
});
}
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
let cleanup = () => {};
let caughtError = undefined;
return (
client
.connect()
.then(client => {
this.defer(() => client.close());
const db = client.db(configuration.db);
const collection = db.collection('cursorkilltest1');
// Insert 1000 documents
return collection.insert(docs).then(() => {
// Generate cursor for find operation
const cursor = collection.find({});
this.defer(() => cursor.close());
// Iterate cursor past first element
return cursor
.next()
.then(() => cursor.next())
.then(() => {
// Confirm that cursorId is non-zero
const longId = cursor.id;
expect(longId).to.be.an('object');
expect(Object.getPrototypeOf(longId)).to.haveOwnProperty('_bsontype', 'Long');
const id = longId.toNumber();
expect(id).to.not.equal(0);
// Kill cursor
return new Promise((resolve, reject) =>
cursor.kill((err, r) => (err ? reject(err) : resolve(r)))
).then(response => {
                // sharded clusters will return a Long, single servers return integers
if (
response &&
response.cursorsKilled &&
Array.isArray(response.cursorsKilled)
) {
response.cursorsKilled = response.cursorsKilled.map(id =>
typeof id === 'number' ? BSON.Long.fromNumber(id) : id
);
}
expect(response.ok).to.equal(1);
expect(response.cursorsKilled[0].equals(longId)).to.be.ok;
});
});
});
})
// Clean up. Make sure that even in case of error, we still always clean up connection
.catch(e => (caughtError = e))
.then(cleanup)
.then(() => {
if (caughtError) {
throw caughtError;
}
})
);
}
});
// NOTE: This is skipped because I don't think its correct or adds value. The expected error
// is not an error with hasNext (from server), but rather a local TypeError which should
// be caught anyway. The only solution here would be to wrap the entire top level call
// in a try/catch which is not going to happen.
it.skip('Should propagate hasNext errors when using a callback', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
const configuration = this.configuration;
var client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
const cursor = new FindCursor(
db.s.topology,
db.s.namespace,
{},
{ limit: 0, skip: 0, slaveOk: false, readPreference: 42 }
);
cursor.hasNext(err => {
test.ok(err !== null);
test.equal(err.message, 'readPreference must be a ReadPreference instance');
done();
});
});
}
});
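  // Hedged sketch (editor's addition, not part of the original suite): per the note above,
  // the failure exercised by the skipped test is a synchronous local TypeError (from the
  // invalid readPreference) rather than a server error delivered to the hasNext callback.
  // If that behaviour ever needed coverage, the call site itself would have to be wrapped,
  // roughly like this (using the same illustrative FindCursor arguments as above):
  //
  //   try {
  //     const cursor = new FindCursor(db.s.topology, db.s.namespace, {}, { readPreference: 42 });
  //     cursor.hasNext(() => {});
  //   } catch (err) {
  //     expect(err).to.be.instanceOf(TypeError);
  //   }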
it(
'should return implicit session to pool when client-side cursor exhausts results on initial query',
{
metadata: {
requires: {
topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'],
mongodb: '>=3.6.0'
}
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
const collection = db.collection('cursor_session_tests');
collection.insertMany([{ a: 1, b: 2 }], err => {
expect(err).to.not.exist;
const cursor = collection.find({});
cursor.next(function () {
test.equal(client.topology.s.sessions.size, 0);
done();
});
});
});
}
}
);
it(
'should return implicit session to pool when client-side cursor exhausts results after a getMore',
{
metadata: {
requires: {
topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'],
mongodb: '>=3.6.0'
}
},
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
const collection = db.collection('cursor_session_tests2');
const docs = [
{ a: 1, b: 2 },
{ a: 3, b: 4 },
{ a: 5, b: 6 },
{ a: 7, b: 8 },
{ a: 9, b: 10 }
];
collection.insertMany(docs, err => {
expect(err).to.not.exist;
const cursor = collection.find({}, { batchSize: 3 });
cursor.next(function () {
test.equal(client.topology.s.sessions.size, 1);
cursor.next(function () {
test.equal(client.topology.s.sessions.size, 1);
cursor.next(function () {
test.equal(client.topology.s.sessions.size, 1);
cursor.next(function () {
test.equal(client.topology.s.sessions.size, 0);
done();
});
});
});
});
});
});
}
}
);
describe('#clone', function () {
let client;
let db;
let collection;
beforeEach(function () {
client = this.configuration.newClient({ w: 1 });
return client.connect().then(client => {
db = client.db(this.configuration.db);
collection = db.collection('test_coll');
});
});
afterEach(function () {
return client.close();
});
context('when executing on a find cursor', function () {
it('removes the existing session from the cloned cursor', function () {
const docs = [{ name: 'test1' }, { name: 'test2' }];
return collection.insertMany(docs).then(() => {
const cursor = collection.find({}, { batchSize: 1 });
return cursor
.next()
.then(doc => {
expect(doc).to.exist;
const clonedCursor = cursor.clone();
expect(clonedCursor.cursorOptions.session).to.not.exist;
expect(clonedCursor.session).to.not.exist;
})
.finally(() => {
return cursor.close();
});
});
});
});
context('when executing on an aggregation cursor', function () {
it('removes the existing session from the cloned cursor', function () {
const docs = [{ name: 'test1' }, { name: 'test2' }];
return collection.insertMany(docs).then(() => {
const cursor = collection.aggregate([{ $match: {} }], { batchSize: 1 });
return cursor
.next()
.then(doc => {
expect(doc).to.exist;
const clonedCursor = cursor.clone();
expect(clonedCursor.cursorOptions.session).to.not.exist;
expect(clonedCursor.session).to.not.exist;
})
.finally(() => {
return cursor.close();
});
});
});
});
});
describe('Cursor forEach Error propagation', function () {
let configuration;
let client;
let cursor;
let collection;
beforeEach(async function () {
configuration = this.configuration;
client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
await client.connect().catch(() => {
expect.fail('Failed to connect to client');
});
collection = client.db(configuration.db).collection('cursor_session_tests2');
});
afterEach(async function () {
await cursor.close();
await client.close();
});
// NODE-2035
it('should propagate error when exceptions are thrown from an awaited forEach call', async function () {
const docs = [{ unique_key_2035: 1 }, { unique_key_2035: 2 }, { unique_key_2035: 3 }];
await collection.insertMany(docs).catch(() => {
expect.fail('Failed to insert documents');
});
cursor = collection.find({
unique_key_2035: {
$exists: true
}
});
await cursor
.forEach(() => {
throw new Error('FAILURE IN FOREACH CALL');
})
.then(() => {
expect.fail('Error in forEach call not caught');
})
.catch(err => {
expect(err.message).to.deep.equal('FAILURE IN FOREACH CALL');
});
});
});
it('should return a promise when no callback supplied to forEach method', function () {
const configuration = this.configuration;
const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
return client.connect().then(() => {
this.defer(() => client.close());
const db = client.db(configuration.db);
const collection = db.collection('cursor_session_tests2');
const cursor = collection.find();
this.defer(() => cursor.close());
const promise = cursor.forEach(() => {});
expect(promise).to.exist.and.to.be.an.instanceof(Promise);
return promise;
});
});
it('should return false when exhausted and hasNext called more than once', function (done) {
const configuration = this.configuration;
const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
db.createCollection('cursor_hasNext_test').then(() => {
const cursor = db.collection('cursor_hasNext_test').find();
this.defer(() => cursor.close());
cursor
.hasNext()
.then(val1 => {
expect(val1).to.equal(false);
return cursor.hasNext();
})
.then(val2 => {
expect(val2).to.equal(false);
done();
});
});
});
});
const testTransformStream = (config, _done) => {
const client = config.client;
const configuration = config.configuration;
const collectionName = config.collectionName;
const transformFunc = config.transformFunc;
const expectedSet = config.expectedSet;
let cursor;
const done = err => cursor.close(err2 => client.close(err3 => _done(err || err2 || err3)));
client.connect((err, client) => {
expect(err).to.not.exist;
const db = client.db(configuration.db);
let collection;
const docs = [
{ _id: 0, a: { b: 1, c: 0 } },
{ _id: 1, a: { b: 1, c: 0 } },
{ _id: 2, a: { b: 1, c: 0 } }
];
const resultSet = new Set();
const transformParam = transformFunc != null ? { transform: transformFunc } : null;
Promise.resolve()
.then(() => db.createCollection(collectionName))
.then(() => (collection = db.collection(collectionName)))
.then(() => collection.insertMany(docs))
.then(() => {
cursor = collection.find();
return cursor.stream(transformParam);
})
.then(stream => {
stream.on('data', function (doc) {
resultSet.add(doc);
});
stream.once('end', function () {
expect(resultSet).to.deep.equal(expectedSet);
done();
});
stream.once('error', e => {
done(e);
});
})
.catch(e => done(e));
});
};
it('stream should apply the supplied transformation function to each document in the stream', function (done) {
const configuration = this.configuration;
const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
const expectedDocs = [
{ _id: 0, b: 1, c: 0 },
{ _id: 1, b: 1, c: 0 },
{ _id: 2, b: 1, c: 0 }
];
const config = {
client: client,
configuration: configuration,
collectionName: 'stream-test-transform',
transformFunc: doc => ({ _id: doc._id, b: doc.a.b, c: doc.a.c }),
expectedSet: new Set(expectedDocs)
};
testTransformStream(config, done);
});
it('stream should return a stream of unmodified docs if no transform function applied', function (done) {
const configuration = this.configuration;
const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
const expectedDocs = [
{ _id: 0, a: { b: 1, c: 0 } },
{ _id: 1, a: { b: 1, c: 0 } },
{ _id: 2, a: { b: 1, c: 0 } }
];
const config = {
client: client,
configuration: configuration,
collectionName: 'transformStream-test-notransform',
transformFunc: null,
expectedSet: new Set(expectedDocs)
};
testTransformStream(config, done);
});
it.skip('should apply parent read preference to count command', function (done) {
// NOTE: this test is skipped because mongo orchestration does not test sharded clusters
// with secondaries. This behavior should be unit tested
const configuration = this.configuration;
const client = configuration.newClient(
{ w: 1, readPreference: ReadPreference.SECONDARY },
{ maxPoolSize: 1, connectWithNoPrimary: true }
);
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db(configuration.db);
let collection, cursor, spy;
const close = e => cursor.close(() => client.close(() => done(e)));
Promise.resolve()
.then(() => new Promise(resolve => setTimeout(() => resolve(), 500)))
.then(() => db.createCollection('test_count_readPreference'))
.then(() => (collection = db.collection('test_count_readPreference')))
.then(() => collection.find())
.then(_cursor => (cursor = _cursor))
.then(() => (spy = sinon.spy(cursor.topology, 'command')))
.then(() => cursor.count())
.then(() =>
expect(spy.firstCall.args[2])
.to.have.nested.property('readPreference.mode')
.that.equals('secondary')
)
.then(() => close())
.catch(e => close(e));
});
});
it('should not consume first document on hasNext when streaming', function (done) {
const configuration = this.configuration;
const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
client.connect(err => {
expect(err).to.not.exist;
this.defer(() => client.close());
const collection = client.db().collection('documents');
collection.drop(() => {
const docs = [{ a: 1 }, { a: 2 }, { a: 3 }];
collection.insertMany(docs, err => {
expect(err).to.not.exist;
const cursor = collection.find({}, { sort: { a: 1 } });
cursor.hasNext((err, hasNext) => {
expect(err).to.not.exist;
expect(hasNext).to.be.true;
const collected = [];
const stream = new Writable({
objectMode: true,
write: (chunk, encoding, next) => {
collected.push(chunk);
next(undefined, chunk);
}
});
const cursorStream = cursor.stream();
cursorStream.on('end', () => {
expect(collected).to.have.length(3);
expect(collected).to.eql(docs);
done();
});
cursorStream.pipe(stream);
});
});
});
});
});
describe('transforms', function () {
it('should correctly apply map transform to cursor as readable stream', function (done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect(err => {
expect(err).to.not.exist;
this.defer(() => client.close());
const docs = 'Aaden Aaron Adrian Aditya Bob Joe'.split(' ').map(x => ({ name: x }));
const coll = client.db(configuration.db).collection('cursor_stream_mapping');
coll.insertMany(docs, err => {
expect(err).to.not.exist;
const bag = [];
const stream = coll
.find()
.project({ _id: 0, name: 1 })
.map(doc => ({ mapped: doc }))
.stream()
.on('data', doc => bag.push(doc));
stream.on('error', done).on('end', () => {
expect(bag.map(x => x.mapped)).to.eql(docs.map(x => ({ name: x.name })));
done();
});
});
});
});
it('should correctly apply map transform when converting cursor to array', function (done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect(err => {
expect(err).to.not.exist;
this.defer(() => client.close());
const docs = 'Aaden Aaron Adrian Aditya Bob Joe'.split(' ').map(x => ({ name: x }));
const coll = client.db(configuration.db).collection('cursor_toArray_mapping');
coll.insertMany(docs, err => {
expect(err).to.not.exist;
coll
.find()
.project({ _id: 0, name: 1 })
.map(doc => ({ mapped: doc }))
.toArray((err, mappedDocs) => {
expect(err).to.not.exist;
expect(mappedDocs.map(x => x.mapped)).to.eql(docs.map(x => ({ name: x.name })));
done();
});
});
});
});
});
context('sort', function () {
const findSort = (input, output) =>
withMonitoredClient('find', function (client, events, done) {
const db = client.db('test');
const collection = db.collection('test_sort_dos');
const cursor = collection.find({}, { sort: input });
cursor.next(err => {
expect(err).to.not.exist;
expect(events[0].command.sort).to.be.instanceOf(Map);
expect(Array.from(events[0].command.sort)).to.deep.equal(Array.from(output));
cursor.close(done);
});
});
const cursorSort = (input, output) =>
withMonitoredClient('find', function (client, events, done) {
const db = client.db('test');
const collection = db.collection('test_sort_dos');
const cursor = collection.find({}).sort(input);
cursor.next(err => {
expect(err).to.not.exist;
expect(events[0].command.sort).to.be.instanceOf(Map);
expect(Array.from(events[0].command.sort)).to.deep.equal(Array.from(output));
cursor.close(done);
});
});
it('should use find options object', findSort({ alpha: 1 }, new Map([['alpha', 1]])));
it('should use find options string', findSort('alpha', new Map([['alpha', 1]])));
it('should use find options shallow array', findSort(['alpha', 1], new Map([['alpha', 1]])));
it('should use find options deep array', findSort([['alpha', 1]], new Map([['alpha', 1]])));
it('should use cursor.sort object', cursorSort({ alpha: 1 }, new Map([['alpha', 1]])));
it('should use cursor.sort string', cursorSort('alpha', new Map([['alpha', 1]])));
it('should use cursor.sort shallow array', cursorSort(['alpha', 1], new Map([['alpha', 1]])));
it('should use cursor.sort deep array', cursorSort([['alpha', 1]], new Map([['alpha', 1]])));
it('formatSort - one key', () => {
// TODO (NODE-3236): These are unit tests for a standalone function and should be moved out of the cursor context file
expect(formatSort('alpha')).to.deep.equal(new Map([['alpha', 1]]));
expect(formatSort(['alpha'])).to.deep.equal(new Map([['alpha', 1]]));
expect(formatSort('alpha', 1)).to.deep.equal(new Map([['alpha', 1]]));
expect(formatSort('alpha', 'asc')).to.deep.equal(new Map([['alpha', 1]]));
expect(formatSort([['alpha', 'asc']])).to.deep.equal(new Map([['alpha', 1]]));
expect(formatSort('alpha', 'ascending')).to.deep.equal(new Map([['alpha', 1]]));
expect(formatSort({ alpha: 1 })).to.deep.equal(new Map([['alpha', 1]]));
expect(formatSort('beta')).to.deep.equal(new Map([['beta', 1]]));
expect(formatSort(['beta'])).to.deep.equal(new Map([['beta', 1]]));
expect(formatSort('beta', -1)).to.deep.equal(new Map([['beta', -1]]));
expect(formatSort('beta', 'desc')).to.deep.equal(new Map([['beta', -1]]));
expect(formatSort('beta', 'descending')).to.deep.equal(new Map([['beta', -1]]));
expect(formatSort({ beta: -1 })).to.deep.equal(new Map([['beta', -1]]));
expect(formatSort({ alpha: { $meta: 'hi' } })).to.deep.equal(
new Map([['alpha', { $meta: 'hi' }]])
);
});
it('formatSort - multi key', () => {
expect(formatSort(['alpha', 'beta'])).to.deep.equal(
new Map([
['alpha', 1],
['beta', 1]
])
);
expect(formatSort({ alpha: 1, beta: 1 })).to.deep.equal(
new Map([
['alpha', 1],
['beta', 1]
])
);
expect(
formatSort([
['alpha', 'asc'],
['beta', 'ascending']
])
).to.deep.equal(
new Map([
['alpha', 1],
['beta', 1]
])
);
expect(
formatSort(
new Map([
['alpha', 'asc'],
['beta', 'ascending']
])
)
).to.deep.equal(
new Map([
['alpha', 1],
['beta', 1]
])
);
expect(
formatSort([
['3', 'asc'],
['1', 'ascending']
])
).to.deep.equal(
new Map([
['3', 1],
['1', 1]
])
);
expect(formatSort({ alpha: { $meta: 'hi' }, beta: 'ascending' })).to.deep.equal(
new Map([
['alpha', { $meta: 'hi' }],
['beta', 1]
])
);
});
it('should use allowDiskUse option on sort', {
metadata: { requires: { mongodb: '>=4.4' } },
test: withMonitoredClient('find', function (client, events, done) {
const db = client.db('test');
const collection = db.collection('test_sort_allow_disk_use');
const cursor = collection.find({}).sort(['alpha', 1]).allowDiskUse();
cursor.next(err => {
expect(err).to.not.exist;
const { command } = events.shift();
expect(command.sort).to.deep.equal(new Map([['alpha', 1]]));
expect(command.allowDiskUse).to.be.true;
cursor.close(done);
});
})
});
it('should error if allowDiskUse option used without sort', {
metadata: { requires: { mongodb: '>=4.4' } },
test: withClient(function (client, done) {
const db = client.db('test');
const collection = db.collection('test_sort_allow_disk_use');
expect(() => collection.find({}).allowDiskUse()).to.throw(
/Option "allowDiskUse" requires a sort specification/
);
done();
})
});
});
});
| 1 | 21,739 | This test is being skipped. I unskipped it, and it failed in the same way for both `slaveOk: false` and `secondaryOk: false`. I'm not sure how else to test this broken test. | mongodb-node-mongodb-native | js |
@@ -33,6 +33,8 @@ import (
// ResponseHeaders.
type OutboundCall struct {
// request attributes to fill if non-nil
+ to *Identifier
+ from *Identifier
headers []keyValuePair
shardKey *string
routingKey *string | 1 | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package yarpc
import (
"context"
"go.uber.org/yarpc/v2/yarpcerror"
)
// OutboundCall is an outgoing call. It holds per-call options for a request.
//
// Encoding authors may use OutboundCall to provide a CallOption-based request
// customization mechanism, including returning response headers through
// ResponseHeaders.
type OutboundCall struct {
// request attributes to fill if non-nil
headers []keyValuePair
shardKey *string
routingKey *string
routingDelegate *string
// If non-nil, response headers should be written here.
responseHeaders *map[string]string
}
// NewOutboundCall constructs a new OutboundCall with the given options.
func NewOutboundCall(options ...CallOption) *OutboundCall {
var call OutboundCall
for _, opt := range options {
opt.apply(&call)
}
return &call
}
// NewStreamOutboundCall constructs a new OutboundCall with the given
// options and enforces the OutboundCall is valid for streams.
func NewStreamOutboundCall(options ...CallOption) (*OutboundCall, error) {
call := NewOutboundCall(options...)
if call.responseHeaders != nil {
return nil, yarpcerror.InvalidArgumentErrorf("response headers are not supported for streams")
}
return call, nil
}
// WriteToRequest fills the given request with request-specific options from
// the call.
//
// The context MAY be replaced by the OutboundCall.
func (c *OutboundCall) WriteToRequest(ctx context.Context, req *Request) (context.Context, error) {
for _, h := range c.headers {
req.Headers = req.Headers.With(h.k, h.v)
}
if c.shardKey != nil {
req.ShardKey = *c.shardKey
}
if c.routingKey != nil {
req.RoutingKey = *c.routingKey
}
if c.routingDelegate != nil {
req.RoutingDelegate = *c.routingDelegate
}
// NB(abg): context and error are unused for now but we want to leave room
// for CallOptions which can fail or modify the context.
return ctx, nil
}
// ReadFromResponse reads information from the response for this call.
//
// This should be called only if the request is unary.
func (c *OutboundCall) ReadFromResponse(ctx context.Context, res *Response) (context.Context, error) {
// We're not using ctx right now but we may in the future.
if c.responseHeaders != nil && res.Headers.Len() > 0 {
// We make a copy of the response headers because Headers.Items() must
// never be mutated.
headers := make(map[string]string, res.Headers.Len())
for k, v := range res.Headers.Items() {
headers[k] = v
}
*c.responseHeaders = headers
}
// NB(abg): context and error are unused for now but we want to leave room
// for CallOptions which can fail or modify the context.
return ctx, nil
}
| 1 | 18,077 | I don't believe that we need a pointer to an interface for the `to` side of this since we're just assigning a value to the to field. | yarpc-yarpc-go | go |
@@ -74,13 +74,18 @@ def later(ms: int, command, win_id):
@cmdutils.register(maxsplit=1, no_cmd_split=True, no_replace_variables=True)
@cmdutils.argument('win_id', win_id=True)
-def repeat(times: int, command, win_id):
+@cmdutils.argument('count', count=True)
+def repeat(times: int, command, win_id, count=None):
"""Repeat a given command.
Args:
times: How many times to repeat.
command: The command to run, with optional args.
"""
+
+ if count is not None:
+ times *= count
+
if times < 0:
raise cmdexc.CommandError("A negative count doesn't make sense.")
commandrunner = runners.CommandRunner(win_id) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Misc. utility commands exposed to the user."""
import functools
import os
import signal
import traceback
try:
import hunter
except ImportError:
hunter = None
from PyQt5.QtCore import QUrl
# so it's available for :debug-pyeval
from PyQt5.QtWidgets import QApplication # pylint: disable=unused-import
from qutebrowser.browser import qutescheme
from qutebrowser.utils import log, objreg, usertypes, message, debug, utils
from qutebrowser.commands import cmdutils, runners, cmdexc
from qutebrowser.config import config, configdata
from qutebrowser.misc import consolewidget
from qutebrowser.utils.version import pastebin_version
from qutebrowser.qt import sip
@cmdutils.register(maxsplit=1, no_cmd_split=True, no_replace_variables=True)
@cmdutils.argument('win_id', win_id=True)
def later(ms: int, command, win_id):
"""Execute a command after some time.
Args:
ms: How many milliseconds to wait.
command: The command to run, with optional args.
"""
if ms < 0:
raise cmdexc.CommandError("I can't run something in the past!")
commandrunner = runners.CommandRunner(win_id)
app = objreg.get('app')
timer = usertypes.Timer(name='later', parent=app)
try:
timer.setSingleShot(True)
try:
timer.setInterval(ms)
except OverflowError:
raise cmdexc.CommandError("Numeric argument is too large for "
"internal int representation.")
timer.timeout.connect(
functools.partial(commandrunner.run_safely, command))
timer.timeout.connect(timer.deleteLater)
timer.start()
except:
timer.deleteLater()
raise
@cmdutils.register(maxsplit=1, no_cmd_split=True, no_replace_variables=True)
@cmdutils.argument('win_id', win_id=True)
def repeat(times: int, command, win_id):
"""Repeat a given command.
Args:
times: How many times to repeat.
command: The command to run, with optional args.
"""
if times < 0:
raise cmdexc.CommandError("A negative count doesn't make sense.")
commandrunner = runners.CommandRunner(win_id)
for _ in range(times):
commandrunner.run_safely(command)
@cmdutils.register(maxsplit=1, no_cmd_split=True, no_replace_variables=True)
@cmdutils.argument('win_id', win_id=True)
@cmdutils.argument('count', count=True)
def run_with_count(count_arg: int, command, win_id, count=1):
"""Run a command with the given count.
If run_with_count itself is run with a count, it multiplies count_arg.
Args:
count_arg: The count to pass to the command.
command: The command to run, with optional args.
count: The count that run_with_count itself received.
"""
runners.CommandRunner(win_id).run(command, count_arg * count)
@cmdutils.register()
def message_error(text):
"""Show an error message in the statusbar.
Args:
text: The text to show.
"""
message.error(text)
@cmdutils.register()
@cmdutils.argument('count', count=True)
def message_info(text, count=1):
"""Show an info message in the statusbar.
Args:
text: The text to show.
        count: How many times to show the message.
"""
for _ in range(count):
message.info(text)
@cmdutils.register()
def message_warning(text):
"""Show a warning message in the statusbar.
Args:
text: The text to show.
"""
message.warning(text)
@cmdutils.register()
def clear_messages():
"""Clear all message notifications."""
message.global_bridge.clear_messages.emit()
@cmdutils.register(debug=True)
@cmdutils.argument('typ', choices=['exception', 'segfault'])
def debug_crash(typ='exception'):
"""Crash for debugging purposes.
Args:
typ: either 'exception' or 'segfault'.
"""
if typ == 'segfault':
os.kill(os.getpid(), signal.SIGSEGV)
raise Exception("Segfault failed (wat.)")
else:
raise Exception("Forced crash")
@cmdutils.register(debug=True)
def debug_all_objects():
"""Print a list of all objects to the debug log."""
s = debug.get_all_objects()
log.misc.debug(s)
@cmdutils.register(debug=True)
def debug_cache_stats():
"""Print LRU cache stats."""
prefix_info = configdata.is_valid_prefix.cache_info()
# pylint: disable=protected-access
render_stylesheet_info = config._render_stylesheet.cache_info()
# pylint: enable=protected-access
history_info = None
try:
from PyQt5.QtWebKit import QWebHistoryInterface
interface = QWebHistoryInterface.defaultInterface()
if interface is not None:
history_info = interface.historyContains.cache_info()
except ImportError:
pass
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window='last-focused')
# pylint: disable=protected-access
tab_bar = tabbed_browser.widget.tabBar()
tabbed_browser_info = tab_bar._minimum_tab_size_hint_helper.cache_info()
# pylint: enable=protected-access
log.misc.info('is_valid_prefix: {}'.format(prefix_info))
log.misc.info('_render_stylesheet: {}'.format(render_stylesheet_info))
log.misc.info('history: {}'.format(history_info))
log.misc.info('tab width cache: {}'.format(tabbed_browser_info))
@cmdutils.register(debug=True)
def debug_console():
"""Show the debugging console."""
try:
con_widget = objreg.get('debug-console')
except KeyError:
log.misc.debug('initializing debug console')
con_widget = consolewidget.ConsoleWidget()
objreg.register('debug-console', con_widget)
if con_widget.isVisible():
log.misc.debug('hiding debug console')
con_widget.hide()
else:
log.misc.debug('showing debug console')
con_widget.show()
@cmdutils.register(debug=True, maxsplit=0, no_cmd_split=True)
def debug_trace(expr=""):
"""Trace executed code via hunter.
Args:
expr: What to trace, passed to hunter.
"""
if hunter is None:
raise cmdexc.CommandError("You need to install 'hunter' to use this "
"command!")
try:
eval('hunter.trace({})'.format(expr))
except Exception as e:
raise cmdexc.CommandError("{}: {}".format(e.__class__.__name__, e))
@cmdutils.register(maxsplit=0, debug=True, no_cmd_split=True)
def debug_pyeval(s, file=False, quiet=False):
"""Evaluate a python string and display the results as a web page.
Args:
s: The string to evaluate.
file: Interpret s as a path to file, also implies --quiet.
quiet: Don't show the output in a new tab.
"""
if file:
quiet = True
path = os.path.expanduser(s)
try:
with open(path, 'r', encoding='utf-8') as f:
s = f.read()
except OSError as e:
raise cmdexc.CommandError(str(e))
try:
exec(s)
out = "No error"
except Exception:
out = traceback.format_exc()
else:
try:
r = eval(s)
out = repr(r)
except Exception:
out = traceback.format_exc()
qutescheme.pyeval_output = out
if quiet:
log.misc.debug("pyeval output: {}".format(out))
else:
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window='last-focused')
tabbed_browser.openurl(QUrl('qute://pyeval'), newtab=True)
@cmdutils.register(debug=True)
def debug_set_fake_clipboard(s=None):
"""Put data into the fake clipboard and enable logging, used for tests.
Args:
s: The text to put into the fake clipboard, or unset to enable logging.
"""
if s is None:
utils.log_clipboard = True
else:
utils.fake_clipboard = s
@cmdutils.register()
@cmdutils.argument('win_id', win_id=True)
@cmdutils.argument('count', count=True)
def repeat_command(win_id, count=None):
"""Repeat the last executed command.
Args:
count: Which count to pass the command.
"""
mode_manager = objreg.get('mode-manager', scope='window', window=win_id)
if mode_manager.mode not in runners.last_command:
raise cmdexc.CommandError("You didn't do anything yet.")
cmd = runners.last_command[mode_manager.mode]
commandrunner = runners.CommandRunner(win_id)
commandrunner.run(cmd[0], count if count is not None else cmd[1])
@cmdutils.register(debug=True, name='debug-log-capacity')
def log_capacity(capacity: int):
"""Change the number of log lines to be stored in RAM.
Args:
capacity: Number of lines for the log.
"""
if capacity < 0:
raise cmdexc.CommandError("Can't set a negative log capacity!")
else:
log.ram_handler.change_log_capacity(capacity)
@cmdutils.register(debug=True)
@cmdutils.argument('level', choices=sorted(
(level.lower() for level in log.LOG_LEVELS),
key=lambda e: log.LOG_LEVELS[e.upper()]))
def debug_log_level(level: str):
"""Change the log level for console logging.
Args:
level: The log level to set.
"""
log.change_console_formatter(log.LOG_LEVELS[level.upper()])
log.console_handler.setLevel(log.LOG_LEVELS[level.upper()])
@cmdutils.register(debug=True)
def debug_log_filter(filters: str):
"""Change the log filter for console logging.
Args:
filters: A comma separated list of logger names. Can also be "none" to
clear any existing filters.
"""
if log.console_filter is None:
raise cmdexc.CommandError("No log.console_filter. Not attached "
"to a console?")
if filters.strip().lower() == 'none':
log.console_filter.names = None
return
if not set(filters.split(',')).issubset(log.LOGGER_NAMES):
raise cmdexc.CommandError("filters: Invalid value {} - expected one "
"of: {}".format(filters,
', '.join(log.LOGGER_NAMES)))
log.console_filter.names = filters.split(',')
@cmdutils.register()
@cmdutils.argument('current_win_id', win_id=True)
def window_only(current_win_id):
"""Close all windows except for the current one."""
for win_id, window in objreg.window_registry.items():
# We could be in the middle of destroying a window here
if sip.isdeleted(window):
continue
if win_id != current_win_id:
window.close()
@cmdutils.register()
def nop():
"""Do nothing."""
pass
@cmdutils.register()
@cmdutils.argument('win_id', win_id=True)
def version(win_id, paste=False):
"""Show version information.
Args:
paste: Paste to pastebin.
"""
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tabbed_browser.openurl(QUrl('qute://version'), newtab=True)
if paste:
pastebin_version()
| 1 | 21,881 | No blank line after the docstring. | qutebrowser-qutebrowser | py |
@@ -17,8 +17,11 @@ package blobvar
import (
"context"
"errors"
+ "github.com/google/go-cmp/cmp"
+ "io/ioutil"
"os"
"path"
+ "path/filepath"
"testing"
"gocloud.dev/blob" | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package blobvar
import (
"context"
"errors"
"os"
"path"
"testing"
"gocloud.dev/blob"
"gocloud.dev/blob/fileblob"
"gocloud.dev/runtimevar"
"gocloud.dev/runtimevar/driver"
"gocloud.dev/runtimevar/drivertest"
)
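// harness implements drivertest.Harness on top of a fileblob bucket rooted in
// a temporary directory; each variable is stored as a blob in that bucket.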
type harness struct {
dir string
bucket *blob.Bucket
}
func newHarness(t *testing.T) (drivertest.Harness, error) {
dir := path.Join(os.TempDir(), "go-cloud-blobvar")
if err := os.MkdirAll(dir, os.ModePerm); err != nil {
return nil, err
}
b, err := fileblob.OpenBucket(dir, nil)
if err != nil {
return nil, err
}
return &harness{dir: dir, bucket: b}, nil
}
func (h *harness) MakeWatcher(ctx context.Context, name string, decoder *runtimevar.Decoder) (driver.Watcher, error) {
return newWatcher(h.bucket, name, decoder, nil), nil
}
func (h *harness) CreateVariable(ctx context.Context, name string, val []byte) error {
return h.bucket.WriteAll(ctx, name, val, nil)
}
func (h *harness) UpdateVariable(ctx context.Context, name string, val []byte) error {
return h.bucket.WriteAll(ctx, name, val, nil)
}
func (h *harness) DeleteVariable(ctx context.Context, name string) error {
return h.bucket.Delete(ctx, name)
}
func (h *harness) Close() {
_ = os.RemoveAll(h.dir)
}
func (h *harness) Mutable() bool { return true }
func TestConformance(t *testing.T) {
drivertest.RunConformanceTests(t, newHarness, []drivertest.AsTest{verifyAs{}})
}
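// verifyAs spot-checks the driver-specific types surfaced through As/ErrorAs;
// for a fileblob-backed variable, ErrorAs is expected to yield an *os.PathError.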
type verifyAs struct{}
func (verifyAs) Name() string {
return "verify As"
}
func (verifyAs) SnapshotCheck(s *runtimevar.Snapshot) error {
return nil
}
func (verifyAs) ErrorCheck(v *runtimevar.Variable, err error) error {
var perr *os.PathError
if !v.ErrorAs(err, &perr) {
return errors.New("runtimevar.ErrorAs failed with *os.PathError")
}
return nil
}
| 1 | 15,144 | nit: move this to the group below. | google-go-cloud | go |
@@ -3045,8 +3045,11 @@ short Sort::generateTdb(Generator * generator,
CostScalar bufferSize = getDefault(GEN_SORT_MAX_BUFFER_SIZE);
- ULng32 bufferSize_as_ulong =
- (ULng32)(MINOF(CostScalar(UINT_MAX), bufferSize)).getValue();
+ UInt32 bufferSize_as_ulong =
+ (UInt32)(MINOF(CostScalar(UINT_MAX), bufferSize)).getValue();
+
+  // allocate buffer to hold at least one row
+ bufferSize_as_ulong = MAXOF(bufferSize_as_ulong, sortRecLen);
GenAssert(sortRecLen <= bufferSize_as_ulong,
"Record Len greater than GEN_SORT_MAX_BUFFER_SIZE"); | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
******************************************************************************
*
* File: GenRelMisc.C
* RCS: $Id: GenRelMisc.cpp,v 1.1 2007/10/09 19:38:54 Exp $
* Description: MapValueId/Root/Tuple operators
*
* Created: 5/17/94
* Modified: $ $Date: 2007/10/09 19:38:54 $ (GMT)
* Language: C++
* Status: $State: Exp $
*
*
*
******************************************************************************
*/
#define SQLPARSERGLOBALS_FLAGS
#include "ComOptIncludes.h"
#include "GroupAttr.h"
#include "ItemColRef.h"
#include "ReadTableDef.h"
#include "RelEnforcer.h"
#include "RelJoin.h"
#include "RelExeUtil.h"
#include "RelMisc.h"
#include "RelSet.h"
#include "RelUpdate.h"
#include "RelScan.h"
#include "RelDCL.h"
#include "PartFunc.h"
#include "Cost.h"
#include "GenExpGenerator.h"
#include "GenResources.h"
#include "ComTdbRoot.h"
#include "ComTdbTuple.h"
#include "ComTdbUnion.h"
#include "ComTdbTupleFlow.h"
#include "ComTdbTranspose.h"
#include "ComTdbSort.h"
#include "ComTdbPackRows.h"
#include "ComTdbDDL.h"
#include "ComTdbExeUtil.h"
#include "ComTdbFirstN.h"
#include "ComTdbStats.h"
#include "ComTdbCancel.h"
#include "ExplainTuple.h"
#include "ComTdbHbaseAccess.h"
#include "ComTdbExplain.h"
#include "SchemaDB.h"
#include "ControlDB.h"
#include "NATable.h"
#include "BindWA.h"
#include "ComTransInfo.h"
#include "DefaultConstants.h"
#include "FragDir.h"
#include "PartInputDataDesc.h"
#include "ExpSqlTupp.h"
#include "sql_buffer.h"
#include "ComQueue.h"
#include "ComSqlId.h"
#include "MVInfo.h"
#include "SequenceGeneratorAttributes.h"
#include "CompilationStats.h"
#include "RelRoutine.h"
#include "hs_cont.h"
#include "ComUnits.h"
#include "StmtDDLCleanupObjects.h"
#ifndef HFS2DM
#define HFS2DM
#endif // HFS2DM
#include "ComDefs.h" // to get common defines (ROUND8)
#include "CmpStatement.h"
#include "ComSmallDefs.h"
#include "sql_buffer_size.h"
#include "ExSqlComp.h" // for NAExecTrans
#include "ComLocationNames.h"
#include "ComDistribution.h"
#include "OptimizerSimulator.h"
#include "ComCextdecs.h"
#include "TrafDDLdesc.h"
#include "SqlParserGlobals.h" // Parser Flags
// this comes from GenExplain.cpp (sorry, should have a header file)
TrafDesc * createVirtExplainTableDesc();
void deleteVirtExplainTableDesc(TrafDesc *);
/////////////////////////////////////////////////////////////////////
//
//
//
//////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
//
// MapValueIds::codeGen()
//
/////////////////////////////////////////////////////////
short MapValueIds::codeGen(Generator * generator)
{
// The MapValueIds node does not result in any executor nodes. It
// does not generate a TDB. It simply 'maps' its lower values to
// its upper values. It does this by doing the following for each
// upper and lower pairing.
//
// - creates a new expressions which converts the lower value to
// the type of the upper value.
//
// - Associates this new expression with the ValueId of the upper
// value, by replacing the ItemExpr of the upper valueId with
// this new expression.
//
// The end result is that in nodes above this MapValueIds node,
// references to the ValueIds of the upper list, will get the new
// expression (the Cast of the lower value).
//
// This code was initially placed in the MapValueIds::preCodeGen(),
// but this caused some problems with Query trees involving
// triggers. The issue was that with triggers it is possible for a
// ValueId to be sourced (produced) in two different subtrees of the
// query tree. One being a MapValueId. So when the expressions of
// the upper valueIds were replaced, it not only affected references
// to the ValueIds above the MapValueIds, it affected the references
// to the ValueIds in other subtrees.
// generate code for the child
child(0)->codeGen(generator);
const ValueIdList & topValues = map_.getTopValues();
for (CollIndex i = 0; i < topValues.entries(); i++)
{
ValueId valId = topValues[i];
// if this value id is mapped to a different one (mappedId ) by
// this node, then convert the "mappedId" to the type of this
// 'valid'
//
ValueId mappedId;
map_.mapValueIdDown(valId,mappedId);
if (mappedId != valId)
{
// Convert the source (lower) value to the type of the
// target (upper) value.
//
ItemExpr * convValue
= new(generator->wHeap()) Cast (mappedId.getItemExpr(),
&(valId.getType()));
// bind/type propagate the new node
convValue->bindNode(generator->getBindWA());
// Replace upper value with converted value.
//
valId.replaceItemExpr(convValue);
}
}
// No TDB was generated. Parent will retrieve childs TDB
//
return 0;
}
/////////////////////////////////////////////////////////
//
// Some helper classes and functions for PartitionAccess::codeGen
//
/////////////////////////////////////////////////////////
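// The small wrapper classes below hold the pieces of a partition name
// (node name, node.volume name) plus a per-node disk count; the name wrappers
// supply hash() and operator== so they can serve as hash-table keys.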
class NodeCountHelper {
friend class NodeHashHelper;
public:
UInt32 getCount(void) { return nodeDiskCount_;}
void incCount(void) { nodeDiskCount_++; }
inline NABoolean operator==(const NodeCountHelper& other) const
{
return (nodeDiskCount_ == other.nodeDiskCount_);
}
private:
UInt32 nodeDiskCount_;
};
class NodeNameHelper {
friend class NodeHashHelper;
friend ULng32 nodeNameHashFunc( const NodeNameHelper& n );
public:
const char * const getNodeName(void) { return nodeName_; }
ULng32 hash() const { return nodeNameHashFunc(*this); }
inline NABoolean operator==(const NodeNameHelper& other) const
{
if (stricmp(nodeName_, other.nodeName_) != 0)
return FALSE;
else
return TRUE;
}
private:
char nodeName_[ComGUARDIAN_SYSTEM_NAME_PART_CHAR_MAX_LEN + 1];
};
ULng32 nodeNameHashFunc( const NodeNameHelper& n )
{
ULng32 retval = 0;
const char * const c = n.nodeName_;
Int32 i = 0;
do {
retval += c[i++];
} while (c[i]);
return retval;
}
class NodeDiskNameHelper {
friend class NodeHashHelper;
friend ULng32 nodeDiskNameHashFunc( const NodeDiskNameHelper& n );
public:
inline NABoolean operator==(const NodeDiskNameHelper& other) const
{
if (stricmp(nodeDiskName_, other.nodeDiskName_) != 0)
return FALSE;
else
return TRUE;
}
ULng32 hash() const { return nodeDiskNameHashFunc(*this); }
private:
char nodeDiskName_[
ComGUARDIAN_SYSTEM_NAME_PART_CHAR_MAX_LEN + 1 // + 1 for the dot.
+ ComGUARDIAN_VOLUME_NAME_PART_CHAR_MAX_LEN + 1 // + 1 for the \0.
];
};
ULng32 nodeDiskNameHashFunc( const NodeDiskNameHelper& n )
{
ULng32 retval = 0;
const char * const c = n.nodeDiskName_;
Int32 i = 0;
do {
retval += c[i++];
} while (c[i]);
return retval;
}
class NodeHashHelper {
public:
NodeHashHelper ( const char * partn )
{
memmove(nodeName_.nodeName_, partn, sizeof nodeName_.nodeName_);
     // Convert 1st dot into a null terminator.
char * dotPos = strchr(nodeName_.nodeName_, '.');
*dotPos = '\0';
memmove(nodeDiskName_.nodeDiskName_, partn,
sizeof nodeDiskName_.nodeDiskName_);
     // Convert 2nd dot into a null terminator.
dotPos = strchr(nodeDiskName_.nodeDiskName_, '.');
dotPos = strchr(dotPos+1, '.');
*dotPos = '\0';
nodeDiskCount_.nodeDiskCount_ = 1;
}
NodeNameHelper * getNodeName(void) { return &nodeName_; }
NodeDiskNameHelper * getNodeDiskName(void) { return &nodeDiskName_; }
NodeCountHelper * getNodeDiskCount(void) { return &nodeDiskCount_ ; }
private:
NodeHashHelper();
NodeCountHelper nodeDiskCount_ ;
NodeNameHelper nodeName_;
NodeDiskNameHelper nodeDiskName_;
};
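// replaceBaseValue: walks the expression tree under 'incomingExpr' and, at the
// first ITM_ASSIGN node whose right child is itself an ITM_ASSIGN, replaces
// that child with (resultExpr + child), retyped as a signed, non-nullable
// LARGEINT. The thread-static 'found' flag (reset via setFound) stops the
// recursion after the first replacement.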
static void replaceBaseValue(ItemExpr *incomingExpr,
ItemExpr *resultExpr,
CollHeap *heap,
NABoolean setFound)
{
static THREAD_P NABoolean found = FALSE;
found = setFound;
Int32 nc = incomingExpr->getArity();
// ITM_ASSIGN operator will not be a leaf node.
if ((nc != 0) && (found == FALSE)) //have not found yet.
{
ItemExpr *child1 = incomingExpr->child(1);
ItemExpr *newExpr = NULL;
if (incomingExpr->getOperatorType() == ITM_ASSIGN &&
(child1 != NULL && child1->getOperatorType() == ITM_ASSIGN)
)
{
newExpr = new(heap) BiArith(ITM_PLUS,
resultExpr,
child1);
newExpr->synthTypeAndValueId(TRUE);
            // set type to original type after it has been changed to BigNum above
newExpr->getValueId().changeType(new (heap) SQLLargeInt(1 /* signed */,
0 /* not null */));
incomingExpr->setChild(1,newExpr);
found = TRUE;
return;
}
for (Lng32 i = 0; i < (Lng32)nc; i++)
{
if (found == FALSE)
replaceBaseValue(incomingExpr->child(i), resultExpr, heap, found);
}
}
return;
}
short GenericUtilExpr::codeGen(Generator * generator)
{
GenAssert(0, "GenericUtilExpr::codeGen. Should not reach here.");
return 0;
}
/////////////////////////////////////////////////////////
//
// DDLExpr::codeGen()
//
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
//
// ExeUtilExpr::codeGen()
//
/////////////////////////////////////////////////////////
const char * DDLExpr::getVirtualTableName()
{ return (producesOutput() ? "DDL_EXPR__" : NULL); }
TrafDesc *DDLExpr::createVirtualTableDesc()
{
TrafDesc * table_desc = NULL;
if (producesOutput())
{
table_desc =
Generator::createVirtualTableDesc(getVirtualTableName(),
ComTdbDDL::getVirtTableNumCols(),
ComTdbDDL::getVirtTableColumnInfo(),
ComTdbDDL::getVirtTableNumKeys(),
ComTdbDDL::getVirtTableKeyInfo());
}
return table_desc;
}
short DDLExpr::codeGen(Generator * generator)
{
Space * space = generator->getSpace();
generator->verifyUpdatableTransMode(NULL, generator->getTransMode(), NULL);
// remove trailing blanks and append a semicolon, if one is not present.
char * ddlStmt = NULL;
#pragma nowarn(1506) // warning elimination
Int32 i = strlen(getDDLStmtText());
#pragma warn(1506) // warning elimination
while ((i > 0) && (getDDLStmtText()[i-1] == ' '))
i--;
if (getDDLStmtText()[i-1] == ';')
i--;
ddlStmt = space->allocateAlignedSpace(i+2);
strncpy(ddlStmt, getDDLStmtText(), i);
// add a semicolon to the end of str (required by the parser)
ddlStmt[i++] = ';';
ddlStmt[i] = '\0';
ex_cri_desc * givenDesc
= generator->getCriDesc(Generator::DOWN);
ex_cri_desc * returnedDesc = givenDesc;
ex_cri_desc * workCriDesc = NULL;
const Int32 work_atp = 1;
const Int32 ddl_row_atp_index = 2;
if (producesOutput())
{
// allocate a map table for the retrieved columns
generator->appendAtEnd();
returnedDesc = new(space) ex_cri_desc(givenDesc->noTuples() + 1, space);
workCriDesc = new(space) ex_cri_desc(4, space);
short rc = processOutputRow(generator, work_atp, ddl_row_atp_index,
returnedDesc);
if (rc)
{
return -1;
}
}
NAString catSchName =
generator->currentCmpContext()->schemaDB_->getDefaultSchema().getSchemaNameAsAnsiString();
CMPASSERT(!catSchName.isNull()); // not empty
CMPASSERT(catSchName.first('.') != NA_NPOS); // quick test: 'cat.sch'
char * catSchNameStr = space->allocateAlignedSpace(catSchName.length() + 1);
strcpy(catSchNameStr, catSchName.data());
ComTdbDDL * ddl_tdb = NULL;
if (returnStatus_)
{
ComTdbDDLwithStatus *ddl_ws_tdb = new(space)
ComTdbDDLwithStatus(ddlStmt,
strlen(ddlStmt),
(Int16)getDDLStmtTextCharSet(),
catSchNameStr, strlen(catSchNameStr),
0, 0, // no input expr
0, 0, // no output expr
workCriDesc, (producesOutput() ? ddl_row_atp_index : 0),
givenDesc,
returnedDesc,
(queue_index)getDefault(GEN_DDL_SIZE_DOWN),
(queue_index)getDefault(GEN_DDL_SIZE_UP),
getDefault(GEN_DDL_NUM_BUFFERS),
getDefault(GEN_DDL_BUFFER_SIZE));
ddl_ws_tdb->setReturnStatus(TRUE);
if (isCleanup_)
{
ddl_ws_tdb->setMDcleanup(TRUE);
StmtDDLCleanupObjects * co =
getExprNode()->castToElemDDLNode()->castToStmtDDLCleanupObjects();
if (co->checkOnly())
ddl_ws_tdb->setCheckOnly(TRUE);
if (co->returnDetails())
ddl_ws_tdb->setReturnDetails(TRUE);
}
ddl_tdb = ddl_ws_tdb;
}
else
ddl_tdb = new(space)
ComTdbDDL(ddlStmt,
strlen(ddlStmt),
(Int16)getDDLStmtTextCharSet(),
catSchNameStr, strlen(catSchNameStr),
0, 0, // no input expr
0, 0, // no output expr
workCriDesc, (producesOutput() ? ddl_row_atp_index : 0),
givenDesc,
returnedDesc,
(queue_index)getDefault(GEN_DDL_SIZE_DOWN),
(queue_index)getDefault(GEN_DDL_SIZE_UP),
getDefault(GEN_DDL_NUM_BUFFERS),
getDefault(GEN_DDL_BUFFER_SIZE));
if (isHbase_)
{
ddl_tdb->setHbaseDDL(TRUE);
if (hbaseDDLNoUserXn_)
ddl_tdb->setHbaseDDLNoUserXn(TRUE);
}
generator->initTdbFields(ddl_tdb);
if(!generator->explainDisabled()) {
generator->setExplainTuple(
addExplainInfo(ddl_tdb, 0, 0, generator));
}
// no tupps are returned
generator->setCriDesc((ex_cri_desc *)(generator->getCriDesc(Generator::DOWN)),
Generator::UP);
generator->setGenObj(this, ddl_tdb);
// Set the transaction flag.
if (xnNeeded())
{
if (NOT isHbase_)
generator->setTransactionFlag(-1);
else if (getExprNode() &&
getExprNode()->castToStmtDDLNode()->ddlXns() &&
(NOT hbaseDDLNoUserXn_))
{
// treat like a transactional IUD operation which need to be
// aborted in case of an error.
generator->setFoundAnUpdate(TRUE);
generator->setUpdAbortOnError(TRUE);
generator->setTransactionFlag(-1);
}
else if (NOT hbaseDDLNoUserXn_)
generator->setTransactionFlag(-1);
}
return 0;
}
/////////////////////////////////////////////////////////
//
// ExeUtilMetadataUpgrade::codeGen()
//
/////////////////////////////////////////////////////////
short ExeUtilMetadataUpgrade::codeGen(Generator * generator)
{
ExpGenerator * expGen = generator->getExpGenerator();
Space * space = generator->getSpace();
// allocate a map table for the retrieved columns
generator->appendAtEnd();
ex_cri_desc * givenDesc
= generator->getCriDesc(Generator::DOWN);
ex_cri_desc * returnedDesc
= new(space) ex_cri_desc(givenDesc->noTuples() + 1, space);
ex_cri_desc * workCriDesc = new(space) ex_cri_desc(4, space);
const Int32 work_atp = 1;
const Int32 exe_util_row_atp_index = 2;
short rc = processOutputRow(generator, work_atp, exe_util_row_atp_index,
returnedDesc);
if (rc)
{
return -1;
}
ComTdbDDLwithStatus * upgd_tdb = new(space)
ComTdbDDLwithStatus(NULL, 0, 0,
NULL, 0,
0, 0, // no input expr
0, 0, // no output expr
NULL, 0,
givenDesc,
returnedDesc,
(queue_index)getDefault(GEN_DDL_SIZE_DOWN),
(queue_index)getDefault(GEN_DDL_SIZE_UP),
getDefault(GEN_DDL_NUM_BUFFERS),
getDefault(GEN_DDL_BUFFER_SIZE));
if (getMDVersion())
upgd_tdb->setGetMDVersion(TRUE);
else if (getSWVersion())
upgd_tdb->setGetSWVersion(TRUE);
else
upgd_tdb->setMDupgrade(TRUE);
generator->initTdbFields(upgd_tdb);
if(!generator->explainDisabled()) {
generator->setExplainTuple(
addExplainInfo(upgd_tdb, 0, 0, generator));
}
generator->setCriDesc(givenDesc, Generator::DOWN);
generator->setCriDesc(returnedDesc, Generator::UP);
generator->setGenObj(this, upgd_tdb);
// Reset the transaction flag.
generator->setTransactionFlag(0);
return 0;
}
/////////////////////////////////////////////////////////
//
// FirstN::codeGen()
//
/////////////////////////////////////////////////////////
short FirstN::codeGen(Generator * generator)
{
ExpGenerator* expGen = generator->getExpGenerator();
Space * space = generator->getSpace();
ex_cri_desc * given_desc = generator->getCriDesc(Generator::DOWN);
// generate code for my child
child(0)->codeGen(generator);
ComTdb * child_tdb = (ComTdb *)(generator->getGenObj());
ExplainTuple *childExplainTuple = generator->getExplainTuple();
ex_cri_desc * returned_desc = generator->getCriDesc(Generator::UP);
ex_cri_desc * work_cri_desc = NULL;
ex_expr * firstNRowsExpr = NULL;
if (firstNRowsParam_)
{
Int32 work_atp = 1; // temps
Int32 work_atp_index = 2; // where the result row will be
work_cri_desc = new(space) ex_cri_desc(3, space);
// input param is typed as nullable. Make it non-nullable and unsigned.
NAType * newNAT =
firstNRowsParam_->getValueId().getType().newCopy(generator->wHeap());
newNAT->setNullable(FALSE, FALSE);
Cast * fnp = new(generator->wHeap()) Cast(firstNRowsParam_, newNAT);
fnp->bindNode(generator->getBindWA());
ValueIdList vidL;
vidL.insert(fnp->getValueId());
UInt32 firstNValLen = 0;
expGen->generateContiguousMoveExpr(vidL,
0, // no convert nodes,
work_atp, work_atp_index,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
firstNValLen, &firstNRowsExpr,
NULL, ExpTupleDesc::SHORT_FORMAT);
}
ComTdbFirstN * firstN_tdb
= new(space) ComTdbFirstN(
child_tdb,
getFirstNRows(),
firstNRowsExpr,
work_cri_desc,
given_desc,
returned_desc,
child_tdb->getMaxQueueSizeDown(),
child_tdb->getMaxQueueSizeUp(),
1, 4096);
generator->initTdbFields(firstN_tdb);
if(!generator->explainDisabled()) {
generator->setExplainTuple(
addExplainInfo(firstN_tdb, childExplainTuple, 0, generator));
}
generator->setGenObj(this, firstN_tdb);
return 0;
}
/////////////////////////////////////////////////////////
//
// RelRoot::genSimilarityInfo()
//
/////////////////////////////////////////////////////////
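// Builds the similarity info checked at run time: for each entry in the
// generator's TrafSimTableInfoList, the table name, HDFS location/host/port
// and modification timestamp are copied into the plan space. Automatic
// recompilation is disabled in the returned object when the
// AUTOMATIC_RECOMPILATION default is OFF.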
TrafQuerySimilarityInfo * RelRoot::genSimilarityInfo(Generator *generator)
{
ExpGenerator * exp_gen = generator->getExpGenerator();
NABoolean recompOnTSMismatch = FALSE;
NABoolean errorOnTSMismatch = FALSE;
// generate the similarity info.
Space * space = generator->getSpace();
NABoolean disableAutoRecomp = (CmpCommon::getDefault(AUTOMATIC_RECOMPILATION) == DF_OFF);
Queue * siList = new(space) Queue(space);
TrafQuerySimilarityInfo * qsi = NULL;
if (generator->getTrafSimTableInfoList().entries() > 0)
qsi = new(space) TrafQuerySimilarityInfo(siList);
CollIndex i = 0;
for (CollIndex i = 0; i < generator->getTrafSimTableInfoList().entries(); i++)
{
TrafSimilarityTableInfo * genTsi =
(TrafSimilarityTableInfo *)(generator->getTrafSimTableInfoList()[i]);
char * genTablename =
space->allocateAndCopyToAlignedSpace(genTsi->tableName(), str_len(genTsi->tableName()), 0);
char * genRootDir =
space->allocateAndCopyToAlignedSpace(genTsi->hdfsRootDir(), str_len(genTsi->hdfsRootDir()), 0);
char * genHdfsHostName =
space->allocateAndCopyToAlignedSpace(genTsi->hdfsHostName(), str_len(genTsi->hdfsHostName()), 0);
TrafSimilarityTableInfo * si =
new(space) TrafSimilarityTableInfo(genTablename,
genTsi->isHive(),
genRootDir,
genTsi->modTS(),
genTsi->numPartnLevels(),
NULL,
genHdfsHostName,
genTsi->hdfsPort());
qsi->siList()->insert(si);
}
if (qsi)
{
qsi->setDisableAutoRecomp(disableAutoRecomp);
}
return qsi;
}
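/////////////////////////////////////////////////////////
//
// RelRoot::codeGen()
//
/////////////////////////////////////////////////////////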
short RelRoot::codeGen(Generator * generator)
{
ExpGenerator * exp_gen = generator->getExpGenerator();
Space * space;
FragmentDir *compFragDir = generator->getFragmentDir();
NABoolean childTdbIsNull = FALSE;
// -- MVs
// Mark in the generator that we are doing an INTERNAL REFRESH command now.
if (isRootOfInternalRefresh())
generator->setInternalRefreshStatement();
MapTable * map_table = generator->appendAtEnd();
// create the fragment (independent code space) for the master executor
CollIndex myFragmentId = compFragDir->pushFragment(FragmentDir::MASTER);
// When the master executor gets the generated space, it assumes that
// the root node is stored at the very beginning of the space, so
// make sure that the master fragment is fragment 0 (the code
// below makes sure that the root node is at the beginning of its
// fragment).
GenAssert(myFragmentId == 0,"NOT myFragmentId == 0");
// now we can get a hold of the space object for this fragment
space = generator->getSpace();
// usually space for a node is generated after generating code for
// its children. In case of root, generate the root tdb first.
// This is done so that the start of the generated code space
// could point to the root tdb. The tdb, however, is not initialized
// yet.
ComTdbRoot *root_tdb = new(space) ComTdbRoot();
generator->initTdbFields(root_tdb);
// tell the root tdb whether we collect statistics or not
if (generator->computeStats())
{
root_tdb->setCollectStats(generator->computeStats());
root_tdb->setCollectStatsType(generator->collectStatsType());
root_tdb->setCollectRtsStats(generator->collectRtsStats());
}
//For consistent display of overflow_mode in stats.
root_tdb->setOverflowMode(generator->getOverflowMode());
// set the object for the top level fragment
compFragDir->setTopObj((char *) root_tdb);
// Copy the current context-wide TransMode,
// then overlay with this stmt's "FOR xxx ACCESS" setting, if any.
TransMode * transMode = new(space) TransMode();
transMode->updateTransMode(generator->getTransMode());
//
if (accessOptions().accessType() != ACCESS_TYPE_NOT_SPECIFIED_)
{
// "FOR xxx ACCESS" becomes an IsolationLevel, and both IL and AccessMode
// are set in the transMode
transMode->updateAccessModeFromIsolationLevel(
TransMode::ATtoIL(accessOptions().accessType()));
transMode->setStmtLevelAccessOptions();
}
else if ( ( generator->currentCmpContext()->internalCompile() == CmpContext::INTERNAL_MODULENAME) ||
( CmpCommon::statement()->isSMDRecompile() )
)
{
// As a nicety to everyone writing a trusted .mdf (RFORK, etc),
// we set this flag so that cli/Statement::execute() will not
// recompile those trusted stmts due to any TransMode mismatch.
// (Otherwise, everyone would need to add "FOR xxx ACCESS" to each stmt!)
//
transMode->setStmtLevelAccessOptions();
}
ex_expr * input_expr = 0;
ex_expr * output_expr = 0;
CollIndex i;
ex_expr * pkey_expr = NULL;
ULng32 pkey_len = 0;
ex_expr* pred_expr = NULL;
ULng32 cacheVarsSize = 0;
// unsigned long tablenameCacheVarsSize = 0;
// max number of rows in user rowwise rowset.
Lng32 rwrsMaxSize = 0;
// index into the user params to find the value of the number of
// actual rows in the rowwise rowset buffer.
short rwrsInputSizeIndex = 0;
// index into the user params to find the value of the max length
// of each row in the user rowwise rowset buffer.
short rwrsMaxInputRowlenIndex = 0;
// index into the user params to find the value of the address
// of rowwise rowset buffer in user space.
short rwrsBufferAddrIndex = 0;
// index into user params to find the value of the partition number
// where this rwrs need to be shipped to.
short rwrsPartnNumIndex = -1; // not specified
// length of the each internal tuple where user's row will be moved in
// at runtime.
Lng32 rwrsMaxInternalRowlen = 0;
RWRSInfo *rwrsInfo = NULL;
char *rwrsInfoBuf = NULL;
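  // If this statement uses a rowwise rowset, allocate the RWRSInfo the
  // executor needs to unpack the user buffer and remember the buffer
  // attributes (compression, whether the partition number travels in the
  // buffer).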
if (getHostArraysArea() && getHostArraysArea()->getRowwiseRowset())
{
rwrsInfo = (RWRSInfo *) new (space) char[sizeof(RWRSInfo)];
rwrsInfoBuf = (char*)rwrsInfo;
rwrsMaxSize =
(Lng32)((ConstValue*)getHostArraysArea()->rwrsMaxSize())->
getExactNumericValue();
NABoolean packedFormat = FALSE;
NABoolean compressed = FALSE;
NABoolean dcompressInMaster = FALSE;
NABoolean compressInMaster = FALSE;
NABoolean partnNumInBuffer = FALSE;
getHostArraysArea()->getBufferAttributes(packedFormat,
compressed, dcompressInMaster,
compressInMaster,
partnNumInBuffer);
rwrsInfo->setRWRSisCompressed(compressed);
rwrsInfo->setDcompressInMaster(dcompressInMaster);
rwrsInfo->setPartnNumInBuffer(partnNumInBuffer);
}
// inputVars() can contain multiple references to the same
// param/hostvar value, if it is specified more than once in
// a statement. (Seems like a bug to me, but no one is around
  // to fix it, so I will just work around it). The CharacteristicInputs
// do not contain duplicate references. Use them to create a non-duplicate
// newInputVars list.
ValueIdList newInputVars, cacheVars, rwrsVars;
NABoolean userInputVars = FALSE;
short entry = 1;
for (i = 0; i < inputVars().entries(); i++)
{
// CharacteristicInputs contains constants. Don't add
// them as input. Add hostvar/params to map table, if not
// already added. This will remove duplicates.
// Add the non-duplicate input val-ids to the newInputVars list.
ValueId val_id = inputVars()[i];
ItemExpr * item_expr = val_id.getItemExpr();
// We create a dummy host var in case it gets a value id from an
// assignment in compound statements. Such variable will not need
// to be processed by Cli since it gets its value inside the statement
NABoolean blankHV = FALSE ;
if (item_expr->previousHostVar()) {
Int32 j = 0;
#pragma warning (disable : 4018) //warning elimination
for (j = 0; j < i; j++) {
#pragma warning (default : 4018) //warning elimination
ItemExpr *ie = inputVars()[j].getItemExpr();
if (ie->getOperatorType() == ITM_HOSTVAR) {
if (item_expr->previousName() == ((HostVar *) ie)->getName()) {
break;
}
}
}
#pragma warning (disable : 4018) //warning elimination
if (i == j) {
#pragma warning (default : 4018) //warning elimination
NAString str1 = "previousHV__";
char str2[30];
str_itoa(i, str2);
str1 += str2;
item_expr = new(generator->wHeap()) HostVar(str1,
new(generator->wHeap()) SQLUnknown);
item_expr->bindNode(generator->getBindWA());
blankHV = TRUE;
val_id = item_expr->getValueId();
}
}
OperatorTypeEnum op = item_expr->getOperatorType();
if ((op == ITM_HOSTVAR) ||
(op == ITM_DYN_PARAM)){
userInputVars = TRUE;
// Vicz: filter out the OUT HostVar/DynamicParam
ComColumnDirection paramMode = item_expr->getParamMode();
if(paramMode == COM_OUTPUT_COLUMN)
continue;
}
      // the list of operator types that was present here in R1.8 code is
// almost equivalent to isAUserSuppliedInput()
// except for Constant which cannot be handled here as its atpindex
// should be 0 and not 2.
// The num_tupps local variable just outside this IF will create
// attributes with atpindex 2.
// Constants are added later to the MapTable.
if (((item_expr->isAUserSuppliedInput()) && // for evaluate once functions
(op != ITM_CONSTANT)) &&
(! generator->getMapInfoAsIs(val_id))) // not added yet
{
MapInfo *map = generator->addMapInfoToThis(generator->getLastMapTable(),
val_id, NULL);
// Transfer the information on rowsets that is in this host variable
// into its attribute so we know this information at run time
if (op == ITM_HOSTVAR || op == ITM_DYN_PARAM)
{
Attributes *attr = map->getAttr();
UInt32 rowsetInfo;
if (op == ITM_HOSTVAR)
{
HostVar *hv = (HostVar *) (val_id.getItemExpr());
rowsetInfo = hv->getRowsetInfo();
if (blankHV)
{
attr->setBlankHV();
}
}
else // (op == ITM_DYN_PARAM)
{
DynamicParam *dp = (DynamicParam *) (val_id.getItemExpr());
rowsetInfo = dp->getRowsetInfo();
if (dp->isDPRowsetForInputSize())
rwrsInputSizeIndex = entry;
else if (dp->isRowwiseRowsetInputMaxRowlen())
rwrsMaxInputRowlenIndex = entry;
else if (dp->isRowwiseRowsetInputBuffer())
rwrsBufferAddrIndex = entry;
else if (dp->isRowwiseRowsetPartnNum())
rwrsPartnNumIndex = entry;
}
attr->setRowsetInfo((Int16)rowsetInfo);
}
if ((op == ITM_DYN_PARAM) &&
((DynamicParam *)item_expr)->isRowInRowwiseRowset())
{
rwrsVars.insert(val_id);
}
else if (op == ITM_CACHE_PARAM)
{
// This is a parameter generated by Query Caching
cacheVars.insert(val_id);
}
else
{
newInputVars.insert(val_id);
entry++;
}
}
} // for
Int32 num_tupps = 2; /* atp_index 0 for constants, 1 for temps */
// create a row(tuple) with input param/hostvar values and pass
// it to the child.
// assign offset to elements in the input vars list.
// Offsets are assigned in the input row tuple (atp index = 2).
Attributes ** attrs = new(generator->wHeap())
Attributes * [newInputVars.entries()];
for (i = 0; i < newInputVars.entries(); i++)
{
attrs[i] = generator->addMapInfo(newInputVars[i], NULL)->getAttr();
}
ULng32 input_vars_size = 0;
exp_gen->processAttributes(newInputVars.entries(), attrs,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
input_vars_size, 0 /*atp*/, num_tupps/*atpIdx*/);
//++Triggers,
// Save the offsets of triggerStatus and UniqueExecuteId,
// so ex_root_tcb can set it's value
Lng32 triggersStatusOffset = -1;
Lng32 uniqueExecuteIdOffset = -1;
for (i = 0; i < newInputVars.entries(); i++)
{
ItemExpr * item_expr = (newInputVars)[i].getItemExpr();
if (item_expr->getOperatorType() == ITM_UNIQUE_EXECUTE_ID)
#pragma nowarn(1506) // warning elimination
uniqueExecuteIdOffset = attrs[i]->getOffset();
#pragma warn(1506) // warning elimination
if (item_expr->getOperatorType() == ITM_GET_TRIGGERS_STATUS)
{
GenAssert(getTriggersList()->entries()>0,
"No triggers, yet TriggerStatusOffset != -1");
#pragma nowarn(1506) // warning elimination
triggersStatusOffset = attrs[i]->getOffset();
#pragma warn(1506) // warning elimination
}
}
//--Triggers,
num_tupps += ((newInputVars.entries() > 0) ? 1 : 0); // plus 1 to hold the input
// params and hostvars.
if (updateCurrentOf())
{
GenAssert(pkeyList().entries() > 0, "pkeyList().entries() must be > 0");
// create a row(tuple) with pkey hostvar values and pass
// it to the child.
// assign offset to elements in the pkeyList.
// Offset is assigned at atp index which one greater than where
// the input num_tupps is.
Attributes ** attrs = new(generator->wHeap())
Attributes * [pkeyList().entries()];
for (i = 0; i < pkeyList().entries(); i++)
{
attrs[i] = generator->addMapInfo(pkeyList()[i], NULL)->getAttr();
}
exp_gen->processAttributes(pkeyList().entries(), attrs,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
pkey_len,0/*atp*/,num_tupps/*atpIdx*/);
num_tupps += 1;
}
// Process expressions generated by Query Caching.
Attributes ** cachedAttrs = NULL;
if (cacheVars.entries() > 0) {
cachedAttrs = new(generator->wHeap())
Attributes * [cacheVars.entries()];
for (i = 0; i < cacheVars.entries(); i++)
{
cachedAttrs[i] = generator->addMapInfo(cacheVars[i], NULL)->getAttr();
}
exp_gen->processAttributes(cacheVars.entries(), cachedAttrs,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
cacheVarsSize,0/*atp*/,num_tupps/*atpIdx*/);
num_tupps += 1;
}
// Space needed for query caching. This is where the values of ConstantParameters
// will go
// char *queryCacheParameterBuffer = (char *) new (space) char[cacheVarsSize];
NABoolean qCacheInfoIsClass = FALSE;
if (CmpCommon::getDefault(QUERY_CACHE_RUNTIME) == DF_ON)
qCacheInfoIsClass = TRUE;
char *parameterBuffer = (char *) new (space) char[cacheVarsSize];
char *qCacheInfoBuf = NULL;
if (qCacheInfoIsClass)
{
QCacheInfo *qCacheInfo =
(QCacheInfo *) new (space) char[sizeof(QCacheInfo)];
qCacheInfo->setParameterBuffer(parameterBuffer);
qCacheInfoBuf = (char*)qCacheInfo;
}
else
qCacheInfoBuf = parameterBuffer;
// Check for reasons why the query plan should not be cached.
// Note: This does not influence the use of cache parameters,
// it's too late at this time to undo that.
const LIST(CSEInfo *) *cseInfoList = CmpCommon::statement()->getCSEInfoList();
if (cseInfoList &&
CmpCommon::getDefault(CSE_CACHE_TEMP_QUERIES) == DF_OFF)
for (CollIndex i=0; i<cseInfoList->entries(); i++)
if (cseInfoList->at(i)->usesATempTable())
generator->setNonCacheableCSEPlan(TRUE);
// compute offsets for rwrs attrs. Offsets are computed separately
// for rwrs vars since these values will be moved as part of input
// row at runtime. This input row should only contain values which are
// being inserted (ex, in the VALUES clause) and not any other input
// values (like, input size, buffer, etc).
// If rwrs vars was included in newInputVars before computing
// the offsets, then these offsets will also include the non-rwrs
// vars which will not be correct.
//
// Do not assign any atp index at this time.
// atp index will be determined at runtime
  // when the actual rows are extracted from the rowset,
// processed and moved up the queue.
Attributes ** rwrsAttrs = NULL;
  // next var is used if the buffer needs to be decompressed using the unicode
  // decoding algorithm.
NABoolean useUnicodeDcompress = FALSE;
if (rwrsVars.entries() > 0)
{
rwrsAttrs = new(generator->wHeap())
Attributes * [rwrsVars.entries()];
for (i = 0; i < rwrsVars.entries(); i++)
{
rwrsAttrs[i] = generator->addMapInfo(rwrsVars[i], NULL)->getAttr();
if (rwrsAttrs[i]->getCharSet() != CharInfo::ISO88591)
useUnicodeDcompress = TRUE;
}
// assign offsets.
// No real atp index is to be assigned.
// Cannot make it -1 as processAttrs doesn't like that.
// Make atp index to be SHRT_MAX (out of reach).
ULng32 len;
exp_gen->processAttributes(rwrsVars.entries(), rwrsAttrs,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
len,
0/*atp*/, SHRT_MAX/*atpIdx*/);
rwrsMaxInternalRowlen = len;
}
if (rwrsInfo)
{
rwrsInfo->setRwrsInfo(rwrsMaxSize, rwrsInputSizeIndex,
rwrsMaxInputRowlenIndex, rwrsBufferAddrIndex,
rwrsPartnNumIndex,
rwrsMaxInternalRowlen);
if (rwrsInfo->rwrsIsCompressed())
rwrsInfo->setUseUnicodeDcompress(useUnicodeDcompress);
}
// generate the input expression to move in hostvar/param values
// from user area.
// Include rwrs in newInputVars before generating the input expr
  // since we want all of these vars to be returned to the user when
// they are 'described' at runtime.
if ((newInputVars.entries() > 0) ||
(rwrsVars.entries() > 0))
{
newInputVars.insert(rwrsVars);
exp_gen->generateInputExpr(newInputVars, ex_expr::exp_INPUT_OUTPUT,
&input_expr);
}
#pragma nowarn(1506) // warning elimination
ex_cri_desc * cri_desc = new(space) ex_cri_desc(num_tupps, space);
#pragma warn(1506) // warning elimination
generator->setCriDesc(cri_desc, Generator::DOWN);
generator->setInputExpr((void *)input_expr);
ExplainDesc *explainDesc = NULL;
if(!generator->explainDisabled())
{
// Create a space object for the explain fragment
if(generator->getExplainFragDirIndex() == NULL_COLL_INDEX)
{
// Create an Explain Fragment
generator->setExplainFragDirIndex(
generator->getFragmentDir()->pushFragment(FragmentDir::EXPLAIN,0));
generator->getFragmentDir()->popFragment();
}
ExplainFunc explainFunc;
TrafDesc *explainDescStruct = explainFunc.createVirtualTableDesc();
Space *explainSpace = generator->getFragmentDir()->
getSpace(generator->getExplainFragDirIndex());
TrafTableDesc *tableDesc = explainDescStruct->tableDesc();
// Determine the length of the Explain Tuple.
Lng32 recLength = tableDesc->record_length;
// Determine the number of columns in the Explain Tuple.
Lng32 numCols = tableDesc->colcount;
explainDesc =
new(explainSpace) ExplainDesc(numCols, recLength, explainSpace);
TrafDesc *cols = tableDesc->columns_desc;
// For each column of the Virtual Explain Table, extract the
// relevant info. from the table desc and put it into the ExplainDesc
for(Int32 c = 0; c < numCols; c++ /* no pun intended */)
{
TrafColumnsDesc *colsDesc = (cols->columnsDesc());
explainDesc->setColDescr(c,
colsDesc->datatype,
colsDesc->length,
colsDesc->offset,
colsDesc->isNullable());
cols = cols->next;
}
explainFunc.deleteVirtualTableDesc(explainDescStruct);
compFragDir->setTopObj(generator->getExplainFragDirIndex(),
(char *)explainDesc);
}
// Take note of whether this is a parallel extract query before
// generating the child tree.
NABoolean isExtractProducer = (numExtractStreams_ > 0 ? TRUE : FALSE);
NABoolean isExtractConsumer =
(childOperType() == REL_EXTRACT_SOURCE ? TRUE : FALSE);
// the tree below needs to know if this is a LRU operation, hence
// make this check before the children are codeGened.
if (containsLRU())
{
generator->setLRUOperation(TRUE);
}
if (getTolerateNonFatalError() == RelExpr::NOT_ATOMIC_)
{
generator->setTolerateNonFatalError(TRUE);
}
// Copy #BMOs value from Root node into the fragment
compFragDir->setNumBMOs(myFragmentId, getNumBMOs());
compFragDir->setBMOsMemoryUsage(myFragmentId, getBMOsMemoryUsage().value());
// generate child tree
child(0)->codeGen(generator);
ComTdb * child_tdb = (ComTdb *)(generator->getGenObj());
if (child_tdb == (ComTdb *)NULL)
childTdbIsNull = TRUE;
// Remap the allocation of ESPs to Nodes/CPUs.
if (ActiveSchemaDB()->getDefaults().getAsLong(AFFINITY_VALUE) == -2)
generator->remapESPAllocationRandomly();
else
generator->remapESPAllocationAS();
generator->compilerStatsInfo().affinityNumber()
= generator->getAffinityValueUsed();
// if an output expression is present, generate it.
if (compExpr_.entries() > 0)
{
// Special cases to consider are
// * stored procedure result sets
// * parallel extract consumers
//
// In these plans we want special table and column names in the
// output expression. The names will come from the root's child
// node and be pointed to by these two variables. For all other
// ("normal") statements, these two pointers will remain NULL.
//
ConstNAStringPtr *colNamesForExpr = NULL;
ConstQualifiedNamePtr *tblNamesForExpr = NULL;
OperatorTypeEnum childType =
child(0)->castToRelExpr()->getOperatorType();
ComUInt32 numColumns = getRETDesc()->getDegree();
if ((childType == REL_SP_PROXY || isExtractConsumer) &&
numColumns > 0)
{
ProxyFunc *proxy;
if (childType == REL_SP_PROXY)
{
// This is a stored procedure result set
proxy = (ProxyFunc *) child(0)->castToRelExpr();
}
else
{
// This is an extract consumer. The extract operator is not
// the direct child of the root. An exchange operator sits
// between the two.
GenAssert(childType == REL_EXCHANGE,
"Child of root should be exchange for consumer query");
GenAssert(child(0)->child(0),
"Child of root should not be a leaf for consumer query");
OperatorTypeEnum grandChildType =
child(0)->child(0)->castToRelExpr()->getOperatorType();
GenAssert(grandChildType == REL_EXTRACT_SOURCE,
"Grandchild of root has unexpected type for consumer query");
proxy = (ProxyFunc *) child(0)->child(0)->castToRelExpr();
}
// Populate the table and column name collections that will be
// used below to generate the output expression.
colNamesForExpr = new (generator->wHeap())
ConstNAStringPtr[numColumns];
tblNamesForExpr = new (generator->wHeap())
ConstQualifiedNamePtr[numColumns];
for (ComUInt32 i = 0; i < numColumns; i++)
{
colNamesForExpr[i] = proxy->getColumnNameForDescriptor(i);
tblNamesForExpr[i] = proxy->getTableNameForDescriptor(i);
}
}
exp_gen->generateOutputExpr(compExpr_,
ex_expr::exp_INPUT_OUTPUT,
&output_expr,
getRETDesc(),
getSpOutParams(),
colNamesForExpr,
tblNamesForExpr);
}
if (getPredExprTree())
{
// ItemExpr * newPredTree = executorPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
exp_gen->generateExpr(getPredExprTree()->getValueId(), ex_expr::exp_SCAN_PRED,
&pred_expr);
}
// if child's primary key columns are to be returned to be passed
// on to UPDATE WHERE CURRENT OF query, generate an
// expression to compute the pkey row.
ex_cri_desc * work_cri_desc = NULL;
if (updatableSelect() == TRUE)
{
GenAssert(pkeyList().entries() > 0, "pkeyList().entries() must be > 0");
work_cri_desc = new(space) ex_cri_desc(3, space);
exp_gen->generateContiguousMoveExpr(pkeyList(),
1, // add convert nodes,
1, 2,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
pkey_len, &pkey_expr,
NULL, ExpTupleDesc::SHORT_FORMAT);
}
// if this is an 'update where current of' query, pass in
// the fetched cursor name or hvar number to the root tdb.
char * fetchedCursorName = NULL;
short fetchedCursorHvar = -1;
if (updateCurrentOf())
{
if (currOfCursorName_->getOperatorType() == ITM_CONSTANT)
{
// cursor name is specified as a literal
NABoolean tv;
ConstValue * cv = currOfCursorName_->castToConstValue(tv);
fetchedCursorName =
space->allocateAndCopyToAlignedSpace((char *)(cv->getConstValue()),
cv->getStorageSize(),
0);
}
else
{
// cursor name was specified as a hvar
HostVar * cursorHvar = (HostVar *)currOfCursorName_;
// search for this hostvar in the input hvar list
for (i = 0; i < inputVars().entries(); i++)
{
ValueId val_id = inputVars()[i];
if (cursorHvar->getOperatorType() == ITM_HOSTVAR)
{
HostVar * hv = (HostVar *)(val_id.getItemExpr());
if (hv->getName() == cursorHvar->getName())
#pragma nowarn(1506) // warning elimination
fetchedCursorHvar = (short)i+1; // 1-based
#pragma warn(1506) // warning elimination
}
} // more input
} // cursor name is in a hvar
}
// Create a list of update columns for UPDATE CURRENT OF or updateable
// SELECT statements (cursors). For UPDATE CURRENT OF, the update columns
// are obtained from the GenericUpdate Node. For cursors, the update columns
// are obtained from this (RelRoot) node.
Lng32 numUpdateCol = 0;
Lng32 *updateColList = NULL;
if (updateCurrentOf()) // UPDATE/DELETE ... where CURRENT OF query.
{
GenAssert(updateCol().entries() == 0,
"UPDATE CURRENT OF: updateCol non-zero");
// Get the update node.
GenericUpdate *update = (GenericUpdate*)generator->updateCurrentOfRel();
GenAssert(update != NULL, "UPDATE CURRENT OF: NULL update node");
/*
// create the update col list for UPDATE...WHERE CURRENT OF CURSOR only.
// The updateCurrentOf() is set for both UPDATE and DELETE queries.
if ((update->getOperatorType() == REL_DP2_UPDATE_CURSOR) ||
(update->getOperatorType() == REL_DP2_UPDATE_UNIQUE))
{
// Get the list of assignment expressions from the STOI of the
// update node.
SqlTableOpenInfo *updateStoi = update->getOptStoi()->getStoi();
// Allocate an array for the column list.
numUpdateCol = updateStoi->getColumnListCount();
GenAssert(numUpdateCol > 0,
"UPDATE CURRENT OF: No update columns");
updateColList = new(space) Lng32[numUpdateCol];
// Populate the array with the update columns from the left side
// of each expression which is a column.
for (i = 0; (Lng32)i < numUpdateCol; i++)
{
updateColList[i] = updateStoi->getUpdateColumn(i);
}
} // update...where current of.
*/
}
else if (updatableSelect())
{
numUpdateCol = updateCol().entries();
if (numUpdateCol > 0)
{
// Allocate an array for the column list.
updateColList = new(space) Lng32[numUpdateCol];
// Populate the array with the update columns of this node.
for (i = 0; (Lng32)i < numUpdateCol; i++)
{
ValueId val_id = updateCol()[i];
GenAssert(val_id.getItemExpr()->getOperatorType() == ITM_BASECOLUMN,
"UpdateCol should be BaseColumn");
BaseColumn *col = (BaseColumn*)val_id.getItemExpr();
updateColList[i] = col->getColNumber();
}
}
else
{
// All columns are updateable.
numUpdateCol = -1;
}
}
// copy all the tables open information into the generator space
// and pass it to the root tdb, which is used to check the
// security when opening the sql tables or views.
short noOfTables = (short) generator->getSqlTableOpenInfoList().entries();
SqlTableOpenInfo **stoiList;
stoiList = new (space) SqlTableOpenInfo*[noOfTables];
// The Executor Statement class has logic to retry blown away
// opens on some statements. We can safely do so if we know that
// disk has not been dirtied, and no rows have been returned
// to the application.
// The Executor can deduce this if the blown away open occurs
// on the first input row, and if the plan writes to at most
// one object. This follows because an open can be blown away
// only if no locks are held on any partition of the object.
// The two variables below are used to compute if the plan
// accesses more than one object. If not, and if the query is
// an IUD, we will flag the root tdb as retryable.
NABoolean moreThanOneTable = FALSE;
char *firstTable = NULL;
// ++ Triggers
// While copying the stoi info, make sure the subjectTable flag is
// not set more than once for the same table ansi name
LIST(NAString) *subjectTables = NULL;
if (getTriggersList())
subjectTables = new (generator->wHeap())
LIST(NAString) (generator->wHeap());
short j=0;
for (; j < noOfTables; j++)
{
stoiList[j] = new (space) SqlTableOpenInfo;
SqlTableOpenInfo * genStoi =
(SqlTableOpenInfo *)generator->getSqlTableOpenInfoList()[j];
*(stoiList[j]) = *genStoi;
if (moreThanOneTable)
{
// No need to check any further.
}
else
{
if (firstTable == NULL)
{
firstTable = genStoi->fileName();
}
else if (stricmp( firstTable, genStoi->fileName()))
{
// there is more than one distinct object name in the Stoi list
moreThanOneTable = TRUE;
}
}
stoiList[j]->setFileName(new (space) char[strlen(genStoi->fileName()) + 1]);
strcpy(stoiList[j]->fileName(), genStoi->fileName());
stoiList[j]->setAnsiName(new (space) char[strlen(genStoi->ansiName()) + 1]);
strcpy(stoiList[j]->nonConstAnsiName(), genStoi->ansiName());
// -- Triggers
// Prevent duplicate entries for the same subject table
// and entries for views
NAString const ansiName(genStoi->ansiName(), generator->wHeap());
if (genStoi->subjectTable() && subjectTables &&
!(subjectTables->contains(ansiName)) && !genStoi->isView())
{
stoiList[j]->setSubjectTable(TRUE);
subjectTables->insert(ansiName);
}
else
stoiList[j]->setSubjectTable(FALSE);
if (genStoi->getColumnListCount())
{
stoiList[j]->setColumnList(new (space)
short[genStoi->getColumnListCount()]);
for (short k = 0; k < genStoi->getColumnListCount(); k++)
{
stoiList[j]->setUpdateColumn(k,genStoi->getUpdateColumn(k));
}
}
}
// copy the triggers list into the generator space
// and pass it to the root tdb, where it is used to check the
// enable/disable status of triggers.
short triggersCount = 0;
ComTimestamp *triggersList = NULL;
if (getTriggersList())
{
GenAssert(subjectTables && (subjectTables->entries() > 0),
"Mismatch: Triggers without Subject Tables");
delete subjectTables;
subjectTables = NULL;
triggersCount = (short) getTriggersList()->entries();
triggersList = new (space) ComTimestamp[triggersCount];
for (short k=0; k < triggersCount; k++)
triggersList[k] = getTriggersList()->at(k);
}
// copy uninitializedMvList into generator space
// to pass to root tdb
UninitializedMvName *newMvList = NULL;
short uninitializedMvCount = 0;
if (uninitializedMvList_)
{
uninitializedMvCount = (short)uninitializedMvList_->entries();
if( uninitializedMvCount != 0 )
{
newMvList = new (space) UninitializedMvName[uninitializedMvCount];
for( short i = 0; i < uninitializedMvCount; i++ )
{
UninitializedMvName *pMvName = uninitializedMvList_->at(i);
GenAssert( pMvName, "UninitializedMvName is invalid." );
newMvList[i].setPhysicalName( pMvName->getPhysicalName() );
newMvList[i].setAnsiName( pMvName->getAnsiName() );
}
}
}
// if there were any views referenced in the query, copy the stoi
// to root tdb. This is used at runtime to check for existence.
Queue * viewStoiList = NULL;
if (getViewStoiList().entries() > 0)
{
for (CollIndex i = 0; i < getViewStoiList().entries(); i++)
{
if (! viewStoiList)
viewStoiList = new(space) Queue(space);
SqlTableOpenInfo * stoi = new(space) SqlTableOpenInfo;
*stoi = *getViewStoiList()[i]->getStoi();
stoi->setFileName(
new(space) char[strlen(getViewStoiList()[i]->getStoi()->fileName()) + 1]);
strcpy(stoi->fileName(), getViewStoiList()[i]->getStoi()->fileName());
stoi->setAnsiName(
new(space) char[strlen(getViewStoiList()[i]->getStoi()->ansiName()) + 1]);
strcpy(stoi->nonConstAnsiName(), getViewStoiList()[i]->getStoi()->ansiName());
if (getViewStoiList()[i]->getStoi()->getColumnListCount())
{
stoi->setColumnList(new (space)
short[getViewStoiList()[i]->getStoi()->getColumnListCount()]);
for (short k = 0; k < getViewStoiList()[i]->getStoi()->getColumnListCount(); k++)
{
stoi->setUpdateColumn(k,getViewStoiList()[i]->getStoi()->getUpdateColumn(k));
}
}
if (CmpCommon::getDefault(VALIDATE_VIEWS_AT_OPEN_TIME) == DF_ON)
stoi->setValidateViewsAtOpenTime(TRUE);
else
stoi->setValidateViewsAtOpenTime(FALSE);
viewStoiList->insert(stoi);
// if this view name was used as a variable (hvar, envvar or define),
// then add it to the latename info list.
HostVar * hv = getViewStoiList()[i]->getCorrName().getPrototype();
if (hv != NULL)
{
LateNameInfo* lateNameInfo = new(generator->wHeap()) LateNameInfo();
char * varName;
GenAssert(hv->getName().data(), "Hostvar pointer must have name");
#pragma nowarn(1506) // warning elimination
lateNameInfo->setEnvVar(hv->isEnvVar());
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
lateNameInfo->setDefine(hv->isDefine());
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
lateNameInfo->setCachedParam(hv->isCachedParam());
#pragma warn(1506) // warning elimination
varName = convertNAString(hv->getName(), generator->wHeap());
strcpy(lateNameInfo->variableName(), varName);
char * prototypeValue = convertNAString(hv->getPrototypeValue(),
generator->wHeap());
char * compileTimeAnsiName = prototypeValue;
lateNameInfo->setVariable(1);
lateNameInfo->setView(1);
lateNameInfo->setCompileTimeName(compileTimeAnsiName, space);
lateNameInfo->setLastUsedName(compileTimeAnsiName, space);
lateNameInfo->setNameSpace(COM_TABLE_NAME);
lateNameInfo->setInputListIndex(-1);
generator->addLateNameInfo(lateNameInfo);
} // hv
} // for
}
// UDR Security
#pragma warning (disable : 4244) //warning elimination
#pragma nowarn(1506) // warning elimination
short noOfUdrs = generator->getBindWA()->getUdrStoiList().entries ();
#pragma warn(1506) // warning elimination
#pragma warning (default : 4244) //warning elimination
SqlTableOpenInfo **udrStoiList = NULL;
if ( noOfUdrs )
{
udrStoiList = new (space) SqlTableOpenInfo*[noOfUdrs];
BindWA *bindWA = generator->getBindWA ();
for (short udrIdx=0; udrIdx < noOfUdrs; udrIdx++)
{
udrStoiList[udrIdx] = new (space) SqlTableOpenInfo;
SqlTableOpenInfo *genUdrStoi =
(SqlTableOpenInfo *)bindWA->getUdrStoiList()[udrIdx]->getUdrStoi();
*(udrStoiList[udrIdx]) = *genUdrStoi;
udrStoiList[udrIdx]->setAnsiName(
new (space) char[strlen(genUdrStoi->ansiName()) + 1]
);
strcpy(udrStoiList[udrIdx]->nonConstAnsiName(), genUdrStoi->ansiName());
}
}
// setting transaction type flags in the transmode object
// Determines what type of transaction will be started for this statement,
// if autocommit is ON.
// Setting accessMode to read_write if it was set to read_only by MX.
// If isolation_level is read_uncommitted we set accessMode to read only.
// This causes trouble when we try to do DDL or IUD, so we are resetting
// accessMode here. From here on, accessMode is used only to start the transaction.
if ((transMode->accessMode() == TransMode::READ_ONLY_) &&
(generator->needsReadWriteTransaction()))
transMode->accessMode() = TransMode::READ_WRITE_ ;
if (generator->withNoRollbackUsed() ||
(transMode->getRollbackMode() == TransMode::NO_ROLLBACK_))
{
if (generator->withNoRollbackUsed())
transMode->rollbackMode() = TransMode::NO_ROLLBACK_IN_IUD_STATEMENT_ ;
// if (childOperType().match(REL_ANY_GEN_UPDATE))
// generator->setAqrEnabled(FALSE);
// AIInterval is set to don't abort (i.e. 0).
// A setting of -2 is equivalent to a setting of 0 but has the additional meaning
// that it will never be overridden by the executor.
if (transMode->getAutoAbortIntervalInSeconds() == -1)
transMode->autoAbortIntervalInSeconds() = -2;
}
else if ((NOT generator->needsReadWriteTransaction()) &&
transMode->accessMode() != TransMode::READ_ONLY_SPECIFIED_BY_USER_ &&
containsLRU() == FALSE &&
updatableSelect() == FALSE)
{
transMode->accessMode() = TransMode::READ_ONLY_ ;
}
if (transMode->getAutoAbortIntervalInSeconds() == -1)
{
if (transMode->accessMode() == TransMode::READ_ONLY_SPECIFIED_BY_USER_)
transMode->autoAbortIntervalInSeconds() = -2;
else if (transMode->accessMode() == TransMode::READ_ONLY_)
transMode->autoAbortIntervalInSeconds() = 0;
}
// create the latename info List to be passed on to root_tdb.
LateNameInfoList * lnil = NULL;
Int32 numEntries = 0;
if (generator->getLateNameInfoList().entries() > 0)
#pragma nowarn(1506) // warning elimination
numEntries = generator->getLateNameInfoList().entries();
#pragma warn(1506) // warning elimination
lnil =
(LateNameInfoList *)
space->allocateMemory( sizeof(LateNameInfoList) +
(numEntries * sizeof(LateNameInfo)) );
// Initialize LNIL from real LNIL object (this copies vtblptr into lnil).
LateNameInfoList lnild;
memcpy((char *)lnil,&lnild,sizeof(LateNameInfoList));
lnil->setNumEntries(generator->getLateNameInfoList().entries());
// This allocates an array of 64-bit pointers in lnil.
lnil->allocateList(space,numEntries);
// This sets up the array elements to point to the LateNameInfo objects.
for (j = 0; j < numEntries; j++)
lnil->setLateNameInfo(j,((LateNameInfo *)(lnil + 1)) + j);
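// Resulting layout in 'space' is, conceptually:
//   [ LateNameInfoList header | LateNameInfo[0] | LateNameInfo[1] | ... ]
// with lnil's pointer array addressing the LateNameInfo entries that
// immediately follow the header (lnil + 1).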
NABoolean definePresent = FALSE;
NABoolean viewPresent = FALSE;
NABoolean variablePresent = FALSE;
// olt opt is only done for tablenames which are literals
NABoolean doTablenameOltOpt = TRUE;
if (generator->getLateNameInfoList().entries() > 0)
{
for (CollIndex i = 0;
i < generator->getLateNameInfoList().entries(); i++)
{
#pragma nowarn(1506) // warning elimination
LateNameInfo * tgt = &(lnil->getLateNameInfo(i));
#pragma warn(1506) // warning elimination
LateNameInfo * src = (LateNameInfo *)generator->getLateNameInfoList()[i];
if (src->isVariable())
{
doTablenameOltOpt = FALSE;
if (src->isDefine())
definePresent = TRUE;
}
// *tgt = *src wouldn't work since it doesn't copy over the vtblptr.
memmove(tgt,src,sizeof(LateNameInfo));
// find the position of this hostvar in input var list.
if ((src->isVariable()) && (! src->isEnvVar()) && (! src->isDefine()))
{
if (tgt->isCachedParam())
{
tgt->setCachedParamOffset((Lng32)cachedAttrs[tgt->getInputListIndex()-1]->getOffset());
}
else
{
NABoolean found = FALSE;
for (CollIndex i = 0; ((i < newInputVars.entries()) && (! found)); i++)
{
ValueId val_id = newInputVars[i];
ItemExpr * item_expr = val_id.getItemExpr();
if (item_expr->getOperatorType() == ITM_HOSTVAR)
{
HostVar * inputHV = (HostVar *)item_expr;
if ((inputHV->getName().length() == strlen(src->variableName())) &&
(strcmp(inputHV->getName().data(), src->variableName()) == 0))
{
found = TRUE;
tgt->setInputListIndex((short)(i+1));
}
} // hostvar in input list
} // for
if (! found)
GenAssert(0, "Must find prototype hvar in input hvar");
}
} // not an env var or a define.
if (tgt->compileTimeAnsiName()[0] == '\\')
{
if (NOT tgt->isVariable())
{
if (NOT tgt->isMPalias())
tgt->setAnsiPhySame(TRUE);
else
tgt->setAnsiPhySame(FALSE);
}
else
{
if (tgt->isMPalias())
tgt->setAnsiPhySame(FALSE);
else if (tgt->isDefine())
tgt->setAnsiPhySame(TRUE);
else if (tgt->isEnvVar())
tgt->setAnsiPhySame(TRUE);
else
{
// hostvar
// If prototype is a fully qualified name, then
// ansi-phy names are the same.
if (tgt->compileTimeAnsiName()[0] == '\\')
tgt->setAnsiPhySame(TRUE);
};
}
}
else
{
if (tgt->isVariable())
{
QualifiedName qn(tgt->compileTimeAnsiName(), 1,
generator->wHeap(),
generator->getBindWA());
qn.applyDefaults(generator->currentCmpContext()->schemaDB_->getDefaultSchema());
char * compileTimeAnsiName = space->AllocateAndCopyToAlignedSpace(
qn.getQualifiedNameAsAnsiString(), 0);
tgt->setCompileTimeName(compileTimeAnsiName, space);
if (tgt->isView())
tgt->setMPalias(1);
}
} // else
if (tgt->isView())
viewPresent = TRUE;
if (tgt->isVariable())
variablePresent = TRUE;
if (tgt->lastUsedAnsiName()[0] != '\0')
{
// VO, Metadata Indexes
if (tgt->getNameSpace() == COM_INDEX_NAME)
// This lni is for an index - don't copy the compile time ansi name if the
// query is from a system module
if ( (generator->currentCmpContext()->internalCompile() != CmpContext::INTERNAL_MODULENAME) &&
!CmpCommon::statement()->isSMDRecompile() )
tgt->setLastUsedName(tgt->compileTimeAnsiName(),space);
}
// Special handling for case where we are recompiling a system module
// query. We need to resolve the name here since it will not go
// through the resolveNames in the CLI. Do this only for NSK
} // for
}
#pragma nowarn(1506) // warning elimination
lnil->setDefinePresent(definePresent);
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
lnil->setViewPresent(viewPresent);
#pragma warn(1506) // warning elimination
lnil->setVariablePresent(variablePresent);
// Generate info to do similarity check.
TrafQuerySimilarityInfo * qsi = genSimilarityInfo(generator);
// generate the executor fragment directory <exFragDir> (list of all
// fragments of the plan that are executed locally or are downloaded
// to DP2 or to ESPs) from the generator's copy <compFragDir> and attach
// it to the root_tdb
NABoolean fragmentQuotas = CmpCommon::getDefault(ESP_MULTI_FRAGMENT_QUOTAS) == DF_ON;
ExFragDir *exFragDir =
#pragma nowarn(1506) // warning elimination
new(space) ExFragDir(compFragDir->entries(),space,
CmpCommon::getDefault(ESP_MULTI_FRAGMENTS) == DF_ON,
fragmentQuotas,
(UInt16)CmpCommon::getDefaultLong(ESP_MULTI_FRAGMENT_QUOTA_VM),
fragmentQuotas ?
(UInt8)CmpCommon::getDefaultLong(ESP_NUM_FRAGMENTS_WITH_QUOTAS) :
(UInt8)CmpCommon::getDefaultLong(ESP_NUM_FRAGMENTS));
#pragma warn(1506) // warning elimination
// We compute the space needed at execution time for input Rowset variables
for (i = 0; i < inputVars().entries(); i++)
{
ValueId val_id = inputVars()[i];
ItemExpr * item_expr = val_id.getItemExpr();
OperatorTypeEnum op = item_expr->getOperatorType();
if (op == ITM_HOSTVAR) {
HostVar *hostVar = (HostVar *) item_expr;
if (hostVar->getType()->getTypeQualifier() == NA_ROWSET_TYPE) {
Lng32 thisSize = hostVar->getType()->getTotalSize();
input_vars_size += thisSize;
}
}
}
// find out if this is a delete where current of query.
NABoolean delCurrOf = FALSE;
short baseTablenamePosition = -1;
if (fetchedCursorName || (fetchedCursorHvar >= 0)) // upd/del curr of
{
if (childOperType() == REL_UNARY_DELETE)
delCurrOf = TRUE;
else
delCurrOf = FALSE;
}
// The ansi names of the table specified in the cursor stmt must match
// the name specified in the upd/del where curr of stmt. This check
// is done at runtime.
// basetablenameposition is the index in the latenameinfolist of
// the entry whose lastUsedAnsiName contains the name of the table.
baseTablenamePosition = -1;
if (updatableSelect())
{
// if this is an updatable select, find the ansi name of the basetable.
// This name will be used at runtime to compare to the tablename
// specified in an 'upd/del where current of' stmt. The two tablenames
// must be the same.
for (Int32 n = 0; n < noOfTables; n++)
{
SqlTableOpenInfo * stoi = stoiList[n];
if (NOT stoi->isIndex())
{
if (baseTablenamePosition == -1)
{
#pragma nowarn(1506) // warning elimination
baseTablenamePosition = n;
#pragma warn(1506) // warning elimination
}
}
}
if (baseTablenamePosition == -1)
{
// no base table access used. Only index access is used.
// The ansiname field in latenameInfo struct is the ansi name
// of the base table. Use that.
baseTablenamePosition = 0;
}
}
else if (updateCurrentOf())
{
// if this is an update/delete current of query, find the index of
// the base table. There might be other tables used in the plan
// for index maintenance and they will either be indices or
// specialTables with the special type being an INDEX_TABLE.
// Look only for the true base tables.
for (Int32 n = 0; n < noOfTables; n++)
{
SqlTableOpenInfo * stoi = stoiList[n];
if ((NOT stoi->isIndex()) &&
(NOT stoi->specialTable()) &&
(stoi->getUpdateAccess() ||
stoi->getDeleteAccess() ))
{
if (baseTablenamePosition == -1)
{
#pragma nowarn(1506) // warning elimination
baseTablenamePosition = n;
#pragma warn(1506) // warning elimination
}
}
}
if (baseTablenamePosition == -1)
{
// No base table found in the stoi list.
// Raise an error.
GenAssert(0, "Must find updelTableNamePosition!");
}
}
// find out if this was an update,delete or insert query.
NABoolean updDelInsert = FALSE;
if (childOperType().match(REL_ANY_GEN_UPDATE))
updDelInsert = TRUE;
// Do OLT optimization if:
// -- OLT optimization is possible
// -- and no upd/del where current of
// -- and no late name resolution
// -- and no views in query
NABoolean doOltQryOpt = FALSE;
if ((oltOptInfo().oltCliOpt()) &&
(viewStoiList == NULL) && // no views
(doTablenameOltOpt == TRUE) && // no late name info
(fetchedCursorName == NULL) && // no upd/del curr of
(fetchedCursorHvar < 0) &&
(delCurrOf == FALSE) &&
(getFirstNRows() == -1)) // no firstn specified
{
doOltQryOpt = TRUE;
}
// At runtime, we try to internally reexecute a statement in case of
// lost opens, if that query has not affected the database(inserted/updated/
// deleted a row), or a row has not been returned to the application.
// Do not retry for lost opens of IUD queries if there are more
// than one table in the query. This is because we don't know if the db
// was affected when the open was lost on the non-IUD table in the query.
// For ex: in an insert...select query, an open could be blown away for
// the select part of the query.
// If there is only one table in this query, then that row will get locked
// during IUD and the open could not be blown away.
// If some day we put in a scheme to detect that the database
// was not affected for a multi-table IUD, we can retry for lost
// opens.
NABoolean retryableStmt = TRUE;
if (generator->aqrEnabled())
{
retryableStmt = FALSE;
}
if (updDelInsert && moreThanOneTable)
retryableStmt = FALSE;
if (childOperType() == REL_DDL)
retryableStmt = FALSE;
if (isExtractProducer || isExtractConsumer)
retryableStmt = FALSE;
// For now we mark statements containing UDRs as non-retryable.
// This is to avoid executing a stored procedure body multiple times
// inside a single application request. Currently the only UDR-
// containing statement is CALL.
//
// There are scenarios however in which it would be correct (and
// helpful) to retry a UDR-containing statement. Perhaps in the
// future the restriction can be lifted in some cases. Safe retry
// scenarios include a subquery input parameter returning a blown
// away open error before the stored procedure body has executed,
// and UDR bodies that only do computation and not transactional work.
if (noOfUdrs > 0)
retryableStmt = FALSE;
short maxResultSets = generator->getBindWA()->getMaxResultSets();
char *queryCostInfoBuf = NULL;
QueryCostInfo *queryCostInfo =
(QueryCostInfo *) new (space) char[sizeof(QueryCostInfo)];
// fill in cost. Taken from explain code in GenExplain.cpp
if (getRollUpCost())
{
double cpu, io, msg, idle, seqIOs, randIOs, total, cardinality;
double totalMemPerCpu, totalMemPerCpuInKB;
short maxCpuUsage;
Lng32 dummy;
Cost const *operatorCost = getRollUpCost();
const NABoolean inMaster = generator->getEspLevel() == 0;
operatorCost->getExternalCostAttr(cpu, io, msg, idle, seqIOs, randIOs, total, dummy);
// operatorCost->getOcmCostAttr(cpu, io, msg, idle, dummy);
// total = MINOF(operatorCost->convertToElapsedTime(), 1e32).getValue();
cardinality = MINOF(getEstRowsUsed(), 1e32).value();
// get the totalMem that is being used, divide by max dop to get
// an estimate of memory usage per cpu. Convert to KB units.
totalMemPerCpu =
generator->getTotalEstimatedMemory() /
((generator->compilerStatsInfo().dop() > 0) ?
generator->compilerStatsInfo().dop() : 1);
totalMemPerCpuInKB = totalMemPerCpu / 1024 ;
maxCpuUsage = generator->getMaxCpuUsage() ;
queryCostInfo->setCostInfo(cpu, io, msg, idle, seqIOs, randIOs, total,
cardinality, totalMemPerCpuInKB, maxCpuUsage);
// if resourceUsage need to be set (low/medium/high), set it here.
// For now, set to 0 which indicates that this value is not
// being returned.
queryCostInfo->setResourceUsage(0);
}
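// Worked example (hypothetical numbers): a total estimated memory of 64 MB
// with dop = 8 gives totalMemPerCpu = 8 MB, i.e. totalMemPerCpuInKB = 8192.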
queryCostInfoBuf = (char*)queryCostInfo;
//
// CompilationStatsData
CompilationStats* stats = CURRENTSTMT->getCompilationStats();
char *compilerId = new (space) char[COMPILER_ID_LEN];
str_cpy_all(compilerId, generator->currentCmpContext()->getCompilerId(),
COMPILER_ID_LEN);
Int32 cLen = stats->getCompileInfoLen();
//
// make it 1 at minimum
cLen = ( cLen < 1 ) ? 1 : cLen;
char *compileInfo = new (space) char[cLen];
stats->getCompileInfo(compileInfo);
//
// Some of the fields are set here but modified later after generator phase is
// complete (such as compileEndTime, CMP_PHASE_ALL, and CMP_PHASE_GENERATOR)
CompilationStatsData *compilationStatsData =
(CompilationStatsData *) new (space)
CompilationStatsData(stats->compileStartTime(),
stats->compileEndTime(),
compilerId,
stats->cmpPhaseLength(CompilationStats::CMP_PHASE_ALL),
stats->cmpPhaseLength(CompilationStats::CMP_PHASE_BINDER),
stats->cmpPhaseLength(CompilationStats::CMP_PHASE_NORMALIZER),
stats->cmpPhaseLength(CompilationStats::CMP_PHASE_ANALYZER),
stats->cmpPhaseLength(CompilationStats::CMP_PHASE_OPTIMIZER),
stats->cmpPhaseLength(CompilationStats::CMP_PHASE_GENERATOR),
stats->metadataCacheHits(),
stats->metadataCacheLookups(),
stats->getQueryCacheState(),
stats->histogramCacheHits(),
stats->histogramCacheLookups(),
stats->stmtHeapCurrentSize(),
stats->cxtHeapCurrentSize(),
stats->optimizationTasks(),
stats->optimizationContexts(),
stats->isRecompile(),
compileInfo,
stats->getCompileInfoLen());
//
// CompilerStats
char *compilerStatsInfoBuf = NULL;
CompilerStatsInfo *compilerStatsInfo =
(CompilerStatsInfo *) new (space) char[sizeof(CompilerStatsInfo)];
compilerStatsInfoBuf = (char*)compilerStatsInfo;
*compilerStatsInfo = generator->compilerStatsInfo();
// remove the duplicated entries from the schema label list, and put the
// unique entries in TDB. At execution time, we will check the
// LastModTimestamp of the schema label. If the lastModTimestamp has changed,
// a timestamp mismatch will be returned, allowing for recompilation of the
// query.
NABoolean validateSSTSFlag = TRUE;
CollIndex numObjectUIDs = generator->objectUids().entries();
Int64 *objectUIDsPtr = NULL;
if (numObjectUIDs > 0)
{
objectUIDsPtr = new (space) Int64[numObjectUIDs];
for (CollIndex i = 0; i < numObjectUIDs; i++)
objectUIDsPtr[i] = generator->objectUids()[i];
}
Queue * listOfSnapshotscanTables = NULL;
NAString tmpLocNAS;
char * tmpLoc = NULL;
Int64 numObjectNames = generator->objectNames().entries();
if (numObjectNames >0)
{
listOfSnapshotscanTables = new(space) Queue(space);
for (Lng32 i=0 ; i <generator->objectNames().entries(); i++)
{
char * nm = space->allocateAlignedSpace(generator->objectNames()[i].length() + 1);
strcpy(nm, generator->objectNames()[i].data());
listOfSnapshotscanTables->insert(nm);
}
tmpLocNAS = generator->getSnapshotScanTmpLocation();
CMPASSERT(tmpLocNAS[tmpLocNAS.length()-1] =='/');
tmpLoc = space->allocateAlignedSpace(tmpLocNAS.length() + 1);
strcpy(tmpLoc, tmpLocNAS.data());
}
// for describe type commands (showshape, showplan, explain) we don't
// need to pass in the actual param values even if the query contains
// params. Reset input_expr. This is done to avoid returning
// an error later if the actual param value is not set.
OperatorTypeEnum child_op_type = childOperType();
if (child_op_type == REL_EXE_UTIL || child_op_type == REL_DESCRIBE)
{
RelExpr * lc = child(0)->castToRelExpr();
OperatorTypeEnum actual_op_type = lc->getOperatorType();
if (actual_op_type == REL_EXE_UTIL)
{
ExeUtilExpr *e = (ExeUtilExpr *)lc;
if (e->getExeUtilType() == ExeUtilExpr::DISPLAY_EXPLAIN_)
input_expr = NULL;
}
else if (actual_op_type == REL_DESCRIBE)
{
Describe *d = (Describe *)lc;
if (d->getFormat() == Describe::SHAPE_
|| d->getFormat() == Describe::PLAN_)
input_expr = NULL;
}
}
// ---------------------------------------------------------------------
// now initialize the previously allocated root tdb. note that this
// init *must* come before we fill in the exFragDir's info because there
// we compute how much space is used by the fragment the root is in.
// This init() call passes in the space object. The root might allocate
// more space for its uses inside init().
// ---------------------------------------------------------------------
#pragma warning (disable : 4244) //warning elimination
root_tdb->init(child_tdb,
cri_desc, // input to child
(InputOutputExpr *)input_expr,
(InputOutputExpr *)output_expr,
#pragma nowarn(1506) // warning elimination
input_vars_size,
#pragma warn(1506) // warning elimination
pkey_expr,
pkey_len,
pred_expr,
work_cri_desc,
exFragDir,
transMode,
fetchedCursorName,
fetchedCursorHvar,
delCurrOf,
numUpdateCol,
updateColList,
(outputVarCntValid() && outputVarCnt()),
noOfTables,
getFirstNRows(),
userInputVars,
(getRollUpCost() ?
getRollUpCost()->displayTotalCost().getValue() : 0),
stoiList,
lnil,
viewStoiList,
qsi,
space,
uniqueExecuteIdOffset, //++ Triggers -
triggersStatusOffset,
triggersCount,
triggersList,
(short)generator->getTempTableId(),
(short)baseTablenamePosition,
updDelInsert,
retryableStmt,
getGroupAttr()->isStream(),
// next flag is set for destructive stream access protocol.
// Not needed for hbase/seabase access.
(getGroupAttr()->isEmbeddedUpdateOrDelete() &&
(NOT hdfsAccess())),
#pragma nowarn(1506) // warning elimination
CmpCommon::getDefaultNumeric(STREAM_TIMEOUT),
#pragma warn(1506) // warning elimination
generator->getPlanId(),
qCacheInfoBuf,
#pragma nowarn(1506) // warning elimination
cacheVarsSize,
#pragma warn(1506) // warning elimination
udrStoiList,
noOfUdrs,
maxResultSets,
queryCostInfoBuf,
newMvList,
uninitializedMvCount,
compilerStatsInfoBuf,
rwrsInfoBuf,
numObjectUIDs ,
objectUIDsPtr,
compilationStatsData,
tmpLoc,
listOfSnapshotscanTables);
#pragma warning (default : 4244) //warning elimination
root_tdb->setTdbId(generator->getAndIncTdbId());
if (childTdbIsNull)
root_tdb->setChildTdbIsNull();
if (generator->explainInRms())
root_tdb->setExplainInRms();
OperatorTypeEnum childOper = childOperType();
if (qCacheInfoIsClass)
root_tdb->setQCacheInfoIsClass(TRUE);
if (getHostArraysArea() && getHostArraysArea()->getRowwiseRowset())
{
root_tdb->setRowwiseRowsetInput(TRUE);
}
else
{
NABoolean singleRowInput = TRUE;
if ((input_expr) && ((InputOutputExpr *)input_expr)->isCall())
singleRowInput = FALSE;
if (CmpCommon::getDefault(COMP_BOOL_92) == DF_OFF)
singleRowInput = FALSE;
root_tdb->setSingleRowInput(singleRowInput);
}
if (childOper == REL_DDL)
{
root_tdb->setDDLQuery(TRUE);
}
root_tdb->setCIFON(isCIFOn_);
if (generator->currentCmpContext()->isEmbeddedArkcmp())
//if (IdentifyMyself::GetMyName() == I_AM_EMBEDDED_SQL_COMPILER)
root_tdb->setEmbeddedCompiler(TRUE);
else
root_tdb->setEmbeddedCompiler(FALSE);
// We check to see if this tree corresponds to a compound statement so
// we know this at execution time
RelExpr* checkNode = child(0);
if ( checkNode->getOperatorType() == REL_EXCHANGE )
checkNode = checkNode->child(0);
if ( checkNode->getOperatorType() == REL_PARTITION_ACCESS )
checkNode = checkNode->child(0);
if (checkNode->getOperatorType() == REL_COMPOUND_STMT ||
(checkNode->getOperatorType() == REL_UNION ||
checkNode->getOperatorType() == REL_MERGE_UNION)
&&
((Union *) (RelExpr *) checkNode)->getUnionForIF()) {
root_tdb->setCompoundStatement();
}
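// Note: '&&' binds tighter than '||' above, so the getUnionForIF() test
// applies only to the UNION/MERGE_UNION case; a REL_COMPOUND_STMT child
// marks the plan as a compound statement unconditionally.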
root_tdb->setDoOltQueryOpt(doOltQryOpt);
root_tdb->setQueryType(ComTdbRoot::SQL_OTHER);
// set the EMS Event Experience Level information
// the default is ADVANCED if it is not specified
if (CmpCommon::getDefault(USER_EXPERIENCE_LEVEL) == DF_BEGINNER)
{
root_tdb->setEMSEventExperienceLevelBeginner(TRUE);
}
if (CmpCommon::getDefault(UNC_PROCESS) == DF_ON)
{
root_tdb->setUncProcess(TRUE);
}
// If this is a ustat query set the query type so WMS can monitor it
if (childOper == REL_DDL)
{
DDLExpr *ddlExpr = (DDLExpr *)child(0)->castToRelExpr();
char * stmt = ddlExpr->getDDLStmtText();
NAString ddlStr = NAString(stmt);
ddlStr = ddlStr.strip(NAString::leading, ' ');
// If this is a ustat statement, set the type
Int32 foundUpdStat = 0;
// check if the first token is UPDATE
size_t position = ddlStr.index("UPDATE", 0, NAString::ignoreCase);
if (position == 0)
{
// found UPDATE. See if the next token is STATISTICS.
ddlStr = ddlStr(6, ddlStr.length()-6); // skip over UPDATE
ddlStr = ddlStr.strip(NAString::leading, ' ');
position = ddlStr.index("STATISTICS", 0, NAString::ignoreCase);
if (position == 0)
foundUpdStat = -1;
}
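// For example, DDL text beginning with "UPDATE STATISTICS ..." (in any case,
// with arbitrary leading blanks) sets foundUpdStat; any other DDL text
// leaves it at 0.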
if (foundUpdStat)
{
root_tdb->setQueryType(ComTdbRoot::SQL_CAT_UTIL);
}
}
// Disable Cancel for some queries. But start the logic with
// "all queries can be canceled."
root_tdb->setMayNotCancel(FALSE);
// Disallow cancel.
if (CmpCommon::getDefault(COMP_BOOL_20) == DF_ON)
root_tdb->setMayNotCancel(TRUE);
if (generator->mayNotCancel())
root_tdb->setMayNotCancel(TRUE);
if (updDelInsert)
{
if ((childOper == REL_UNARY_INSERT) ||
(childOper == REL_LEAF_INSERT) ||
(childOper == REL_INSERT_CURSOR))
root_tdb->setQueryType(ComTdbRoot::SQL_INSERT_NON_UNIQUE);
else if ((childOper == REL_UNARY_UPDATE) ||
(childOper == REL_LEAF_UPDATE) ||
(childOper == REL_UPDATE_CURSOR))
root_tdb->setQueryType(ComTdbRoot::SQL_UPDATE_NON_UNIQUE);
else if ((childOper == REL_UNARY_DELETE) ||
(childOper == REL_LEAF_DELETE) ||
(childOper == REL_DELETE_CURSOR))
root_tdb->setQueryType(ComTdbRoot::SQL_DELETE_NON_UNIQUE);
}
if (output_expr)
root_tdb->setQueryType(ComTdbRoot::SQL_SELECT_NON_UNIQUE);
if ((updDelInsert) &&
(root_tdb->getQueryType() == ComTdbRoot::SQL_INSERT_NON_UNIQUE) &&
(rwrsInfo))
{
root_tdb->setQueryType(ComTdbRoot::SQL_INSERT_RWRS);
}
else if ((child(0)) &&
(child(0)->castToRelExpr()->getOperatorType() == REL_UTIL_INTERNALSP))
{
root_tdb->setQueryType(ComTdbRoot::SQL_CAT_UTIL);
}
else if ((child(0)) &&
(child(0)->castToRelExpr()->getOperatorType() == REL_DESCRIBE))
{
root_tdb->setSubqueryType(ComTdbRoot::SQL_DESCRIBE_QUERY);
}
else if ((child(0)) &&
(child(0)->castToRelExpr()->getOperatorType() == REL_EXE_UTIL))
{
root_tdb->setQueryType(ComTdbRoot::SQL_EXE_UTIL);
ExeUtilExpr * exeUtil = (ExeUtilExpr*)child(0)->castToRelExpr();
if (exeUtil->getExeUtilType() == ExeUtilExpr::CREATE_TABLE_AS_)
{
if (CmpCommon::getDefault(REDRIVE_CTAS) == DF_OFF)
root_tdb->setQueryType(ComTdbRoot::SQL_INSERT_NON_UNIQUE);
else
root_tdb->setSubqueryType(ComTdbRoot::SQL_STMT_CTAS);
}
else if (exeUtil->getExeUtilType() == ExeUtilExpr::GET_STATISTICS_)
root_tdb->setSubqueryType(ComTdbRoot::SQL_STMT_GET_STATISTICS);
else if (exeUtil->getExeUtilType() == ExeUtilExpr::DISPLAY_EXPLAIN_)
{
root_tdb->setSubqueryType(ComTdbRoot::SQL_DISPLAY_EXPLAIN);
if (CmpCommon::getDefault(EXE_UTIL_RWRS) == DF_ON)
root_tdb->setExeUtilRwrs(TRUE);
}
else if (exeUtil->getExeUtilType() == ExeUtilExpr::HBASE_COPROC_AGGR_)
root_tdb->setQueryType(ComTdbRoot::SQL_SELECT_NON_UNIQUE);
else if (exeUtil->getExeUtilType() == ExeUtilExpr::HBASE_LOAD_)
{
root_tdb->setSubqueryType(ComTdbRoot::SQL_STMT_HBASE_LOAD);
}
else if (exeUtil->getExeUtilType() == ExeUtilExpr::HBASE_UNLOAD_)
{
root_tdb->setSubqueryType(ComTdbRoot::SQL_STMT_HBASE_UNLOAD);
}
else if (exeUtil->getExeUtilType() == ExeUtilExpr::LOB_EXTRACT_)
{
root_tdb->setSubqueryType(ComTdbRoot::SQL_STMT_LOB_EXTRACT);
}
else if (exeUtil->getExeUtilType() == ExeUtilExpr::LOB_UPDATE_UTIL_)
{
root_tdb->setSubqueryType(ComTdbRoot::SQL_STMT_LOB_UPDATE_UTIL);
}
else if (exeUtil->isExeUtilQueryType())
{
root_tdb->setQueryType(ComTdbRoot::SQL_EXE_UTIL);
}
}
else if ((child(0)) &&
(child(0)->castToRelExpr()->getOperatorType() == REL_DDL))
{
DDLExpr *ddlExpr = (DDLExpr *)child(0)->castToRelExpr();
if (ddlExpr->producesOutput())
root_tdb->setQueryType(ComTdbRoot::SQL_EXE_UTIL);
}
else if (generator->getBindWA()->hasCallStmts())
{
// In this version of the compiler we assume any statement that
// contains UDRs is a CALL statement.
if (maxResultSets > 0)
root_tdb->setQueryType(ComTdbRoot::SQL_CALL_WITH_RESULT_SETS);
else
root_tdb->setQueryType(ComTdbRoot::SQL_CALL_NO_RESULT_SETS);
}
else
{
OperatorTypeEnum currChildOper =
child(0)->castToRelExpr()->getOperatorType();
if (currChildOper == REL_CONTROL_QUERY_DEFAULT)
{
root_tdb->setMayNotCancel(TRUE);
ControlQueryDefault * cqd =
(ControlQueryDefault*)child(0)->castToRelExpr();
if (cqd->dynamic())
{
if (cqd->getAttrEnum() == CATALOG)
root_tdb->setQueryType(ComTdbRoot::SQL_SET_CATALOG);
else if (cqd->getAttrEnum() == SCHEMA)
root_tdb->setQueryType(ComTdbRoot::SQL_SET_SCHEMA);
else
root_tdb->setQueryType(ComTdbRoot::SQL_CONTROL);
}
else
root_tdb->setQueryType(ComTdbRoot::SQL_CONTROL);
}
else if ((currChildOper == REL_CONTROL_QUERY_SHAPE) ||
(currChildOper == REL_CONTROL_TABLE))
{
root_tdb->setMayNotCancel(TRUE);
root_tdb->setQueryType(ComTdbRoot::SQL_CONTROL);
}
else if (currChildOper == REL_TRANSACTION)
{
root_tdb->setMayNotCancel(TRUE);
if (((RelTransaction*)child(0)->castToRelExpr())->getType() == SET_TRANSACTION_)
root_tdb->setQueryType(ComTdbRoot::SQL_SET_TRANSACTION);
}
else if (currChildOper == REL_SP_PROXY)
{
// This is a stored procedure result set
root_tdb->setQueryType(ComTdbRoot::SQL_SP_RESULT_SET);
}
else if (currChildOper == REL_EXE_UTIL)
{
ExeUtilExpr * exeUtil = (ExeUtilExpr*)child(0)->castToRelExpr();
if (exeUtil->getExeUtilType() == ExeUtilExpr::CREATE_TABLE_AS_)
root_tdb->setQueryType(ComTdbRoot::SQL_INSERT_NON_UNIQUE);
}
else if (REL_EXPLAIN == currChildOper)
root_tdb->setMayNotCancel(TRUE);
else if (REL_SET_TIMEOUT == currChildOper)
root_tdb->setMayNotCancel(TRUE);
else if (REL_CONTROL_RUNNING_QUERY == currChildOper)
root_tdb->setMayNotCancel(TRUE);
}
if (generator->isFastExtract())
{
root_tdb->setQueryType(ComTdbRoot::SQL_SELECT_UNLOAD);
}
if (child(0) && child(0)->castToRelExpr() &&
child(0)->castToRelExpr()->getOperator().match(REL_ANY_HBASE))
{
RelExpr * childExpr = child(0)->castToRelExpr();
OperatorTypeEnum currChildOper = childExpr->getOperatorType();
if ((childExpr->getOperator().match(REL_ANY_HBASE_GEN_UPDATE)) &&
(NOT output_expr))
{
GenericUpdate * gu = (GenericUpdate *)childExpr;
if (gu->uniqueHbaseOper())
{
if (currChildOper == REL_HBASE_UPDATE)
root_tdb->setQueryType(ComTdbRoot::SQL_UPDATE_UNIQUE);
else if (currChildOper == REL_HBASE_DELETE)
root_tdb->setQueryType(ComTdbRoot::SQL_DELETE_UNIQUE);
else
root_tdb->setQueryType(ComTdbRoot::SQL_INSERT_UNIQUE);
}
else
{
if (currChildOper == REL_HBASE_UPDATE)
root_tdb->setQueryType(ComTdbRoot::SQL_UPDATE_NON_UNIQUE);
else if (currChildOper == REL_HBASE_DELETE)
root_tdb->setQueryType(ComTdbRoot::SQL_DELETE_NON_UNIQUE);
else
root_tdb->setQueryType(ComTdbRoot::SQL_INSERT_NON_UNIQUE);
}
}
else if (currChildOper == REL_HBASE_ACCESS)
{
HbaseAccess * ha = (HbaseAccess *)childExpr;
if (ha->uniqueHbaseOper())
root_tdb->setQueryType(ComTdbRoot::SQL_SELECT_UNIQUE);
else
root_tdb->setQueryType(ComTdbRoot::SQL_SELECT_NON_UNIQUE);
}
}
// To help determine if it is safe to suspend.
if (child(0) &&
child(0)->castToRelExpr())
{
OperatorTypeEnum currChildOper =
child(0)->castToRelExpr()->getOperatorType();
if ((REL_DDL == currChildOper) ||
(REL_TRANSACTION == currChildOper) ||
(REL_EXE_UTIL == currChildOper))
root_tdb->setMayAlterDb(TRUE);
if (REL_LOCK == currChildOper)
root_tdb->setSuspendMayHoldLock(TRUE);
}
if (generator->anySerialiableScan())
root_tdb->setSuspendMayHoldLock(TRUE);
root_tdb->setOdbcQuery(CmpCommon::getDefault(ODBC_PROCESS) == DF_ON);
if (generator->getTolerateNonFatalError()) {
root_tdb->setTolerateNonFatalError(TRUE);
if (CmpCommon::getDefault(NOT_ATOMIC_FAILURE_LIMIT,0) == DF_SYSTEM)
root_tdb->setNotAtomicFailureLimit(ComCondition::NO_LIMIT_ON_ERROR_CONDITIONS);
else
root_tdb->setNotAtomicFailureLimit(CmpCommon::getDefaultLong(NOT_ATOMIC_FAILURE_LIMIT));
}
if (generator->embeddedIUDWithLast1()) {
root_tdb->setEmbeddedIUDWithLast1(TRUE);
}
if (generator->embeddedInsert()) {
root_tdb->setEmbeddedInsert(TRUE);
}
if (containsLRU())
{
root_tdb->setLRUOperation(TRUE);
}
if (generator->aqrEnabled())
root_tdb->setAqrEnabled(TRUE);
if (generator->cantReclaimQuery())
root_tdb->setCantReclaimQuery(TRUE);
// if a transaction is needed at runtime to execute this query,
// set that information in the root tdb. Generator synthesized
// this information based on the kind of query or if START_XN
// define was set.
// Certain queries (insert, update, delete) ALWAYS require a transaction
// at runtime.
// After parser support for REPEATABLE ACCESS, etc, is in, this
// information will come from the parse tree for scans.
if (generator->isTransactionNeeded())
{
root_tdb->setTransactionReqd();
if (generator->foundAnUpdate())
{
if (generator->updAbortOnError() == TRUE)
{
// if transaction has to be aborted at runtime after an error,
// set that info in root_tdb.
root_tdb->setUpdAbortOnError(-1);
}
else if (generator->updPartialOnError() == TRUE)
{
root_tdb->setUpdPartialOnError(-1);
}
else if (generator->updErrorInternalOnError() == TRUE)
{
root_tdb->setUpdErrorOnError(-1);
}
else if (generator->updErrorOnError() == FALSE)
{
if (generator->updSavepointOnError() == TRUE)
{
root_tdb->setUpdSavepointOnError(-1);
}
else
root_tdb->setUpdAbortOnError(-1);
}
else
root_tdb->setUpdErrorOnError(-1);
}
else
{
root_tdb->setUpdErrorOnError(-1);
}
} // transactionNeeded
if ((oltOptLean()) &&
(doOltQryOpt))
{
if ((NOT root_tdb->getUpdAbortOnError()) &&
(NOT root_tdb->getUpdSavepointOnError()) &&
(NOT root_tdb->getUpdPartialOnError()) &&
(NOT definePresent) &&
(retryableStmt) &&
(NOT root_tdb->thereIsACompoundStatement()))
{
root_tdb->setDoOltQueryOptLean(TRUE);
child_tdb->setDoOltQueryOptLean(TRUE);
}
}
if (generator->dp2XnsEnabled())
{
root_tdb->setDp2XnsEnabled(generator->dp2XnsEnabled());
}
if (generator->processLOB())
root_tdb->setProcessLOB(TRUE);
// Self-referencing updates
if (avoidHalloween_)
{
if (Generator::DP2LOCKS == generator->getHalloweenProtection())
{
// Plan was generated without resetting the generator's
// HalloweenProtectionType from DP2Locks, therefore we are
// using DP2 locks, and cannot allow auto commit off.
root_tdb->setCheckAutoCommit(TRUE);
}
}
else if (CmpCommon::getDefault(AQR_WNR_DELETE_NO_ROWCOUNT) == DF_ON)
{
// Allow non-ACID AQR of NO ROLLBACK DELETE that may have changed
// target. Query type (DELETE vs others) and WNR will be evaluated
// at runtime.
root_tdb->setAqrWnrDeleteContinue(TRUE);
}
if (CmpCommon::getDefault(PSHOLD_CLOSE_ON_ROLLBACK) == DF_ON)
root_tdb->setPsholdCloseOnRollback(TRUE);
else
root_tdb->setPsholdCloseOnRollback(FALSE);
if (CmpCommon::getDefault(PSHOLD_UPDATE_BEFORE_FETCH) == DF_ON)
root_tdb->setPsholdUpdateBeforeFetch(TRUE);
else
root_tdb->setPsholdUpdateBeforeFetch(FALSE);
root_tdb->setAbendType(
(Lng32) CmpCommon::getDefaultNumeric(COMP_INT_38) );
double cpuLimitCheckFreq = CmpCommon::getDefaultNumeric(COMP_INT_48);
if (cpuLimitCheckFreq > SHRT_MAX)
cpuLimitCheckFreq = SHRT_MAX;
root_tdb->setCpuLimitCheckFreq((short) cpuLimitCheckFreq);
// Config query execution limits.
Lng32 cpuLimit = (Lng32) CmpCommon::getDefaultNumeric(QUERY_LIMIT_SQL_PROCESS_CPU);
if (cpuLimit > 0)
root_tdb->setCpuLimit(cpuLimit);
if (CmpCommon::getDefault(QUERY_LIMIT_SQL_PROCESS_CPU_DEBUG) == DF_ON)
root_tdb->setQueryLimitDebug();
if (generator->inMemoryObjectDefn())
root_tdb->setInMemoryObjectDefn(TRUE);
if (CmpCommon::getDefault(READONLY_CURSOR) == DF_ON)
root_tdb->setCursorType(SQL_READONLY_CURSOR);
else
root_tdb->setCursorType(SQL_UPDATABLE_CURSOR);
if (CmpCommon::getDefault(WMS_QUERY_MONITORING) == DF_ON)
root_tdb->setWmsMonitorQuery(TRUE);
else
root_tdb->setWmsMonitorQuery(FALSE);
if (CmpCommon::getDefault(WMS_CHILD_QUERY_MONITORING) == DF_ON)
root_tdb->setWmsChildMonitorQuery(TRUE);
else
root_tdb->setWmsChildMonitorQuery(FALSE);
if (hdfsAccess())
root_tdb->setHdfsAccess(TRUE);
if(generator->hiveAccess())
root_tdb->setHiveAccess(TRUE);
Int32 numSikEntries = securityKeySet_.entries();
if (numSikEntries > 0)
{
ComSecurityKey * sikValues = new (space) ComSecurityKey[numSikEntries];
for (Int32 sv = 0; sv < numSikEntries; sv++)
sikValues[sv] = securityKeySet_[sv];
SecurityInvKeyInfo * sikInfo = new (space) SecurityInvKeyInfo(
numSikEntries, sikValues);
root_tdb->setSikInfo(sikInfo);
}
if (!generator->explainDisabled())
{
// finish up EXPLAIN
ExplainTuple *rootExplainTuple =
addExplainInfo(root_tdb, 0, 0, generator);
explainDesc->setExplainTreeRoot(rootExplainTuple);
ExplainTuple *childExplainTuple = generator->getExplainTuple();
rootExplainTuple->child(0) = childExplainTuple;
if(childExplainTuple)
{
childExplainTuple->setParent(rootExplainTuple);
rootExplainTuple->setChildSeqNum(
0,
childExplainTuple->getSeqNum());
}
generator->setExplainTuple(rootExplainTuple);
}
// Generate a list of scratch file options
exFragDir->setScratchFileOptions(genScratchFileOptions(generator));
// move ESP nodemask into frag dir
exFragDir->setNodeMask((ULng32) getDefault(PARALLEL_ESP_NODEMASK));
// generate the partition input data descriptor from the compile-time
// partitioning attributes
ExPartInputDataDesc **partInputDataDescs =
new(generator->wHeap()) ExPartInputDataDesc *[compFragDir->entries()];
ExEspNodeMap **nodeMap =
new(generator->wHeap()) ExEspNodeMap *[compFragDir->entries()];
for (i = 0; i < compFragDir->entries(); i++)
{
if (compFragDir->getPartitioningFunction(i) != NULL)
{
// This fragment has partitioning info, generate it
((PartitioningFunction *) compFragDir->getPartitioningFunction(i))->
codeGen(generator,
compFragDir->getPartInputDataLength(i));
partInputDataDescs[i] =
(ExPartInputDataDesc *) (generator->getGenObj());
NodeMap::codeGen(compFragDir->getPartitioningFunction(i),
compFragDir->getNumESPs(i),
generator);
nodeMap[i] = (ExEspNodeMap *) (generator->getGenObj());
}
else
{
partInputDataDescs[i] = NULL;
nodeMap[i] = NULL;
}
}
// remove maptable for child tree
generator->removeAll(map_table);
// remove my map table
generator->removeLast();
generator->setMapTable(NULL);
// move data entry by entry from the generator's copy into the executor copy
Lng32 offset = 0;
Lng32 currLength;
Lng32 compressThreshold = getDefault(FRAG_COMPRESSION_THRESHOLD);
NABoolean anyEspFragments = FALSE;
for (i = 0; i < compFragDir->entries(); i++)
{
// translate fragment type enums
ExFragDir::ExFragEntryType runTimeType = ExFragDir::MASTER;
currLength = compFragDir->getFragmentLength(i);
compilerStatsInfo->totalFragmentSize() += currLength;
switch (compFragDir->getType(i))
{
case FragmentDir::MASTER:
runTimeType = ExFragDir::MASTER;
compilerStatsInfo->masterFragmentSize() += currLength;
break;
case FragmentDir::DP2:
runTimeType = ExFragDir::DP2;
compilerStatsInfo->dp2FragmentSize() += currLength;
break;
case FragmentDir::ESP:
runTimeType = ExFragDir::ESP;
anyEspFragments = TRUE;
compilerStatsInfo->espFragmentSize() += currLength;
break;
case FragmentDir::EXPLAIN:
runTimeType = ExFragDir::EXPLAIN;
compilerStatsInfo->masterFragmentSize() += currLength;
break;
default:
ABORT("Internal error, invalid fragment type");
}
// take the pointer of the top-level object in this fragment and
// convert it to a fragment-relative offset
Lng32 offsetOfTopNode = compFragDir->getSpace(i)->
convertToOffset((char *)(compFragDir->getTopNode(i)));
// now set the values of the previously allocated directory entry
NABoolean mlimitPerCPU = CmpCommon::getDefaultLong(EXE_MEMORY_LIMIT_PER_CPU) > 0;
UInt16 BMOsMemoryUsage = 0;
if (mlimitPerCPU == TRUE)
BMOsMemoryUsage = (UInt16)compFragDir->getBMOsMemoryUsage(i);
else if (compFragDir->getNumBMOs(i) > 1 ||
(compFragDir->getNumBMOs(i) == 1 && CmpCommon::getDefault(EXE_SINGLE_BMO_QUOTA) == DF_ON))
BMOsMemoryUsage = (UInt16)CmpCommon::getDefaultLong(EXE_MEMORY_AVAILABLE_IN_MB);
exFragDir->set(i,
runTimeType,
(ExFragId) compFragDir->getParentId(i),
offset,
currLength,
-offsetOfTopNode,
partInputDataDescs[i],
nodeMap[i],
compFragDir->getNumESPs(i),
compFragDir->getEspLevel(i),
compFragDir->getNeedsTransaction(i),
(compressThreshold > 0 &&
runTimeType == ExFragDir::ESP &&
compressThreshold <= compFragDir->getNumESPs(i))?
TRUE: FALSE, // DP2 fragment will be compressed later
// if parent ESP fragment is compressed
// see executor/ex_frag_rt.cpp
compFragDir->getSoloFragment(i),
BMOsMemoryUsage,
compFragDir->getNumBMOs(i) > 0
);
offset += currLength;
} // for each fragment
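// At this point exFragDir describes one contiguous plan image, e.g.
// (hypothetical sizes) fragment 0 (MASTER) at offset 0, fragment 1 (ESP) at
// offset length(fragment 0), and so on; each entry records the (negated)
// fragment-relative offset of its top-level TDB.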
compilerStatsInfo->totalFragmentSize() /= 1024;
compilerStatsInfo->masterFragmentSize() /= 1024;
compilerStatsInfo->espFragmentSize() /= 1024;
compilerStatsInfo->dp2FragmentSize() /= 1024;
compilerStatsInfo->collectStatsType() = generator->collectStatsType();
compilerStatsInfo->udr() = noOfUdrs;
compilerStatsInfo->ofMode() = generator->getOverflowMode();
compilerStatsInfo->ofSize() = generator->getTotalOverflowMemory();
compilerStatsInfo->bmo() = generator->getTotalNumBMOs();
compilerStatsInfo->queryType() = (Int16)root_tdb->getQueryType();
compilerStatsInfo->subqueryType() = (Int16)root_tdb->getSubqueryType();
NADELETEBASIC(partInputDataDescs, generator->wHeap());
NADELETEBASIC(nodeMap, generator->wHeap());
// Genesis 10-990114-6293:
// don't recompile a SELECT query if transmode changes to READ ONLY.
if (readOnlyTransIsOK()) root_tdb->setReadonlyTransactionOK();
// Inserts into non-audited indexes do not need to run in a transaction,
// if one does not exist. If one exists (which is the case during a create
// index operation), need to pass transid to all ESPs during the load
// index phase, otherwise they will get error 73s returned when they open
// the index. Store this information in the root TDB, so that the transaction
// can be passed to ESPs if needed. Dp2Insert::codeGen has set this
// generator flag.
NABoolean recompWarn =
(CmpCommon::getDefault(RECOMPILATION_WARNINGS) == DF_ON);
if (recompWarn) root_tdb->setRecompWarn();
// Set the FROM_SHOWPLAN flag if the statement is from a showplan
const NAString * val =
ActiveControlDB()->getControlSessionValue("SHOWPLAN");
if ( !(childOperType_ == REL_CONTROL_SESSION)
&& (val) && (*val == "ON") )
root_tdb->setFromShowplan();
if (CmpCommon::getDefault(EXE_LOG_RETRY_IPC) == DF_ON)
root_tdb->setLogRetriedIpcErrors(TRUE);
if (anyEspFragments)
{
if (generator->getBindWA()->queryCanUseSeaMonster() &&
generator->getQueryUsesSM())
root_tdb->setQueryUsesSM();
}
generator->setGenObj(this, root_tdb);
return 0;
} // RelRoot::codeGen()
short Sort::generateTdb(Generator * generator,
ComTdb * child_tdb,
ex_expr * sortKeyExpr,
ex_expr * sortRecExpr,
ULng32 sortKeyLen,
ULng32 sortRecLen,
ULng32 sortPrefixKeyLen,
ex_cri_desc * given_desc,
ex_cri_desc * returned_desc,
ex_cri_desc * work_cri_desc,
Lng32 saveNumEsps,
ExplainTuple *childExplainTuple,
NABoolean resizeCifRecord,
NABoolean considerBufferDefrag,
NABoolean operatorCIF)
{
NADefaults &defs = ActiveSchemaDB()->getDefaults();
ULng32 numBuffers = (ULng32)getDefault(GEN_SORT_NUM_BUFFERS);
CostScalar bufferSize = getDefault(GEN_SORT_MAX_BUFFER_SIZE);
ULng32 bufferSize_as_ulong =
(ULng32)(MINOF(CostScalar(UINT_MAX), bufferSize)).getValue();
GenAssert(sortRecLen <= bufferSize_as_ulong,
"Record Len greater than GEN_SORT_MAX_BUFFER_SIZE");
ComTdbSort * sort_tdb = 0;
// always start with quick sort. Sort will switch to
// replacement sort in case of overflow at runtime.
SortOptions *sort_options = new(generator->getSpace()) SortOptions();
Lng32 max_num_buffers = (Lng32)numBuffers;
NAString tmp;
CmpCommon::getDefault(SORT_ALGO, tmp, -1);
if(tmp == "HEAP")
sort_options->sortType() = SortOptions::ITER_HEAP;
else if(tmp == "REPSEL")
sort_options->sortType() = SortOptions::REPLACEMENT_SELECT;
else if(tmp == "IQS")
sort_options->sortType() = SortOptions::ITER_QUICK;
else if(tmp == "QS")
sort_options->sortType() = SortOptions::QUICKSORT;
max_num_buffers = (Lng32)getDefault(GEN_SORT_MAX_NUM_BUFFERS);
sort_options->internalSort() = TRUE;
unsigned short threshold = (unsigned short) CmpCommon::getDefaultLong(SCRATCH_FREESPACE_THRESHOLD_PERCENT);
sort_options->scratchFreeSpaceThresholdPct() = threshold;
sort_options->sortMaxHeapSize() = (short)getDefault(SORT_MAX_HEAP_SIZE_MB);
sort_options->mergeBufferUnit() = (short)getDefault(SORT_MERGE_BUFFER_UNIT_56KB);
// 512 KB default size initialized in sort_options.
if(sortRecLen >= sort_options->scratchIOBlockSize())
{
Int32 maxScratchIOBlockSize = (Int32)getDefault(SCRATCH_IO_BLOCKSIZE_SORT_MAX);
GenAssert(sortRecLen <= maxScratchIOBlockSize,
"sortRecLen is greater than SCRATCH_IO_BLOCKSIZE_SORT_MAX");
sort_options->scratchIOBlockSize() = MINOF(sortRecLen * 128, maxScratchIOBlockSize);
}
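// For example (hypothetical record size), a 6 KB sort record yields
// MINOF(6 KB * 128, SCRATCH_IO_BLOCKSIZE_SORT_MAX) = 768 KB, unless capped
// by the CQD.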
sort_options->scratchIOVectorSize() = (Int16)getDefault(SCRATCH_IO_VECTOR_SIZE_SORT);
if (sortNRows())
{
sort_options->sortNRows() = TRUE;
}
if (CmpCommon::getDefault(EXE_BMO_SET_BUFFERED_WRITES) == DF_ON)
sort_options->setBufferedWrites(TRUE);
if (CmpCommon::getDefault(EXE_DIAGNOSTIC_EVENTS) == DF_ON)
sort_options->setLogDiagnostics(TRUE);
// Disable Compiler Hints checks if the CQD is ON, or if it is SYSTEM and overflow goes to disk (OFM_DISK or OFM_MMAP)
if (
(CmpCommon::getDefault(EXE_BMO_DISABLE_CMP_HINTS_OVERFLOW_SORT) == DF_ON)
||
(
((generator->getOverflowMode()== ComTdb::OFM_DISK) ||
(generator->getOverflowMode()== ComTdb::OFM_MMAP))
&&
(CmpCommon::getDefault(EXE_BMO_DISABLE_CMP_HINTS_OVERFLOW_SORT)
== DF_SYSTEM )
)
)
sort_options->setDisableCmpHintsOverflow(TRUE);
if (CmpCommon::getDefault(EXE_BMO_DISABLE_OVERFLOW) == DF_ON)
sort_options->dontOverflow() = TRUE;
if (CmpCommon::getDefault(SORT_INTERMEDIATE_SCRATCH_CLEANUP) == DF_ON)
sort_options->setIntermediateScratchCleanup(TRUE);
sort_options->setResizeCifRecord(resizeCifRecord);
sort_options->setConsiderBufferDefrag(considerBufferDefrag);
short memoryQuotaMB = 0;
if(CmpCommon::getDefault(SORT_MEMORY_QUOTA_SYSTEM) != DF_OFF)
{
// The CQD EXE_MEM_LIMIT_PER_BMO_IN_MB takes precedence over the memory quota system
memoryQuotaMB = (UInt16)defs.getAsDouble(EXE_MEM_LIMIT_PER_BMO_IN_MB);
if (memoryQuotaMB > 0) {
sort_options->memoryQuotaMB() = memoryQuotaMB;
} else {
UInt16 numBMOsInFrag = (UInt16)generator->getFragmentDir()->getNumBMOs();
// Apply quota system if either one of the following two is true:
// 1. the memory limit feature is turned off and there is more than one BMO
// 2. the memory limit feature is turned on
NABoolean mlimitPerCPU = defs.getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0;
if ( mlimitPerCPU || numBMOsInFrag > 1 ) {
memoryQuotaMB = (short)
computeMemoryQuota(generator->getEspLevel() == 0,
mlimitPerCPU,
generator->getBMOsMemoryLimitPerCPU().value(),
generator->getTotalNumBMOsPerCPU(),
generator->getTotalBMOsMemoryPerCPU().value(),
numBMOsInFrag,
generator->getFragmentDir()->getBMOsMemoryUsage()
);
Lng32 sortMemoryLowbound = defs.getAsLong(EXE_MEMORY_LIMIT_LOWER_BOUND_SORT);
if ( memoryQuotaMB < sortMemoryLowbound )
memoryQuotaMB = (short)sortMemoryLowbound;
}
}
}
// BMO settings. By default set this value to the maximum available,
// irrespective of whether the quota is enabled or disabled. Sort at run time
// will check the quota and available physical memory
// before consuming memory. Note that if memoryQuota is set to zero,
// sort may not do physical memory or memory pressure checks.
if ( memoryQuotaMB <= 0 &&
! sort_options->disableCmpHintsOverflow() ) // compiler hints enabled
{
memoryQuotaMB = (UInt16)defs.getAsLong(EXE_MEMORY_AVAILABLE_IN_MB);
}
sort_options->memoryQuotaMB() = memoryQuotaMB;
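// Net effect: an explicit EXE_MEM_LIMIT_PER_BMO_IN_MB wins; otherwise the
// quota system (when applicable) divides the per-CPU BMO budget, bounded
// below by EXE_MEMORY_LIMIT_LOWER_BOUND_SORT; and if neither applies while
// compiler hints are enabled, the quota falls back to
// EXE_MEMORY_AVAILABLE_IN_MB.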
if(generator->getOverflowMode() == ComTdb::OFM_SSD )
sort_options->bmoMaxMemThresholdMB() = (UInt16)defs.getAsLong(SSD_BMO_MAX_MEM_THRESHOLD_IN_MB);
else
sort_options->bmoMaxMemThresholdMB() = (UInt16)defs.getAsLong(EXE_MEMORY_AVAILABLE_IN_MB);
sort_options->pressureThreshold() =
(short)getDefault(GEN_MEM_PRESSURE_THRESHOLD);
short sortGrowthPercent =
RelExpr::bmoGrowthPercent(getEstRowsUsed(), getMaxCardEst());
sort_tdb = new(generator->getSpace())
ComTdbSort(sortKeyExpr,
sortRecExpr,
sortKeyLen,
sortRecLen,
sortPrefixKeyLen,
#pragma nowarn(1506) // warning elimination
returned_desc->noTuples() - 1,
child_tdb,
given_desc,
returned_desc,
work_cri_desc,
// if sort input is from top, switch the UP and DOWN queue
// sizes
(sortFromTop()
? (queue_index)getDefault(GEN_SORT_SIZE_UP)
: (queue_index)getDefault(GEN_SORT_SIZE_DOWN)),
(queue_index)getDefault(GEN_SORT_SIZE_UP),
(Cardinality) (getInputCardinality() * getEstRowsUsed()).getValue(),
numBuffers,
bufferSize_as_ulong,
max_num_buffers,
sort_options,
sortGrowthPercent);
sort_tdb->setCollectNFErrors(this->collectNFErrors());
#pragma warn(1506) // warning elimination
sort_tdb->setSortFromTop(sortFromTop());
sort_tdb->setOverflowMode(generator->getOverflowMode());
sort_tdb->setTopNSortEnabled(CmpCommon::getDefault(GEN_SORT_TOPN) == DF_ON);
if (generator->getUserSidetreeInsert())
sort_tdb->setUserSidetreeInsert(TRUE);
if (getTolerateNonFatalError() == RelExpr::NOT_ATOMIC_)
sort_tdb->setTolerateNonFatalError(TRUE);
sort_tdb->setCIFON(operatorCIF);
generator->initTdbFields(sort_tdb);
double sortMemEst = getEstimatedRunTimeMemoryUsage(sort_tdb);
generator->addToTotalEstimatedMemory(sortMemEst);
generator->addToTotalOverflowMemory(
getEstimatedRunTimeOverflowSize(memoryQuotaMB));
if (sortPrefixKeyLen > 0)
((ComTdbSort *)sort_tdb)->setPartialSort(TRUE); // do partial sort
if(CmpCommon::getDefaultLong(SORT_REC_THRESHOLD) > 0)
((ComTdbSort *)sort_tdb)->setMinimalSortRecs(CmpCommon::getDefaultLong(SORT_REC_THRESHOLD));
sort_tdb->setMemoryContingencyMB(getDefault(PHY_MEM_CONTINGENCY_MB));
float bmoCtzFactor;
defs.getFloat(BMO_CITIZENSHIP_FACTOR, bmoCtzFactor);
sort_tdb->setBmoCitizenshipFactor((Float32)bmoCtzFactor);
//if(!generator->explainDisabled()) {
Lng32 sortMemEstInKBPerCPU = (Lng32)(sortMemEst / 1024) ;
sortMemEstInKBPerCPU = sortMemEstInKBPerCPU/
(MAXOF(generator->compilerStatsInfo().dop(),1));
sort_tdb->setSortMemEstInMbPerCpu
( Float32(MAXOF(sortMemEstInKBPerCPU/1024,1)) );
if(!generator->explainDisabled()) {
generator->setOperEstimatedMemory(sortMemEstInKBPerCPU );
generator->setExplainTuple(
addExplainInfo(sort_tdb, childExplainTuple, 0, generator));
generator->setOperEstimatedMemory(0);
}
// set the new up cri desc.
generator->setCriDesc(returned_desc, Generator::UP);
generator->setGenObj(this, sort_tdb);
// reset the expression generation flag to generate float validation pcode
generator->setGenNoFloatValidatePCode(FALSE);
return 0;
}
//////////////////////////////////////////////////////////////
//
// Sort::codeGen()
//
/////////////////////////////////////////////////////////
short Sort::codeGen(Generator * generator)
{
ExpGenerator * exp_gen = generator->getExpGenerator();
Space * space = generator->getSpace();
////////////////////////////////////////////////////////////////////////
//
// Layout at this node:
//
// |-------------------------------------------------|
// | input data | Sorted data | child's data |
// | ( I tupps ) | ( 1 tupp ) | ( C tupps ) |
// |-------------------------------------------------|
// <-- returned row to parent --->
// <------------ returned row from child ------------>
//
// input data: the atp input to this node by its parent.
// sorted data: tupp where the sorted row is.
// this data is accessed by the key and by the data
// separately.
// The key data is in SQLMX_KEY_FORMAT and the data
// will be in either internal or exploded format.
// child data: tupps appended by the child
//
// Input to child: I + 1 tupps
//
// Tupps returned from child are only used to create the
// sorted data. They are not returned to parent.
//
/////////////////////////////////////////////////////////////////////////
MapTable * last_map_table = generator->getLastMapTable();
ex_cri_desc * given_desc
= generator->getCriDesc(Generator::DOWN);
ex_cri_desc * returned_desc
= new(space) ex_cri_desc(given_desc->noTuples() + 1, space);
Int32 work_atp = 1; // temps
Int32 work_atp_index = 2; // where the result row will be
ex_cri_desc * work_cri_desc = new(space) ex_cri_desc(3, space);
//Not all the records in the table will be processed by a single sort
//instance if multiple sort instances are involved within ESPs.
Lng32 saveNumEsps = generator->getNumESPs();
if (sortFromTop())
generator->setCriDesc(returned_desc, Generator::DOWN);
// generate code for child tree
child(0)->codeGen(generator);
//This value is set inside generator by my parent exchange node,
//as a global variable. Reset the saveNumEsps value back into
//generator since codegen of my children exchange nodes may have
//changed it. Resetting is performed here so the codegen of right
//child nodes of my parent gets the proper value.
generator->setNumESPs(saveNumEsps);
ComTdb * child_tdb = (ComTdb *)(generator->getGenObj());
ExplainTuple *childExplainTuple = generator->getExplainTuple();
// Before generating any expression for this node, set the
// the expression generation flag not to generate float
// validation PCode. This is to speed up PCode evaluation
generator->setGenNoFloatValidatePCode(TRUE);
// generate an expression to create the input row
// to be sent to sort.
// The input row consists of:
// n + m values
// where, n is the number of encoded key columns.
// m is the total number of column values.
// At runtime, a contiguous row of n + m columns is created
// and then given to sort.
// sort prefix key columns are indexed from 0 to k where k < n.
// The data within the Sort buffer will be contiguous in the format
// | encoded keys | returned column values |
// ----------------------------------------
// The keys will be in key format.
// The returned column values will be in Exploded or Compressed internal
// format.
// generate the key encode value id list used for sorting
UInt32 sortKeyLen = 0;
UInt32 sortPrefixKeyLen = 0;
Int32 prefixKeyCnt = getPrefixSortKey().entries();
ValueIdList sortKeyValIdList;
CollIndex sortKeyListIndex;
CollIndex sortRecListIndex;
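// For each sort key: note an ITM_INVERSE wrapper (it marks descending
// order), cast non-collated varchars to fixed-length chars of max size,
// and wrap the value in a CompEncode node so the encoded keys can be
// compared as contiguous byte strings.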
for (sortKeyListIndex = 0;
sortKeyListIndex < getSortKey().entries();
sortKeyListIndex++)
{
ItemExpr * skey_node =
((getSortKey()[sortKeyListIndex]).getValueDesc())->getItemExpr();
short desc_flag = FALSE;
if (skey_node->getOperatorType() == ITM_INVERSE)
{
desc_flag = TRUE;
}
if (skey_node->getValueId().getType().getVarLenHdrSize() > 0)
{
// Explode varchars by moving them to a fixed field
// whose length is equal to the max length of varchar.
// 5/8/98: add support for VARNCHAR
const CharType& char_type =
(CharType&)(skey_node->getValueId().getType());
//no cast to fixed char in the case of collation (Czech)
if (!CollationInfo::isSystemCollation(char_type.getCollation()))
{
skey_node =
new(generator->wHeap())
Cast (skey_node,
(new(generator->wHeap())
SQLChar(
CharLenInfo(char_type.getStrCharLimit(), char_type.getDataStorageSize()),
char_type.supportsSQLnull(),
FALSE, FALSE, FALSE,
char_type.getCharSet(),
char_type.getCollation(),
char_type.getCoercibility()
)
)
);
}
}
CompEncode * enode
= new(generator->wHeap()) CompEncode(skey_node, desc_flag);
enode->bindNode(generator->getBindWA());
sortKeyLen += enode->getValueId().getType().getTotalSize();
sortKeyValIdList.insert(enode->getValueId());
if (sortKeyListIndex < (CollIndex) prefixKeyCnt)
// sort key length is the prefix sort key length
// Note we need to do this assignment only once. Need a better way
sortPrefixKeyLen = sortKeyLen;
}
/*
// Generate the key encode expression ...
ex_expr * sortKeyExpr = 0;
exp_gen->generateContiguousMoveExpr(sortKeyValIdList,
0, // no conv nodes
work_atp, work_atp_index,
ExpTupleDesc::SQLMX_KEY_FORMAT,
sortKeyLen, &sortKeyExpr,
0, // no tupp descr
ExpTupleDesc::SHORT_FORMAT);
*/
// Now generate the returned column value value id list that moves the input
// data into the sort input buffer using convert nodes.
ValueIdList sortRecValIdList;
ValueId valId;
for (valId = getGroupAttr()->getCharacteristicOutputs().init();
getGroupAttr()->getCharacteristicOutputs().next(valId);
getGroupAttr()->getCharacteristicOutputs().advance(valId))
{
// add the convert node
Convert * convNode = new(generator->wHeap())Convert(valId.getItemExpr());
convNode->bindNode(generator->getBindWA());
sortRecValIdList.insert(convNode->getValueId());
}
UInt32 sortRecLen = 0;
ex_expr * sortRecExpr = 0;
ex_expr * sortKeyExpr = 0;
ExpTupleDesc * tuple_desc = 0;
// contains the value ids that are being returned (the sorted values)
MapTable * returnedMapTable = 0;
ExpTupleDesc::TupleDataFormat tupleFormat = generator->getInternalFormat();
// resizeCifRecord is an indicator that tells us whether we need to resize the CIF row or not.
// If CIF is not used then no resizing is done.
// If CIF is used then, based on a heuristic, we determine whether we need
// to resize the row or not.
NABoolean resizeCifRecord = FALSE;
// Sometimes only the key value id list has entries and there are no
// additional characteristic outputs.
// This happens when Sort is used as a blocking operator for NAR (Non Atomic
// Rowsets).
NABoolean bmo_affinity = (CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT_BMO_AFFINITY) == DF_ON);
NABoolean considerBufferDefrag = FALSE;
if (sortRecValIdList.entries() > 0)
{
if (! bmo_affinity &&
getCachedTupleFormat() != ExpTupleDesc::UNINITIALIZED_FORMAT &&
CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM &&
CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT_BMO) == DF_SYSTEM)
{
resizeCifRecord = getCachedResizeCIFRecord();
tupleFormat = getCachedTupleFormat();
considerBufferDefrag = getCachedDefrag() && resizeCifRecord;
}
else
{
//apply heuristic to determine the tuple format and whether we need to resize the row or not
tupleFormat = determineInternalFormat( sortRecValIdList,
this,
resizeCifRecord,
generator,
bmo_affinity,
considerBufferDefrag);
considerBufferDefrag = considerBufferDefrag && resizeCifRecord;
}
}
if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT)
{
exp_gen->generateContiguousMoveExpr(sortKeyValIdList,
0, // no conv nodes
work_atp, work_atp_index,
ExpTupleDesc::SQLMX_KEY_FORMAT,
sortKeyLen, &sortKeyExpr,
0, // no tupp descr
ExpTupleDesc::SHORT_FORMAT);
exp_gen->generateContiguousMoveExpr(sortRecValIdList,
0, // no convert nodes
work_atp, work_atp_index,
tupleFormat,
sortRecLen, &sortRecExpr,
&tuple_desc, ExpTupleDesc::SHORT_FORMAT);
sortRecLen += sortKeyLen;
if (resizeCifRecord)
{
// with CIF if we need to resize the rows then we need to keep track of the length
// of the row with the row itself
// in this case the allocated space for the row will be as follows
// |row size --(4 bytes)|sort key (sortkeyLen)| data (data length --may be variable) |
// ------------------------------------
sortRecLen += sizeof(UInt32);
}
}
else
{
CMPASSERT(resizeCifRecord == FALSE);
sortKeyValIdList.insertSet(sortRecValIdList);
exp_gen->generateContiguousMoveExpr(sortKeyValIdList,
0, // no conv nodes
work_atp, work_atp_index,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
sortRecLen, &sortRecExpr,
&tuple_desc,
ExpTupleDesc::SHORT_FORMAT);
}
// add in the total area for the keys since we generated this separately
// and ensure the size is a multiple of 8 so the data aligns correctly
// describe the returned row
returned_desc->setTupleDescriptor((UInt16)returned_desc->noTuples() - 1,
tuple_desc);
returnedMapTable = generator->appendAtEnd(); // allocates a new map table
generator->unlinkLast();
// Add the returned values to the map table. We get the returned value
// value id from the char outputs and the item expr from the sort record
// value id list, starting with the first item expr added to the sort
// record value id list by the for loop immediately preceding this one.
sortRecListIndex = 0;
for (valId = getGroupAttr()->getCharacteristicOutputs().init();
getGroupAttr()->getCharacteristicOutputs().next(valId);
getGroupAttr()->getCharacteristicOutputs().advance(valId))
{
// The first time through this loop, sortRecListIndex points to the
// first return-value convert node, since sortRecValIdList contains
// only the convert nodes added for the characteristic outputs above.
Attributes *attr =
generator->getMapInfo(sortRecValIdList[sortRecListIndex++])->getAttr();
// ...add it to the new map table as if it belonged to
// the original value id...
MapInfo * mi =
generator->addMapInfoToThis(returnedMapTable, valId, attr);
// All reference to the returned values from this point on
// will be at atp = 0, atp_index = last entry in returned desc.
// Offset will be the same as in the workAtp.
mi->getAttr()->setAtp(0);
mi->getAttr()->setAtpIndex(returned_desc->noTuples() - 1);
// ... and make sure no more code gets generated for it.
mi->codeGenerated();
}
// remove all appended map tables and return the returnedMapTable
generator->removeAll(last_map_table);
generator->appendAtEnd(returnedMapTable);
short rc =
generateTdb(generator,
child_tdb,
sortKeyExpr,
sortRecExpr,
sortKeyLen,
sortRecLen,
sortPrefixKeyLen,
given_desc,
returned_desc,
work_cri_desc,
saveNumEsps,
childExplainTuple,
resizeCifRecord,
considerBufferDefrag,
(tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT));
return rc;
}
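// Decide the internal tuple format (exploded vs. compressed internal) for
// the sort buffer based on the COMPRESSED_INTERNAL_FORMAT_BMO CQD and the
// generator's heuristic; also returns whether rows may need resizing and
// whether buffer defragmentation should be considered.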
ExpTupleDesc::TupleDataFormat Sort::determineInternalFormat( const ValueIdList & valIdList,
RelExpr * relExpr,
NABoolean & resizeCifRecord,
Generator * generator,
NABoolean bmo_affinity,
NABoolean & considerBufferDefrag)
{
RelExpr::CifUseOptions bmo_cif = RelExpr::CIF_SYSTEM;
if (CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT_BMO) == DF_OFF)
{
bmo_cif = RelExpr::CIF_OFF;
}
else
if (CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT_BMO) == DF_ON)
{
bmo_cif = RelExpr::CIF_ON;
}
//CIF_SYSTEM
UInt32 sortKeyLength = getSortKey().getRowLength();
return generator->determineInternalFormat(valIdList,
relExpr,
resizeCifRecord,
bmo_cif,
bmo_affinity,
considerBufferDefrag,
sortKeyLength);
}
//////////////////////////////////////////////////////////////
//
// SortFromTop::codeGen()
//
/////////////////////////////////////////////////////////
short SortFromTop::codeGen(Generator * generator)
{
ExpGenerator * exp_gen = generator->getExpGenerator();
Space * space = generator->getSpace();
MapTable * last_map_table = generator->getLastMapTable();
ex_cri_desc * given_desc
= generator->getCriDesc(Generator::DOWN);
// no tuples returned from this operator
ex_cri_desc * returned_desc = given_desc;
// child gets one sorted row created in this operator.
ex_cri_desc * child_desc
= new(space) ex_cri_desc(given_desc->noTuples() + 1, space);
Int32 work_atp = 1; // temps
Int32 work_atp_index = 2; // where the result row will be
ex_cri_desc * work_cri_desc = new(space) ex_cri_desc(3, space);
//Not all the records in the table will be processed by a single sort
//instance if multiple sort instances are involved within ESPs.
Lng32 saveNumEsps = generator->getNumESPs();
generator->setCriDesc(child_desc, Generator::DOWN);
// Before generating any expression for this node, set the
// the expression generation flag not to generate float
// validation PCode. This is to speed up PCode evaluation
generator->setGenNoFloatValidatePCode(TRUE);
MapTable *myMapTable = generator->appendAtEnd();
ULng32 sort_rec_len = 0;
ULng32 sort_key_len = 0;
ex_expr * sort_rec_expr = 0;
ex_expr * sort_key_expr = 0;
ExpTupleDesc * tuple_desc = 0;
ValueIdList sortRecVIDlist;
ValueIdList sortKeyVIDlist;
CollIndex ii = 0;
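// For each clustering key column, find the matching assignment in the sort
// record expression, cast its source value to the target column type and
// wrap it in a CompEncode node (honoring DESCENDING key order) to build
// the sort key.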
for (ii = 0; ii < getSortKey().entries(); ii++)
{
ItemExpr * skey_node =
((getSortKey()[ii]).getValueDesc())->getItemExpr();
if (skey_node->getOperatorType() != ITM_INDEXCOLUMN)
GenAssert(0, "Must be IndexColumn");
IndexColumn * ic = (IndexColumn*)skey_node;
NABoolean found = FALSE;
CollIndex jj = 0;
while ((NOT found) && (jj < getSortRecExpr().entries()))
{
const ItemExpr *assignExpr =
getSortRecExpr()[jj].getItemExpr();
ItemExpr * tgtCol = assignExpr->child(0)->castToItemExpr();
if (tgtCol->getOperatorType() != ITM_BASECOLUMN)
GenAssert(0, "Must be BaseColumn");
ValueId tgtValueId = tgtCol->getValueId();
ValueId srcValueId =
assignExpr->child(1)->castToItemExpr()->getValueId();
ItemExpr * srcVal = assignExpr->child(1)->castToItemExpr();
if (ic->getNAColumn()->getColName() ==
((BaseColumn*)tgtCol)->getNAColumn()->getColName())
{
found = TRUE;
//***TBD*** need to handle descending. Get that from index desc.
short desc_flag = FALSE;
if (ic->getNAColumn()->getClusteringKeyOrdering() == DESCENDING)
{
desc_flag = TRUE;
}
if (skey_node->getValueId().getType().getVarLenHdrSize() > 0)
{
// Explode varchars by moving them to a fixed field
// whose length is equal to the max length of varchar.
const CharType& char_type =
(CharType&)(skey_node->getValueId().getType());
//no cast to fixed char in the case of collation (Czech)
if (!CollationInfo::isSystemCollation(char_type.getCollation()))
{
skey_node =
new(generator->wHeap())
Cast (srcVal,
(new(generator->wHeap())
SQLChar(
CharLenInfo(char_type.getStrCharLimit(), char_type.getDataStorageSize()),
char_type.supportsSQLnull(),
FALSE, FALSE, FALSE,
char_type.getCharSet(),
char_type.getCollation(),
char_type.getCoercibility()
)
)
);
}
}
else
{
skey_node = new(generator->wHeap())
Cast(srcVal, &tgtValueId.getType());
}
CompEncode * enode
= new(generator->wHeap()) CompEncode(skey_node, desc_flag);
enode->bindNode(generator->getBindWA());
sort_key_len += enode->getValueId().getType().getTotalSize();
sortKeyVIDlist.insert(enode->getValueId());
} // if
jj++;
} // while
if (NOT found)
{
GenAssert(0, "Key not found in newRecExprArray");
}
} // for
// generate sort key expr
//ex_expr * sort_key_expr = 0;
if (sortKeyVIDlist.entries() > 0)
{
exp_gen->generateContiguousMoveExpr(sortKeyVIDlist,
0, // no conv nodes
work_atp, work_atp_index,
ExpTupleDesc::SQLMX_KEY_FORMAT,
sort_key_len, &sort_key_expr,
0, // no tupp descr
ExpTupleDesc::SHORT_FORMAT);
}
//sortRecVIDlist = sortKeyVIDlist;
for (ii = 0; ii < getSortRecExpr().entries(); ii++)
{
const ItemExpr *assignExpr =
getSortRecExpr()[ii].getItemExpr();
ValueId tgtValueId =
assignExpr->child(0)->castToItemExpr()->getValueId();
ValueId srcValueId =
assignExpr->child(1)->castToItemExpr()->getValueId();
ItemExpr * ie = NULL;
ie = new(generator->wHeap())
Cast(assignExpr->child(1),
&tgtValueId.getType());
ie->bindNode(generator->getBindWA());
sortRecVIDlist.insert(ie->getValueId());
} // for
ExpTupleDesc::TupleDataFormat tupleFormat = generator->getInternalFormat();
NABoolean resizeCifRecord = FALSE;
NABoolean considerBufferDefrag = FALSE;
if (sortRecVIDlist.entries()>0)
{
tupleFormat = determineInternalFormat( sortRecVIDlist,
this,
resizeCifRecord,
generator,
FALSE,
considerBufferDefrag);
exp_gen->generateContiguousMoveExpr(sortRecVIDlist,
0 /*don't add conv nodes*/,
work_atp, work_atp_index,
tupleFormat,
sort_rec_len, &sort_rec_expr,
&tuple_desc, ExpTupleDesc::SHORT_FORMAT);
}
//sort_key_len = ROUND8(sort_key_len); ?????????
sort_rec_len += sort_key_len;
if (resizeCifRecord)
{
// with CIF if we need to resize the rows then we need to keep track of the length
// of the row with the row itself
// in this case the allocated space for the row will be as follows
// |row size --(4 bytes)|sort key (sortkeyLen)| data (data length --may be variable) |
// ------------------------------------
sort_rec_len += sizeof(UInt32);
}
for (CollIndex i = 0; i < (CollIndex) sortRecVIDlist.entries(); i++)
{
ItemExpr * cn = (sortRecVIDlist[i]).getItemExpr();
Attributes *attrib =
generator->getMapInfo(cn->getValueId())->getAttr();
MapInfo * mi =
generator->
addMapInfoToThis(myMapTable,
cn->child(0)->castToItemExpr()->getValueId(),
attrib);
// All reference to the sorted values from this point on
// will be at atp = 0, atp_index = last entry in child desc.
// Offset will be the same as in the workAtp.
mi->getAttr()->setAtp(0);
mi->getAttr()->setAtpIndex(child_desc->noTuples() - 1);
// ... and make sure no more code gets generated for it.
mi->codeGenerated();
}
// generate code for child tree
child(0)->codeGen(generator);
//This value is set inside generator by my parent exchange node,
//as a global variable. Reset the saveNumEsps value back into
//generator since codegen of my children exchange nodes may have
//changed it. Resetting is performed here so the codegen of right
//child nodes of my parent gets the proper value.
generator->setNumESPs(saveNumEsps);
ComTdb * child_tdb = (ComTdb *)(generator->getGenObj());
ExplainTuple *childExplainTuple = generator->getExplainTuple();
// remove all appended map tables. No values are returned by
// this node.
generator->removeAll(last_map_table);
short rc =
generateTdb(generator,
child_tdb,
sort_key_expr,
sort_rec_expr,
sort_key_len,
sort_rec_len,
0, //sort_prefix_key_len,
given_desc,
child_desc,
work_cri_desc,
saveNumEsps,
childExplainTuple,
resizeCifRecord,
considerBufferDefrag);
return rc;
}
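// Estimate the run-time overflow (spill) size: the amount by which the
// per-CPU memory estimate exceeds the memory quota, scaled by the number
// of partitions (parallel sort instances).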
double Sort::getEstimatedRunTimeOverflowSize(double memoryQuotaMB)
{
if ( memoryQuotaMB > 0 ) {
CostScalar memoryUsage = getEstimatedRunTimeMemoryUsage(TRUE /*per CPU*/);
double delta = memoryUsage.getValue() - memoryQuotaMB * COM_ONE_MEG ;
if ( delta > 0 ) {
const PhysicalProperty* const phyProp = getPhysicalProperty();
Lng32 pipelines = 1;
if (phyProp != NULL)
{
PartitioningFunction * partFunc =
phyProp -> getPartitioningFunction() ;
if ( partFunc )
pipelines = partFunc->getCountOfPartitions();
}
return delta * pipelines;
}
}
return 0;
}
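// Estimate run-time memory usage as estimated rows used times the child's
// output row length; when perCPU is TRUE, divide by the number of
// partitions.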
CostScalar Sort::getEstimatedRunTimeMemoryUsage(NABoolean perCPU)
{
GroupAttributes * childGroupAttr = child(0).getGroupAttr();
Lng32 childRecordSize =
childGroupAttr->getCharacteristicOutputs().getRowLength();
CostScalar totalMemory = getEstRowsUsed() * childRecordSize;
if ( perCPU == TRUE ) {
const PhysicalProperty* const phyProp = getPhysicalProperty();
if (phyProp != NULL)
{
PartitioningFunction * partFunc = phyProp -> getPartitioningFunction() ;
totalMemory /= partFunc->getCountOfPartitions();
}
}
return totalMemory;
}
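// TDB-based variant: cap the per-stream memory estimate at the sort memory
// quota (or at numBuffers * bufferSize when no quota is set), then scale it
// back up by the number of streams to get the total estimate.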
double Sort::getEstimatedRunTimeMemoryUsage(ComTdb * tdb)
{
CostScalar totalMemory = getEstimatedRunTimeMemoryUsage(FALSE);
const Int32 numBuffs = ActiveSchemaDB()->getDefaults().getAsLong(GEN_SORT_MAX_NUM_BUFFERS);
const Int32 bufSize = ActiveSchemaDB()->getDefaults().getAsLong(GEN_SORT_MAX_BUFFER_SIZE);
double memoryLimitPerCpu;
short memoryQuotaInMB = ((ComTdbSort *)tdb)->getSortOptions()->memoryQuotaMB();
if (memoryQuotaInMB)
memoryLimitPerCpu = memoryQuotaInMB * 1024 * 1024 ;
else
memoryLimitPerCpu = numBuffs * bufSize ;
const PhysicalProperty* const phyProp = getPhysicalProperty();
Lng32 numOfStreams = 1;
if (phyProp != NULL)
{
PartitioningFunction * partFunc = phyProp -> getPartitioningFunction() ;
numOfStreams = partFunc->getCountOfPartitions();
}
CostScalar memoryPerCpu = totalMemory/numOfStreams ;
if ( memoryPerCpu > memoryLimitPerCpu )
{
memoryPerCpu = memoryLimitPerCpu;
}
totalMemory = memoryPerCpu * numOfStreams ;
return totalMemory.value();
}
/////////////////////////////////////////////////////////
//
// Tuple::codeGen()
//
/////////////////////////////////////////////////////////
short Tuple::codeGen(Generator * generator)
{
// code generation for this node doesn't do anything.
// A Tuple node returns one tuple of constant expressions. This
// expression is evaluated where it is used. Since this node doesn't
// produce anything, the returned cri desc is the same as the input cri desc.
// We could do away with generating something here but the
// parent node expects a child node to return 'something' before it
// can continue.
Queue * qList = new(generator->getSpace()) Queue(generator->getSpace());
ExpGenerator *expGen = generator->getExpGenerator();
// expression to conditionally return 0 or more rows.
ex_expr *predExpr = NULL;
// generate tuple selection expression, if present
if(NOT selectionPred().isEmpty())
{
ItemExpr* pred = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(pred->getValueId(),ex_expr::exp_SCAN_PRED,&predExpr);
}
ComTdbTupleLeaf *tuple_tdb = new(generator->getSpace())
ComTdbTupleLeaf(qList,
0, // no tuple returned. Length = 0.
0, // no tupp index
predExpr,
generator->getCriDesc(Generator::DOWN),
generator->getCriDesc(Generator::DOWN),
(queue_index)getDefault(GEN_TUPL_SIZE_DOWN),
(queue_index)getDefault(GEN_TUPL_SIZE_UP),
(Cardinality) (getInputCardinality() * getEstRowsUsed()).getValue(),
#pragma nowarn(1506) // warning elimination
getDefault(GEN_TUPL_NUM_BUFFERS),
getDefault(GEN_TUPL_BUFFER_SIZE));
#pragma warn(1506) // warning elimination
generator->initTdbFields(tuple_tdb);
if(!generator->explainDisabled()) {
generator->setExplainTuple(
addExplainInfo(tuple_tdb, 0, 0, generator));
}
generator->setGenObj(this, tuple_tdb);
generator->setCriDesc(generator->getCriDesc(Generator::DOWN),
Generator::UP);
return 0;
}
/////////////////////////////////////////////////////////
//
// TupleList::codeGen()
//
/////////////////////////////////////////////////////////
short TupleList::codeGen(Generator * generator)
{
Space * space = generator->getSpace();
ExpGenerator * expGen = generator->getExpGenerator();
ex_cri_desc * givenDesc
= generator->getCriDesc(Generator::DOWN);
ex_cri_desc * returnedDesc
#pragma nowarn(1506) // warning elimination
= new(space) ex_cri_desc(givenDesc->noTuples() + 1, space);
#pragma warn(1506) // warning elimination
Int32 tuppIndex = returnedDesc->noTuples() - 1;
// disable common subexpression elimination for now.
// There is a problem which shows up due to common subexpression
// elimination. See case 10-040402-2209.
// After the problem is diagnosed and fixed in tuple list,
// we won't need to disable subexp elimination.
generator->getExpGenerator()->setEnableCommonSubexpressionElimination(FALSE);
Queue * qList = new(generator->getSpace()) Queue(generator->getSpace());
ULng32 tupleLen = 0;
ExpTupleDesc * tupleDesc = 0;
ExprValueId eVid(tupleExprTree());
ItemExprTreeAsList tupleList(&eVid, ITM_ITEM_LIST);
CollIndex nTupEntries = (CollIndex) tupleList.entries();
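// For each tuple in the list, bind the source values to the target types
// and generate a contiguous move expression; the move expressions are
// collected in qList and evaluated at run time to materialize each row.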
for (CollIndex i = 0; i < nTupEntries; i++)
{
ex_expr * moveExpr = NULL;
ItemExpr * tuple =
((ItemExpr *) tupleList[i])->child(0)->castToItemExpr();
ExprValueId tVid(tuple);
ItemExprTreeAsList tupleTree(&tVid, ITM_ITEM_LIST);
ValueIdList convVIDlist;
BindWA *bindWA = generator->getBindWA();
NABoolean castTo = castToList().entries() > 0;
for (CollIndex j = 0; j < tupleExpr().entries(); j++)
{
ItemExpr * castNode = tupleExpr()[j].getItemExpr();
ItemExpr * childNode = (ItemExpr *) tupleTree[j];
if (castTo)
{
// When we have ins/upd target cols which are
// MP NCHAR in MX-NSK-Rel1 (i.e., SINGLE-byte),
// and the source was from a Tuple/TupleList,
// then we must do some magic Assign binding
// to ensure that the single-byte even-num-of-bytes
// "constraint" is not violated.
//
// Build + copy this "constraint" --
// NOTE: tmpAssign MUST BE ON HEAP -- see TupleList::bindNode() !
Assign *tmpAssign = new(bindWA->wHeap())
Assign(castToList()[j].getItemExpr(), childNode);
//***************************************************************
// 10-0414-2428: Note that this assign is for inserts or updates
// (1) castTo is set only when insert DMLs
// (2) Assign constructor argument UserSpecified is set to TRUE
//***************************************************************
setInUpdateOrInsert(bindWA, NULL, REL_INSERT);
tmpAssign = (Assign *)tmpAssign->bindNode(bindWA);
setInUpdateOrInsert(bindWA, NULL);
childNode = tmpAssign->getSource().getItemExpr();
castNode->child(0) = childNode;
}
else
{
childNode = childNode->bindNode(bindWA);
if ( (castNode->child(0)) &&
(castNode->child(0)->getOperatorType() == ITM_INSTANTIATE_NULL) )
{
// if this tuplelist is part of a subquery an additional node
// of type ITM_INSTANTIATE_NULL is placed in the binder; check if
// that is the case
castNode->child(0)->child(0) = childNode; // need to fix this
}
else
{
castNode->child(0) = childNode;
}
}
castNode->bindNode(bindWA);
// if any unknown type in the tuple,
// coerce it to the target type.
childNode->bindNode(bindWA);
childNode->getValueId().coerceType(castNode->getValueId().getType());
MapInfo * mInfo = generator->getMapInfoAsIs(castNode->getValueId());
if (mInfo)
mInfo->resetCodeGenerated();
castNode->unmarkAsPreCodeGenned();
convVIDlist.insert(castNode->getValueId());
}
GenAssert(!bindWA->errStatus(), "bindWA");
expGen->generateContiguousMoveExpr(convVIDlist,
0 /*don't add conv nodes*/,
0 /*atp*/,
tuppIndex,
generator->getInternalFormat(),
tupleLen,
&moveExpr,
&tupleDesc,
ExpTupleDesc::SHORT_FORMAT);
qList->insert(moveExpr);
}
#pragma nowarn(1506) // warning elimination
returnedDesc->setTupleDescriptor(tuppIndex, tupleDesc);
#pragma warn(1506) // warning elimination
// generate expression for selection predicate, if it exists
ex_expr *predExpr = NULL;
if(NOT selectionPred().isEmpty())
{
ItemExpr* pred = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(pred->getValueId(),ex_expr::exp_SCAN_PRED,
&predExpr);
}
// Compute the buffer size based on upqueue size and row size.
// Try to get enough buffer space to hold twice as many records
// as the up queue.
ULng32 buffersize = getDefault(GEN_TUPL_BUFFER_SIZE);
#pragma nowarn(1506) // warning elimination
Int32 numBuffers = getDefault(GEN_TUPL_NUM_BUFFERS);
#pragma warn(1506) // warning elimination
queue_index upqueuelength = (queue_index)getDefault(GEN_TUPL_SIZE_UP);
ULng32 cbuffersize =
((tupleLen + sizeof(tupp_descriptor))
* (upqueuelength * 2/numBuffers)) +
SqlBufferNeededSize(0,0);
buffersize = buffersize > cbuffersize ? buffersize : cbuffersize;
#pragma nowarn(1506) // warning elimination
ComTdbTupleLeaf *tupleTdb = new(generator->getSpace())
ComTdbTupleLeaf(qList,
tupleLen,
tuppIndex,
predExpr,
givenDesc,
returnedDesc,
(queue_index)getDefault(GEN_TUPL_SIZE_DOWN),
(queue_index)upqueuelength,
(Cardinality) (getInputCardinality() * getEstRowsUsed()).getValue(),
getDefault(GEN_TUPL_NUM_BUFFERS),
buffersize);
#pragma warn(1506) // warning elimination
generator->initTdbFields(tupleTdb);
if(!generator->explainDisabled())
{
generator->setExplainTuple(
addExplainInfo(tupleTdb, 0, 0, generator));
}
generator->setGenObj(this, tupleTdb);
generator->setCriDesc(givenDesc, Generator::DOWN);
generator->setCriDesc(returnedDesc, Generator::UP);
return 0;
}
/////////////////////////////////////////////////////////
//
// ExplainFunc::codeGen()
//
/////////////////////////////////////////////////////////
short ExplainFunc::codeGen(Generator * generator)
{
ExpGenerator * expGen = generator->getExpGenerator();
Space * space = generator->getSpace();
// allocate a map table for the retrieved columns
generator->appendAtEnd();
ex_expr *explainExpr = 0;
ex_cri_desc * givenDesc
= generator->getCriDesc(Generator::DOWN);
ex_cri_desc * returnedDesc
#pragma nowarn(1506) // warning elimination
= new(space) ex_cri_desc(givenDesc->noTuples() + 1, space);
#pragma warn(1506) // warning elimination
ex_cri_desc * paramsDesc
#pragma nowarn(1506) // warning elimination
= new(space) ex_cri_desc(givenDesc->noTuples() + 1, space);
#pragma warn(1506) // warning elimination
// Assumption (for now): retrievedCols contains ALL columns from
// the table/index. This is because this operator does
// not support projection of columns. Add all columns from this table
// to the map table.
//
// The row retrieved from filesystem is returned as the last entry in
// the returned atp.
const ValueIdList & columnList = getTableDesc()->getColumnList();
const CollIndex numColumns = columnList.entries();
Attributes ** attrs = new(generator->wHeap()) Attributes * [numColumns];
for (CollIndex i = 0; i < numColumns; i++)
{
ItemExpr * col_node = ((columnList[i]).getValueDesc())->getItemExpr();
attrs[i] = (generator->addMapInfo(col_node->getValueId(), 0))->
getAttr();
}
ExpTupleDesc *explTupleDesc = 0;
ULng32 explTupleLength = 0;
expGen->processAttributes(numColumns,
attrs, ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
explTupleLength,
0, returnedDesc->noTuples() - 1,
&explTupleDesc, ExpTupleDesc::SHORT_FORMAT);
// delete [] attrs;
// NADELETEBASIC is used because compiler does not support delete[]
// operator yet. Should be changed back later when compiler supports
// it.
NADELETEBASIC(attrs, generator->wHeap());
// add this descriptor to the work cri descriptor.
#pragma nowarn(1506) // warning elimination
returnedDesc->setTupleDescriptor(returnedDesc->noTuples()-1, explTupleDesc);
#pragma warn(1506) // warning elimination
// generate explain selection expression, if present
if (! selectionPred().isEmpty())
{
ItemExpr * newPredTree = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&explainExpr);
}
// generate move expression for the parameter list
ex_expr *moveExpr = 0;
ExpTupleDesc *tupleDesc = 0;
ULng32 tupleLength = 0;
if (! getProcInputParamsVids().isEmpty())
{
expGen->generateContiguousMoveExpr(getProcInputParamsVids(),
-1,
0,
paramsDesc->noTuples()-1,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
tupleLength,
&moveExpr,
&tupleDesc,
ExpTupleDesc::LONG_FORMAT);
// add this descriptor to the work cri descriptor.
#pragma nowarn(1506) // warning elimination
paramsDesc->setTupleDescriptor(paramsDesc->noTuples()-1, tupleDesc);
#pragma warn(1506) // warning elimination
}
// allocate buffer space to contain at least 2 rows.
ULng32 bufferSize = (explTupleLength+100/*padding*/) * 2/*rows*/;
bufferSize = MAXOF(bufferSize, 30000); // min buf size 30000
Int32 numBuffers = 3; // allocate 3 buffers
#pragma nowarn(1506) // warning elimination
ComTdbExplain *explainTdb
= new(space)
ComTdbExplain(givenDesc, // given_cri_desc
returnedDesc, // returned cri desc
8, // Down queue size
16, // Up queue size
returnedDesc->noTuples() - 1, // Index in atp of return
// tuple.
explainExpr, // predicate
paramsDesc, // Descriptor of params Atp
tupleLength, // Length of params Tuple
moveExpr, // expression to calculate
// the explain parameters
numBuffers, // Number of buffers to allocate
bufferSize); // Size of each buffer
#pragma warn(1506) // warning elimination
generator->initTdbFields(explainTdb);
// Add the explain Information for this node to the EXPLAIN
// Fragment. Set the explainTuple pointer in the generator so
// the parent of this node can get a handle on this explainTuple.
if(!generator->explainDisabled()) {
generator->setExplainTuple(
addExplainInfo(explainTdb, 0, 0, generator));
}
generator->setCriDesc(givenDesc, Generator::DOWN);
generator->setCriDesc(returnedDesc, Generator::UP);
generator->setGenObj(this, explainTdb);
return 0;
}
// PhysTranspose::codeGen() ------------------------------------------
// Generate code (a TDB node with corresponding Expr expressions) for
// the PhysTranspose node. This node implements the TRANSPOSE operator.
//
// Parameters:
//
// Generator *generator
// IN/OUT : A pointer to the generator object which contains the state,
// and tools (e.g. expression generator) to generate code for
// this node.
//
// Side Effects: Generates a ComTdbTranspose along with all the required
// expressions for the transpose node.
//
// Generates explain info.
//
// Alters the state of the generator, including modifying
// the map table, setting references to the generated Tdb
// and explain info, etc.
//
short
PhysTranspose::codeGen(Generator *generator)
{
// Get handles on expression generator, map table, and heap allocator
//
ExpGenerator *expGen = generator->getExpGenerator();
Space *space = generator->getSpace();
// Allocate a new map table for this operation
//
MapTable *localMapTable = generator->appendAtEnd();
// Generate the child and capture the task definition block and a description
// of the reply composite row layout and the explain information.
//
child(0)->codeGen(generator);
ComTdb *childTdb = (ComTdb*)(generator->getGenObj());
ex_cri_desc *childCriDesc = generator->getCriDesc(Generator::UP);
ExplainTuple *childExplainTuple = generator->getExplainTuple();
// Generate the given and returned composite row descriptors
// Transpose adds a tupp to the row returned by the *child*
// or the Given row, if no columns of the child are outputs of
// this node.
//
ex_cri_desc *givenCriDesc = generator->getCriDesc(Generator::DOWN);
// Determine if any of the childs outputs are also outputs of the node.
// (if not, then don't pass them up the queue, later we will remove them
// from the map table.)
//
// Get the outputs of this node.
//
ValueIdSet transOutputs = getGroupAttr()->getCharacteristicOutputs();
// Get all the ValueIds that this node generates.
//
ValueIdSet transVals;
for(CollIndex v = 0; v < transUnionVectorSize(); v++) {
transVals.insertList(transUnionVector()[v]);
}
// Remove from the outputs, those values that are generated by this
// node. (if none are left, then the child's outputs are not needed
// above, otherwise they are.)
//
transOutputs -= transVals;
ex_cri_desc *returnedCriDesc = 0;
if (transOutputs.isEmpty()) {
// The child's outputs are not needed above, so do not pass them
// along.
//
returnedCriDesc =
#pragma nowarn(1506) // warning elimination
new(space) ex_cri_desc(givenCriDesc->noTuples() + 1, space);
#pragma warn(1506) // warning elimination
// Make all of my child's outputs map to ATP 1. Since they are
// not needed above, they will not be in the work ATP (0).
// (Later, they will be removed from the map table)
//
localMapTable->setAllAtp(1);
} else {
// The child's outputs are needed above, so must pass them along.
//
returnedCriDesc =
#pragma nowarn(1506) // warning elimination
new(space) ex_cri_desc(childCriDesc->noTuples() + 1, space);
#pragma warn(1506) // warning elimination
}
// transposeCols is the last Tp in Atp 0.
//
const Int32 transColsAtpIndex = returnedCriDesc->noTuples() - 1;
const Int32 transColsAtp = 0;
// The length of the new tuple which will contain the columns
// generated by transpose.
//
ULng32 transColsTupleLen;
// The Transpose node contains a vector of ValueIdLists. There is
// one entry for each transpose set, plus one entry for the key
// values. Each entry contains a list of ValueIdUnion Nodes. The
// first entry contains a list with one ValueIdUnion node. This node
// is for the Const. Values (1 - N) representing the Key Values. The
// other entries contain lists of ValueIdUnion nodes for the
// Transposed Values. Each of these entries of the vector represent
// a transpose set. If the transpose set contains a list of values,
// then there will be only one ValueIdUnion node in the list. If
// the transpose set contains a list of lists of values, then there
// will be as many ValueIdUnion nodes as there are items in the
// sublists. Each ValueIdUnion within a list must contain the same
// number of elements.
//
// The cardinality of an entry in the vector is the number of entries
// in each of its ValueIdUnion nodes. The cardinality of the first
// entry (the key values) is equal to the sum of the cardinalities of
// the subsequent entries (the transpose values).
// In order to generate the expressions for the transpose node the
// ValueIdUnion nodes for the values expressions (entries 1 - 3 below)
// have to be indexed from 0 - N (0 - 6 in the example below).
// In order to translate from an index from 0 - N to an index of the
// vector and an index within the ValueIdUnion, numExprs contains the
// number of ValExprs for each ValueIdUnion. Eg. if the transUnionVector
// contains these entries:
//
// Vector Num
// Entry Entries Card.
// 0 1 7 ValueIdUnion(1,2,3,4,5,6,7)
// 1 1 2 ValueIdUnion(A,B)
// 2 1 3 ValueIdUnion(X,Y,Z)
// 3 2 2 ValueIdUnion(1,2) , ValueIdUnion('hello', 'world')
//
// Then numUnionLists would be 4
//
// The numKeyExprs would be 7 (the cardinality of the Key Values, see below)
//
// The total cardinality of the rest of the ValueIdUnions also equals 7.
//
// numExprs will have 4 (numUnionLists) entries with values 7, 2, 3 and 2.
//
// The number of entries in the transUnionVector.
//
CollIndex numUnionLists = transUnionVectorSize();
// Allocate a vector of size numUnionLists to hold the cardinality of each
// entry in the vector.
//
CollIndex *numExprs = new(space) CollIndex[numUnionLists];
// The Tuple Desc describing the tuple containing the new transpose columns
// It is generated when the expressions are generated.
//
ExpTupleDesc *transColsTupleDesc = 0;
// Loop index used throughout
//
CollIndex unionListNum;
// Used below for a sanity check (assert).
//
CollIndex totNumExprs = 0;
// Populate the numExprs array.
//
for(unionListNum = 0; unionListNum < numUnionLists; unionListNum++) {
// If the entry of the vector has at least one ValueIdUnion ...
// (All ValueIdUnions in an entry of the vector should have the
// same number of entries, so use the first entry)
//
if(transUnionVector()[unionListNum].entries() > 0)
numExprs[unionListNum] =
((ValueIdUnion *)(transUnionVector()[unionListNum])[0].
getValueDesc()->getItemExpr())->entries();
else
// If the keyCol was not specified, then the first entry of the
// vector may contain no ValueIdUnion nodes.
//
numExprs[unionListNum] = 0;
// Used in an assert below.
//
totNumExprs += numExprs[unionListNum];
}
// The number of Key Exprs. The item expressions should be the Constants
// 1 -> numKeyExprs or non-existent. The Key Exprs are the first entry.
//
const CollIndex numKeyExprs = numExprs[0];
// If there are no key expressions, then the number of expressions
// to generate is the totNumExprs.
//
const CollIndex numMovExprs = (numKeyExprs == 0 ? totNumExprs : numKeyExprs);
// The total number of item expressions in the value unions should equal
// the number of key values in the first ValueIdUnion.
//
GenAssert(totNumExprs == numKeyExprs * 2 || numKeyExprs == 0,
"Transpose: Internal Error");
// Allocate space to hold numMovExprs expressions. Each expression will
// compute one value for each of the ValueIdUnions.
// A constant Key Value will be computed in each expression,
// but only value expressions from one vector entry will be computed.
// All other value expressions will be NULL.
//
ex_expr ** transExprs = new(space)ex_expr*[numMovExprs];
// A list of the ValueIds will be constructed and used to generate
// the expressions
//
ValueIdList ValueIds;
// A new item expression which casts the result to the proper type
// will be constructed.
//
Cast *castExpr = 0;
// Loop index
//
CollIndex exprNum;
// For each transpose value expression (also equal to the number of key
// values if they exist)
//
for(exprNum = 0; exprNum < numMovExprs; exprNum++) {
// Clear the list of value Ids (used in the previous iteration
// through this loop).
//
ValueIds.clear();
// Construct Key Expression with Cast node if they exist.
// The key ValueIdUnion is in position 0.
//
if(numKeyExprs != 0) {
// Get the key ValueIdUnion.
//
ValueIdUnion *keyValIdUnion =
(ValueIdUnion *)((transUnionVector()[0])[0].
getValueDesc()->getItemExpr());
// Extract one (exprNum) entry from the ValueIdUnion and add a
// Cast to cast it to the proper type.
//
castExpr = new(generator->wHeap())
#pragma nowarn(1506) // warning elimination
Cast(keyValIdUnion->getSource(exprNum).getValueDesc()->getItemExpr(),
#pragma warn(1506) // warning elimination
&(keyValIdUnion->getResult().getType()));
// Bind this new item expression.
//
castExpr->bindNode(generator->getBindWA());
// This should never fail at this point !!!
//
GenAssert(! generator->getBindWA()->errStatus(),
"Transpose: Internal Error");
// Insert the Value Id for this item expression into the list of
// Value Ids that will be used to generate the final expression.
//
ValueIds.insert(castExpr->getValueId());
}
// ValueIds may now contain one ValueId representing the key value.
//
// Translate the expression number (exprNum) from 0 -> numKeyExprs - 1
// to vector entry number (vectorEntryNum) and expression number
// (valExprNum)
//
// Binary expression indexes.
//
CollIndex vectorEntryNum, valExprNum;
// Keep track of how many expressions are covered by the current
// ValueIdUnion and those previous.
//
CollIndex numCoveredExprs;
// Translate the Unary expression index (exprNum) into a binary
// index (vectorEntryNum, valExprNum)
// (A for-loop with no body)
//
for((vectorEntryNum = 1,
numCoveredExprs = numExprs[1],
valExprNum = exprNum);
// Does this ValueIdUnion cover this expression number
//
numCoveredExprs <= exprNum;
// If not, adjust the indexes and try the next one.
(numCoveredExprs += numExprs[vectorEntryNum + 1],
valExprNum -= numExprs[vectorEntryNum],
vectorEntryNum++));
// At this point:
// vectorEntryNum is index of the transUnionVector for this exprNum.
// valExprNum is index into the ValueIdUnion.
GenAssert(vectorEntryNum > 0 && vectorEntryNum < numUnionLists,
"Transpose: Internal Error");
GenAssert(valExprNum < numExprs[vectorEntryNum],
"Transpose: Internal Error");
// Generate all Value Expressions.
// One will be all of the value expressions indexed by vectorEntryNum
// and valExprNum and the others will be NULL.
//
for(unionListNum = 1; unionListNum < numUnionLists; unionListNum++) {
// For Each ValueIdUnion in an entry of the vector.
//
for(CollIndex numValUnions = 0;
numValUnions < transUnionVector()[unionListNum].entries();
numValUnions++) {
ValueIdUnion *valValIdUnion =
(ValueIdUnion *)((transUnionVector()[unionListNum])[numValUnions].
getValueDesc()->getItemExpr());
if(unionListNum == vectorEntryNum){
// Construct the value expression cast to the proper type.
//
castExpr = new(generator->wHeap())
Cast(valValIdUnion->
#pragma nowarn(1506) // warning elimination
getSource(valExprNum).getValueDesc()->getItemExpr(),
#pragma warn(1506) // warning elimination
&(valValIdUnion->getResult().getType()));
} else {
// Construct NULL expression cast to the proper type.
//
castExpr = new(generator->wHeap())
Cast(new (space) ConstValue(),
&(valValIdUnion->getResult().getType()));
}
// Bind the CASTed item expression.
//
castExpr->bindNode(generator->getBindWA());
// This should never fail at this point !!!
//
GenAssert(! generator->getBindWA()->errStatus(),
"Transpose: Internal Error");
// Add the ValueId to the list of value ids which will be used
// to generate the final expression.
//
ValueIds.insert(castExpr->getValueId());
}
}
// Generate the expression.
// Initialize the pointer to the expression to be generated
// to be NULL.
//
transExprs[exprNum] = 0;
// Generate the expressions.
//
// ValueIds - refers to the expressions for 1 key value, and N
// transpose values, all but one will be NULL.
//
// 0 - Do not add conv. nodes.
//
// transColsAtp - this expression will be evaluated on the
// transColsAtp (0) ATP.
//
// transColsAtpIndex - within the transColsAtp (0) ATP, the destination
// for this move expression will be the transColsAtpIndex TP. This should
// be the last TP of the ATP.
//
// SQLARK_EXPLODED_FORMAT - generate the move expression to construct
// the destination tuple in EXPLODED FORMAT.
//
// transColsTupleLen - This is an output which will contain the length
// of the destination Tuple. This better return the same value for each
// iteration of this loop.
//
// &transExprs[exprNum] - The address of the pointer to the expression
// which will be generated.
//
// &transColsTupleDesc - The address of the tuple descriptor which is
// generated. This describes the destination tuple of the move expression.
// This will be generated only the first time through the loop. The tuple
// better have the same format each time. A NULL value indicates that
// the descriptor should not be generated.
//
// SHORT_FORMAT - generate the transColsTupleDesc in the SHORT FORMAT.
//
expGen->generateContiguousMoveExpr(ValueIds,
0,
transColsAtp,
transColsAtpIndex,
generator->getInternalFormat(),
transColsTupleLen,
&transExprs[exprNum],
(exprNum == 0
? &transColsTupleDesc
: 0),
ExpTupleDesc::SHORT_FORMAT);
// Set the tuple descriptor in the returned CRI descriptor.
// This will be set only the first time through the loop.
//
if(exprNum == 0) {
#pragma nowarn(1506) // warning elimination
returnedCriDesc->setTupleDescriptor(transColsAtpIndex,
#pragma warn(1506) // warning elimination
transColsTupleDesc);
// Take the result (ie. the Cast nodes) of generateContiguousMove
// and set the mapInfo for the ValueIdUnion nodes to be the same so
// that our parent can access the computed expressions
// This code is executed only the first time through the loop.
// The info generated from the expression generator should be the
// same each time through the loop.
//
// An index into the ValueIds that we just generated.
// (It is assumed that the above code inserts the value Ids into
// ValueIds in the same order as the loops below).
//
CollIndex valIdNum = 0;
// For each entry in the transUnionVector...
//
for(unionListNum = 0; unionListNum < numUnionLists; unionListNum++) {
// For each ValueIdUnion in the entry...
//
for(CollIndex numValUnions = 0;
numValUnions < transUnionVector()[unionListNum].entries();
numValUnions++) {
// Get the ValueId of the ValueIdUnion node.
//
ValueId valId =
(transUnionVector()[unionListNum])[numValUnions];
// Add a map entry to the map table for this ValueIdUnion node.
// The mapInfo is the same as the mapInfo of the result of
// generating an expression for the corresponding Cast expression.
//
MapInfo * mapInfo =
generator->addMapInfoToThis(localMapTable,
valId,
generator->getMapInfo(ValueIds[valIdNum])->getAttr());
// Indicate that code was generated for this map table entry.
//
mapInfo->codeGenerated();
// Advance the index to the ValueIds.
//
valIdNum++;
}
}
}
//localMapTable->removeLast();
generator->removeLast();
}
// Generate expression to evaluate predicate on the transposed row
//
ex_expr *afterTransPred = 0;
if (! selectionPred().isEmpty()) {
ItemExpr * newPredTree =
selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&afterTransPred);
}
// Allocate the Transpose TDB
//
queue_index upQ = (queue_index)getDefault(GEN_TRSP_SIZE_UP);
#pragma nowarn(1506) // warning elimination
ComTdbTranspose *transTdb =
new (space) ComTdbTranspose(childTdb,
transExprs,
numMovExprs,
afterTransPred,
transColsTupleLen,
transColsAtpIndex,
givenCriDesc,
returnedCriDesc,
(queue_index)getDefault(GEN_TRSP_SIZE_DOWN),
//(queue_index)getDefault(GEN_TRSP_SIZE_UP),
upQ,
(Cardinality) (getInputCardinality() * getEstRowsUsed()).getValue(),
getDefault(GEN_TRSP_NUM_BUFFERS),
getDefault(GEN_TRSP_BUFFER_SIZE),
space);
#pragma warn(1506) // warning elimination
generator->initTdbFields(transTdb);
// If the child's outputs are not needed above this node,
// remove the entries from the map table.
//
if (transOutputs.isEmpty()) {
// Remove child's outputs from mapTable, They are not needed
// above.
//
generator->removeAll(localMapTable);
NADELETEARRAY(numExprs,numUnionLists,CollIndex,space);
numExprs = NULL;
}
// Add the explain Information for this node to the EXPLAIN
// Fragment. Set the explainTuple pointer in the generator so
// the parent of this node can get a handle on this explainTuple.
//
if(!generator->explainDisabled()) {
generator->setExplainTuple(addExplainInfo(transTdb,
childExplainTuple,
0,
generator));
}
// Restore the Cri Desc's and set the return object.
//
generator->setCriDesc(givenCriDesc, Generator::DOWN);
generator->setCriDesc(returnedCriDesc, Generator::UP);
generator->setGenObj(this, transTdb);
return 0;
}
// -----------------------------------------------------------------------
// PhyPack::codeGen()
// -----------------------------------------------------------------------
short PhyPack::codeGen(Generator* generator)
{
// Get handles on expression generator, map table, and heap allocator.
ExpGenerator* expGen = generator->getExpGenerator();
Space* space = generator->getSpace();
// Allocate a new map table for this operation.
MapTable* localMapTable =
generator->appendAtEnd();
// Get a description of what kind of tuple my parent gives me.
ex_cri_desc* givenCriDesc = generator->getCriDesc(Generator::DOWN);
// PhyPack adds one tupp to what its parent gives in its return tuple.
#pragma nowarn(1506) // warning elimination
unsigned short returnedNoOfTuples = givenCriDesc->noTuples() + 1;
#pragma warn(1506) // warning elimination
ex_cri_desc* returnedCriDesc =
new (space) ex_cri_desc(returnedNoOfTuples,space);
// Get a handle of the last map table before allowing child to code gen.
MapTable* lastMapTable = generator->getLastMapTable();
// Create new attributes for the packed columns.
CollIndex noOfPackedCols = packingExpr().entries();
Attributes** attrs =
new (generator->wHeap()) Attributes * [noOfPackedCols];
// Add the packed columns to the map table. (using ATP0)
for(CollIndex i = 0; i < noOfPackedCols; i++)
attrs[i] = (generator->addMapInfo(packingExpr().at(i),0))->getAttr();
// PhyPack adds one tupp to the tail of its parent's ATP (stored as ATP0)
const Int32 atpIndex = returnedNoOfTuples - 1;
const Int32 atp = 0;
// To store length of the last tupp introduced by PhyPack.
ULng32 tupleLen = 0;
// Will be generated to describe the last tupp introduced by PhyPack.
ExpTupleDesc* tupleDesc = 0;
// From here up we go back into Exploded Internal format (if not there
// already) since it doesn't make sense for column wise rowsets to
// be in Compressed Internal format yet.
generator->setExplodedInternalFormat();
// Fill in offsets and other info in the attributes list and computes the
// length and descriptor of the resulting tuple generated.
// The format of packed rows in rowsets is always Exploded Internal format.
expGen->processAttributes(noOfPackedCols,
attrs,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
tupleLen,
atp,
atpIndex,
&tupleDesc,
ExpTupleDesc::SHORT_FORMAT);
// Copies of Attributes have been made to store in the tuple descriptor.
NADELETEBASIC(attrs,generator->wHeap());
// Store the computed tuple desc for the new tuple added by PhyPack.
#pragma nowarn(1506) // warning elimination
returnedCriDesc->setTupleDescriptor(returnedNoOfTuples - 1,tupleDesc);
#pragma warn(1506) // warning elimination
// Set the DOWN descriptor in the generator to what my child gets from me.
generator->setCriDesc(returnedCriDesc,Generator::DOWN);
// Get the child code generated and retrieve useful stuffs from it.
child(0)->codeGen(generator);
ComTdb* childTdb = (ComTdb*)(generator->getGenObj());
// Generate an expression to pack the rows.
ex_expr* packExpr = 0;
expGen->generateListExpr(packingExpr(),ex_expr::exp_ARITH_EXPR,&packExpr);
// Generate selection predicate on the packed cols if there are any.
ex_expr* predExpr = 0;
if(NOT selectionPred().isEmpty())
{
ItemExpr* pred = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(pred->getValueId(),ex_expr::exp_SCAN_PRED,
&predExpr);
}
//????????? Generate an expression to copy result row to result buffer.
// ex_expr* copyExpr = 0;
// expGen->generateContiguousMoveExpr(packingExpr(),
// -1, // add conv
// 1, // copy to ATP1
// returnedNoOfTuples - 1, // ATP index
// ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
// tupleLen,
// &copyExpr);
//
// Note that my parent is going to find the non-copied version of attributes
// information in my map table, since it is referring to the vid's of the
// originals but not the copies. Coincidentally this is good for us since:
// 1. My parent is expected to see things at ATP0 rather than ATP1.
// 2. The format (or order) of the tuple doesn't get changed after copying
// so that the old attributes are referring to the right offsets.
//
// $$$ Seems copyExpr_ is no longer needed after ComTdbPackRows code was written.
// Values at the child's context as well as the intermediates during expr
// generation are no longer visible any more from this point.
generator->removeAll(lastMapTable);
// Create the Pack node's TDB.
ComTdbPackRows* packTdb = new (space) ComTdbPackRows (childTdb,
packExpr,
predExpr,
#pragma nowarn(1506) // warning elimination
returnedNoOfTuples - 1,
tupleLen,
givenCriDesc,
returnedCriDesc,
(queue_index) 8,
(queue_index) 8);
#pragma warn(1506) // warning elimination
generator->initTdbFields(packTdb);
// Add explain info of this node to the EXPLAIN fragment. Set explainTuple
// pointer in the generator so the parent of this node can get a handle on
// this explainTuple.
//
if(NOT generator->explainDisabled())
{
// Child's explain tuple.
ExplainTuple* childExplainTuple = generator->getExplainTuple();
generator->setExplainTuple(addExplainInfo(packTdb,
childExplainTuple,
0,
generator));
}
// Set up the new up CRI desc.
generator->setCriDesc(returnedCriDesc,Generator::UP);
// Restore original down CRI desc since this node changed it.
generator->setCriDesc(givenCriDesc,Generator::DOWN);
// Return generated object (the TDB) to the generator.
generator->setGenObj(this, packTdb);
return 0;
}
// -----------------------------------------------------------------------
// TableValuedFunction methods
// -----------------------------------------------------------------------
const char * TableValuedFunction::getVirtualTableName()
{ return "??TableValuedFunction??"; }
TrafDesc *TableValuedFunction::createVirtualTableDesc()
{ return NULL; }
void TableValuedFunction::deleteVirtualTableDesc(TrafDesc *vtd)
{
}
// -----------------------------------------------------------------------
// StatisticsFunc methods
// -----------------------------------------------------------------------
const char * StatisticsFunc::getVirtualTableName()
//{ return "STATISTICS__"; }
{return getVirtualTableNameStr();}
TrafDesc *StatisticsFunc::createVirtualTableDesc()
{
TrafDesc * table_desc =
Generator::createVirtualTableDesc(getVirtualTableName(),
ComTdbStats::getVirtTableNumCols(),
ComTdbStats::getVirtTableColumnInfo(),
ComTdbStats::getVirtTableNumKeys(),
ComTdbStats::getVirtTableKeyInfo());
return table_desc;
}
short StatisticsFunc::codeGen(Generator* generator)
{
ExpGenerator * expGen = generator->getExpGenerator();
Space * space = generator->getSpace();
// allocate a map table for the retrieved columns
generator->appendAtEnd();
ex_expr *scanExpr = 0;
ex_expr *projExpr = 0;
ex_cri_desc * givenDesc
= generator->getCriDesc(Generator::DOWN);
ex_cri_desc * returnedDesc
#pragma nowarn(1506) // warning elimination
= new(space) ex_cri_desc(givenDesc->noTuples() + 1, space);
#pragma warn(1506) // warning elimination
// cri descriptor for work atp has 4 entries:
// first two entries for consts and temps.
// Entry 1(index #2) is
// where the stats row will be moved to evaluate
// the 'where' predicate.
// Entry 2(index #3) is where the input row will be built.
ex_cri_desc * workCriDesc = new(space) ex_cri_desc(4, space);
const Int32 work_atp = 1;
const Int32 stats_row_atp_index = 2;
const Int32 input_row_atp_index = 3;
// Assumption (for now): retrievedCols contains ALL columns from
// the table/index. This is because this operator does
// not support projection of columns. Add all columns from this table
// to the map table.
//
// The row retrieved from filesystem is returned as the last entry in
// the returned atp.
Attributes ** attrs =
new(generator->wHeap())
Attributes * [getTableDesc()->getColumnList().entries()];
for (CollIndex i = 0; i < getTableDesc()->getColumnList().entries(); i++)
{
ItemExpr * col_node
= (((getTableDesc()->getColumnList())[i]).getValueDesc())->
getItemExpr();
attrs[i] = (generator->addMapInfo(col_node->getValueId(), 0))->
getAttr();
}
ExpTupleDesc *tupleDesc = 0;
ULng32 tupleLength = 0;
// StatisticsFunc must use Exploded Format for now.
ExpTupleDesc::TupleDataFormat tupleFormat = ExpTupleDesc::SQLARK_EXPLODED_FORMAT;
expGen->processAttributes(getTableDesc()->getColumnList().entries(),
attrs, tupleFormat,
tupleLength,
work_atp, stats_row_atp_index,
&tupleDesc, ExpTupleDesc::LONG_FORMAT);
// delete [] attrs;
// NADELETEBASIC is used because compiler does not support delete[]
// operator yet. Should be changed back later when compiler supports
// it.
NADELETEBASIC(attrs, generator->wHeap());
// add this descriptor to the work cri descriptor.
workCriDesc->setTupleDescriptor(stats_row_atp_index, tupleDesc);
// generate stats selection expression, if present
if (! selectionPred().isEmpty())
{
ItemExpr * newPredTree = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&scanExpr);
}
// generate move expression for the parameter list
ex_expr *inputExpr = 0;
ExpTupleDesc *inputTupleDesc = 0;
ULng32 inputTupleLength = 0;
if (! getProcInputParamsVids().isEmpty())
{
expGen->generateContiguousMoveExpr(getProcInputParamsVids(),
-1,
work_atp,
input_row_atp_index,
tupleFormat,
inputTupleLength,
&inputExpr,
&inputTupleDesc,
ExpTupleDesc::LONG_FORMAT);
// add this descriptor to the work cri descriptor.
workCriDesc->setTupleDescriptor(input_row_atp_index, inputTupleDesc);
}
// The stats row will be returned as the last entry of the returned atp.
// Change the atp and atpindex of the returned values to indicate that.
expGen->assignAtpAndAtpIndex(getTableDesc()->getColumnList(),
0, returnedDesc->noTuples()-1);
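// Allocate a small pool of I/O buffers, each sized to hold several stats tuples.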
Lng32 numBuffers = 3;
#pragma nowarn(1506) // warning elimination
Lng32 bufferSize = 10 * tupleLength;
#pragma warn(1506) // warning elimination
ComTdbStats *statsTdb = new(space) ComTdbStats
(
tupleLength, // Length of stats Tuple
tupleLength, // Length of returned tuple
inputTupleLength, // length of input tuple
givenDesc, // given_cri_desc
returnedDesc, // returned cri desc
8, // Down queue size
16, // Up queue size
numBuffers, // Number of buffers to
// allocate
bufferSize, // Size of each buffer
scanExpr, // predicate
inputExpr,
projExpr,
workCriDesc, // Descriptor of work Atp
stats_row_atp_index,
input_row_atp_index);
generator->initTdbFields(statsTdb);
// Add the explain Information for this node to the EXPLAIN
// Fragment. Set the explainTuple pointer in the generator so
// the parent of this node can get a handle on this explainTuple.
if(!generator->explainDisabled()) {
generator->setExplainTuple(
addExplainInfo(statsTdb, 0, 0, generator));
}
generator->setCriDesc(givenDesc, Generator::DOWN);
generator->setCriDesc(returnedDesc, Generator::UP);
generator->setGenObj(this, statsTdb);
return 0;
}
// -----------------------------------------------------------------------
// ProxyFunc methods
// -----------------------------------------------------------------------
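// Fill in the key descriptor for the proxy table's single key column:
// key sequence 1, table column 0, ascending order.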
static void initProxyKeyDescStruct(TrafKeysDesc *tgt, ComUInt32& src)
{
tgt->keyseqnumber = 1;
tgt->tablecolnumber = 0;
tgt->setDescending(FALSE);
}
static Lng32 createDescStructsForProxy(const ProxyFunc &proxy,
char *tableName,
TrafDesc *&colDescs,
TrafDesc *&keyDescs)
{
colDescs = NULL;
keyDescs = NULL;
Lng32 reclen = 0;
TrafDesc *prev_desc = NULL;
ComUInt32 numCols = proxy.getNumColumns();
// Creates and populates column descs
proxy.populateColumnDesc(tableName, colDescs, reclen);
// Create key descs
prev_desc = NULL;
numCols = 1;
for (ComUInt32 keyNum = 0; keyNum < numCols; keyNum++)
{
TrafDesc *key_desc = TrafAllocateDDLdesc(DESC_KEYS_TYPE, NULL);
if (prev_desc)
prev_desc->next = key_desc;
else
keyDescs = key_desc;
prev_desc = key_desc;
initProxyKeyDescStruct(key_desc->keysDesc(), keyNum);
}
return reclen;
}
TrafDesc *ProxyFunc::createVirtualTableDesc()
{
// TrafAllocateDDLdesc() requires that HEAP (STMTHEAP)
// be used for operator new herein
TrafDesc *table_desc = TrafAllocateDDLdesc(DESC_TABLE_TYPE, NULL);
const char *tableName = getVirtualTableName();
table_desc->tableDesc()->tablename = new HEAP char[strlen(tableName)+1];
strcpy(table_desc->tableDesc()->tablename, tableName);
table_desc->tableDesc()->setSystemTableCode(TRUE);
TrafDesc *files_desc = TrafAllocateDDLdesc(DESC_FILES_TYPE, NULL);
files_desc->filesDesc()->setAudited(TRUE); // audited table
table_desc->tableDesc()->files_desc = files_desc;
TrafDesc *cols_descs = NULL;
TrafDesc *keys_descs = NULL;
table_desc->tableDesc()->colcount = (Int32) getNumColumns();
table_desc->tableDesc()->record_length =
createDescStructsForProxy(*this,
table_desc->tableDesc()->tablename,
cols_descs,
keys_descs);
TrafDesc *index_desc = TrafAllocateDDLdesc(DESC_INDEXES_TYPE, NULL);
index_desc->indexesDesc()->tablename =
table_desc->tableDesc()->tablename;
index_desc->indexesDesc()->indexname =
table_desc->tableDesc()->tablename;
index_desc->indexesDesc()->keytag = 0; // primary index
index_desc->indexesDesc()->record_length =
table_desc->tableDesc()->record_length;
index_desc->indexesDesc()->colcount =
table_desc->tableDesc()->colcount;
index_desc->indexesDesc()->blocksize = 4096; // doesn't matter.
// cannot simply point to same files desc as the table one,
// because then ReadTableDef::deleteTree frees same memory twice (error)
TrafDesc *i_files_desc = TrafAllocateDDLdesc(DESC_FILES_TYPE, NULL);
i_files_desc->filesDesc()->setAudited(TRUE); // audited table
index_desc->indexesDesc()->files_desc = i_files_desc;
index_desc->indexesDesc()->keys_desc = keys_descs;
table_desc->tableDesc()->columns_desc = cols_descs;
table_desc->tableDesc()->indexes_desc = index_desc;
return table_desc;
}
short PhysicalExtractSource::codeGen(Generator *)
{
GenAssert(0, "PhysicalExtractSource::codeGen() should never be called");
return 0;
}
/////////////////////////////////////////////////////////
//
// ControlRunningQuery::codeGen()
//
/////////////////////////////////////////////////////////
short ControlRunningQuery::codeGen(Generator * generator)
{
Space * space = generator->getSpace();
GenAssert((child(0) == NULL) && (child(1) == NULL),
"ControlRunningQuery does not expect any child.");
char *qid =
space->allocateAndCopyToAlignedSpace(queryId_, str_len(queryId_), 0);
char *pname =
space->allocateAndCopyToAlignedSpace(pname_, str_len(pname_), 0);
char *comment =
space->allocateAndCopyToAlignedSpace(comment_, str_len(comment_), 0);
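// Map the requested action to an executor cancel action. In this code path
// only CANCEL BY QID yields a valid action; anything else is rejected below
// with error 1010.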
ComTdbCancel::Action a = ComTdbCancel::InvalidAction;
switch (action_)
{
case Cancel:
{
switch (qs_)
{
case ControlQid:
a = ComTdbCancel::CancelByQid;
break;
case ControlPname:
case ControlNidPid:
break;
default:
GenAssert(0, "Invalid ControlRunningQuery::qs_"); break;
}
break;
}
case Suspend:
case Activate:
break;
default: GenAssert(0, "Invalid ControlRunningQuery::action_");
}
if (a == ComTdbCancel::InvalidAction)
{
*CmpCommon::diags() << DgSqlCode(-1010);
GenExit();
return -1;
}
ComTdbCancel * exe_cancel_tdb = new(space)
ComTdbCancel(qid, pname, nid_, pid_,
getDefault(CANCEL_MINIMUM_BLOCKING_INTERVAL),
a, forced_, comment,
generator->getCriDesc(Generator::DOWN),
generator->getCriDesc(Generator::DOWN),
(queue_index)getDefault(GEN_DDL_SIZE_DOWN),
(queue_index)getDefault(GEN_DDL_SIZE_UP)
);
generator->initTdbFields(exe_cancel_tdb);
// no tupps are returned
generator->setCriDesc((ex_cri_desc *)
(generator->getCriDesc(Generator::DOWN)),
Generator::UP);
generator->setGenObj(this, exe_cancel_tdb);
if(!generator->explainDisabled()) {
generator->setExplainTuple(
addExplainInfo(exe_cancel_tdb, 0, 0, generator));
}
return 0;
}
| 1 | 16,394 | This is fine, but maybe we should change the variable name to "bufferSize_as_uint"? Or "...uint32"? | apache-trafodion | cpp |
@@ -57,9 +57,10 @@ module Blacklight::SolrResponse::Spelling
# origFreq =>
# suggestion => [{ frequency =>, word => }] # for extended results
# suggestion => ['word'] # for non-extended results
- if suggestions.index("correctlySpelled")
+ origFreq = term_info['origFreq']
+ if term_info['suggestion'].first.is_a?(Hash) or suggestions.index("correctlySpelled")
word_suggestions << term_info['suggestion'].map do |suggestion|
- suggestion['word'] if suggestion['freq'] > term_info['origFreq']
+ suggestion['word'] if suggestion['freq'] > origFreq
end
else
# only extended suggestions have frequency so we just return all suggestions | 1 | # A mixin for making access to the spellcheck component data easy.
#
# response.spelling.words
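# response.spelling.collation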
#
module Blacklight::SolrResponse::Spelling
def spelling
@spelling ||= Base.new(self)
end
class Base
attr :response
def initialize(response)
@response = response
end
# returns an array of spelling suggestions for specific query words,
# as provided in the solr response. Only includes words with a higher
# frequency of occurrence than the word in the original query.
# can't do a full query suggestion because we only get info for each word;
# combination of words may not have results.
# Thanks to Naomi Dushay!
def words
@words ||= (
word_suggestions = []
spellcheck = self.response[:spellcheck]
if spellcheck && spellcheck[:suggestions]
suggestions = spellcheck[:suggestions]
unless suggestions.nil?
# suggestions is an array:
# (query term)
# (hash of term info and term suggestion)
# ...
# (query term)
# (hash of term info and term suggestion)
# 'correctlySpelled'
# true/false
# collation
# (suggestion for collation)
if suggestions.index("correctlySpelled") #if extended results
i_stop = suggestions.index("correctlySpelled")
elsif suggestions.index("collation")
i_stop = suggestions.index("collation")
else
i_stop = suggestions.length
end
# step through array in 2s to get info for each term
0.step(i_stop-1, 2) do |i|
term = suggestions[i]
term_info = suggestions[i+1]
# term_info is a hash:
# numFound =>
# startOffset =>
# endOffset =>
# origFreq =>
# suggestion => [{ frequency =>, word => }] # for extended results
# suggestion => ['word'] # for non-extended results
if suggestions.index("correctlySpelled")
word_suggestions << term_info['suggestion'].map do |suggestion|
suggestion['word'] if suggestion['freq'] > term_info['origFreq']
end
else
# only extended suggestions have frequency so we just return all suggestions
word_suggestions << term_info['suggestion']
end
end
end
end
word_suggestions.flatten.compact.uniq
)
end
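# Returns the collated (whole-query) suggestion from the Solr response, if one was provided.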
def collation
# FIXME: DRY up with words
spellcheck = self.response[:spellcheck]
if spellcheck && spellcheck[:suggestions]
suggestions = spellcheck[:suggestions]
unless suggestions.nil?
if suggestions.index("collation")
suggestions[suggestions.index("collation") + 1]
end
end
end
end
end
end
| 1 | 5,746 | Use snake_case for variable names. | projectblacklight-blacklight | rb |
@@ -1095,7 +1095,7 @@ namespace pwiz.SkylineTestA
return count;
}
- [TestMethod]
+ //[TestMethod]
public void ConsoleBadRawFileImportTest()
{
// Run this test only if we can read Thermo's raw files | 1 | /*
* Original author: John Chilton <jchilton .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2011 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using pwiz.Common.Collections;
using pwiz.Common.DataBinding;
using pwiz.Common.SystemUtil;
using pwiz.Skyline;
using pwiz.Skyline.Model;
using pwiz.Skyline.Model.Databinding;
using pwiz.Skyline.Model.DocSettings;
using pwiz.Skyline.Model.DocSettings.Extensions;
using pwiz.Skyline.Model.Results;
using pwiz.Skyline.Model.Tools;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
using pwiz.Skyline.Util.Extensions;
using pwiz.SkylineTestUtil;
namespace pwiz.SkylineTestA
{
/// <summary>
/// Functional tests for Skyline's command-line argument processing, import and export behavior
/// </summary>
[TestClass]
public class CommandLineTest : AbstractUnitTest
{
protected override void Initialize()
{
Settings.Default.ToolList.Clear();
}
protected override void Cleanup()
{
Settings.Default.ToolList.Clear();
}
private const string ZIP_FILE = @"TestA\Results\FullScan.zip";
private const string COMMAND_FILE = @"TestA\CommandLineTest.zip";
[TestMethod]
public void ConsoleReplicateOutTest()
{
var testFilesDir = new TestFilesDir(TestContext, ZIP_FILE);
string docPath = testFilesDir.GetTestPath("BSA_Protea_label_free_20100323_meth3_multi.sky");
string outPath = testFilesDir.GetTestPath("Imported_single.sky");
// Import the first RAW file (or mzML for international)
string rawPath = testFilesDir.GetTestPath("ah_20101011y_BSA_MS-MS_only_5-2" +
ExtensionTestContext.ExtThermoRaw);
RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--import-replicate-name=Single",
"--out=" + outPath);
SrmDocument doc = ResultsUtil.DeserializeDocument(outPath);
AssertEx.IsDocumentState(doc, 0, 2, 7, 7, 49);
AssertResult.IsDocumentResultsState(doc, "Single", 3, 3, 0, 21, 0);
//Test --import-append
var dataFile2 = testFilesDir.GetTestPath("ah_20101029r_BSA_CID_FT_centroid_3uscan_3" +
ExtensionTestContext.ExtThermoRaw);
RunCommand("--in=" + outPath,
"--import-file=" + dataFile2,
"--import-replicate-name=Single",
"--import-append",
"--save");
doc = ResultsUtil.DeserializeDocument(outPath);
AssertEx.IsDocumentState(doc, 0, 2, 7, 7, 49);
AssertResult.IsDocumentResultsState(doc, "Single", 6, 6, 0, 42, 0);
Assert.AreEqual(1, doc.Settings.MeasuredResults.Chromatograms.Count);
}
[TestMethod]
public void ConsoleRemoveResultsTest()
{
var testFilesDir = new TestFilesDir(TestContext, ZIP_FILE);
string docPath = testFilesDir.GetTestPath("Remove_Test.sky");
string outPath = testFilesDir.GetTestPath("Remove_Test_Out.sky");
string[] allFiles =
{
"FT_2012_0311_RJ_01.raw",
"FT_2012_0311_RJ_02.raw",
"FT_2012_0311_RJ_07.raw",
"FT_2012_0316_RJ_01_120316125013.raw",
"FT_2012_0316_RJ_01_120316131853.raw",
"FT_2012_0316_RJ_01_120316132340.raw",
"FT_2012_0316_RJ_02.raw",
"FT_2012_0316_RJ_09.raw",
"FT_2012_0316_RJ_10.raw",
};
string[] removedFiles =
{
"FT_2012_0311_RJ_01.raw",
"FT_2012_0311_RJ_02.raw",
"FT_2012_0311_RJ_07.raw"
};
string output = RunCommand("--in=" + docPath,
"--remove-before=" + DateTime.Parse("3/16/2012", CultureInfo.InvariantCulture),
"--out=" + outPath);
SrmDocument doc = ResultsUtil.DeserializeDocument(outPath);
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Error));
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Warning));
// check for removed filenames
// Assert.AreEqual(removedFiles.Count(), Regex.Matches(output, "\nRemoved").Count); L10N problem
AssertEx.Contains(output, removedFiles);
AssertEx.IsDocumentState(doc, 0, 1, 5, 5, 15);
Assert.AreEqual(6, doc.Settings.MeasuredResults.Chromatograms.Count);
// try to remove all
output = RunCommand("--in=" + docPath,
"--remove-before=" + DateTime.Parse("3/16/2013", CultureInfo.InvariantCulture),
"--out=" + outPath);
doc = ResultsUtil.DeserializeDocument(outPath);
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Error));
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Warning));
// Assert.AreEqual(allFiles.Count(), Regex.Matches(output, "\nRemoved").Count); L10N problem
AssertEx.Contains(output, allFiles);
Assert.IsNull(doc.Settings.MeasuredResults);
}
// TODO: Enable this again once file locking issues have been resolved
//[TestMethod]
public void ConsoleSetLibraryTest()
{
var testFilesDir = new TestFilesDir(TestContext, ZIP_FILE);
string docPath = testFilesDir.GetTestPath("BSA_Protea_label_free_20100323_meth3_multi.sky");
string outPath = testFilesDir.GetTestPath("SetLib_Out.sky");
string libPath = testFilesDir.GetTestPath("sample.blib");
string libPath2 = testFilesDir.GetTestPath("sample2.blib");
const string libName = "namedlib";
string fakePath = docPath + ".fake";
string libPathRedundant = testFilesDir.GetTestPath("sample.redundant.blib");
// Test error (name without path)
string output = RunCommand("--in=" + docPath,
"--add-library-name=" + libName,
"--out=" + outPath);
CheckRunCommandOutputContains(Resources.CommandLine_SetLibrary_Error__Cannot_set_library_name_without_path_, output);
// Test error (file does not exist)
output = RunCommand("--in=" + docPath,
"--add-library-path=" + fakePath,
"--out=" + outPath);
CheckRunCommandOutputContains(string.Format(Resources.CommandLine_SetLibrary_Error__The_file__0__does_not_exist_, fakePath), output);
// Test error (file does not exist)
output = RunCommand("--in=" + docPath,
"--add-library-path=" + libPathRedundant,
"--out=" + outPath);
CheckRunCommandOutputContains(Resources.CommandLineTest_ConsoleAddFastaTest_Error, output);
// Test error (unsupported library format)
output = RunCommand("--in=" + docPath,
"--add-library-path=" + docPath,
"--out=" + outPath);
CheckRunCommandOutputContains(string.Format(Resources.CommandLine_SetLibrary_Error__The_file__0__is_not_a_supported_spectral_library_file_format_,docPath), output);
// Test add library without name
output = RunCommand("--in=" + docPath,
"--add-library-path=" + libPath,
"--out=" + outPath);
SrmDocument doc = ResultsUtil.DeserializeDocument(outPath);
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Error));
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Warning));
AssertEx.IsDocumentState(doc, 0, 2, 7, 7, 49);
Assert.AreEqual(doc.Settings.PeptideSettings.Libraries.Libraries.Count,
doc.Settings.PeptideSettings.Libraries.LibrarySpecs.Count);
Assert.AreEqual(1, doc.Settings.PeptideSettings.Libraries.LibrarySpecs.Count);
Assert.AreEqual(Path.GetFileNameWithoutExtension(libPath), doc.Settings.PeptideSettings.Libraries.LibrarySpecs[0].Name);
Assert.AreEqual(libPath, doc.Settings.PeptideSettings.Libraries.LibrarySpecs[0].FilePath);
// Add another library with name
output = RunCommand("--in=" + outPath,
"--add-library-name=" + libName,
"--add-library-path=" + libPath2,
"--save");
doc = ResultsUtil.DeserializeDocument(outPath);
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Error));
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Warning));
AssertEx.IsDocumentState(doc, 0, 2, 7, 7, 49);
Assert.AreEqual(doc.Settings.PeptideSettings.Libraries.Libraries.Count,
doc.Settings.PeptideSettings.Libraries.LibrarySpecs.Count);
Assert.AreEqual(2, doc.Settings.PeptideSettings.Libraries.LibrarySpecs.Count);
Assert.AreEqual(Path.GetFileNameWithoutExtension(libPath), doc.Settings.PeptideSettings.Libraries.LibrarySpecs[0].Name);
Assert.AreEqual(libPath, doc.Settings.PeptideSettings.Libraries.LibrarySpecs[0].FilePath);
Assert.AreEqual(libName, doc.Settings.PeptideSettings.Libraries.LibrarySpecs[1].Name);
Assert.AreEqual(libPath2, doc.Settings.PeptideSettings.Libraries.LibrarySpecs[1].FilePath);
// Test error (library with conflicting name)
output = RunCommand("--in=" + outPath,
"--add-library-path=" + libPath,
"--out=" + outPath);
CheckRunCommandOutputContains(Resources.CommandLine_SetLibrary_Error__The_library_you_are_trying_to_add_conflicts_with_a_library_already_in_the_file_, output);
}
[TestMethod]
public void ConsoleAddFastaTest()
{
var testFilesDir = new TestFilesDir(TestContext, ZIP_FILE);
string docPath = testFilesDir.GetTestPath("BSA_Protea_label_free_20100323_meth3_multi.sky");
string outPath = testFilesDir.GetTestPath("AddFasta_Out.sky");
string fastaPath = testFilesDir.GetTestPath("sample.fasta");
string output = RunCommand("--in=" + docPath,
"--import-fasta=" + fastaPath,
"--keep-empty-proteins",
"--out=" + outPath);
SrmDocument doc = ResultsUtil.DeserializeDocument(outPath);
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Error));
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Warning));
// Before import, there are 2 peptides. 3 peptides after
AssertEx.IsDocumentState(doc, 0, 3, 7, 7, 49);
// Test without keep empty proteins
output = RunCommand("--in=" + docPath,
"--import-fasta=" + fastaPath,
"--out=" + outPath);
doc = ResultsUtil.DeserializeDocument(outPath);
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Error));
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Warning));
AssertEx.IsDocumentState(doc, 0, 2, 7, 7, 49);
}
[TestMethod]
public void ConsoleReportExportTest()
{
var testFilesDir = new TestFilesDir(TestContext, ZIP_FILE);
string docPath = testFilesDir.GetTestPath("BSA_Protea_label_free_20100323_meth3_multi.sky");
string outPath = testFilesDir.GetTestPath("Exported_test_report.csv");
// Import the first RAW file (or mzML for international)
string rawPath = testFilesDir.GetTestPath("ah_20101011y_BSA_MS-MS_only_5-2" +
ExtensionTestContext.ExtThermoRaw);
const string replicate = "Single";
//Before generating this report, check that it exists
string reportName = Resources.ReportSpecList_GetDefaults_Peptide_Ratio_Results;
Settings.Default.PersistedViews.ResetDefaults();
Assert.IsNotNull(Settings.Default.PersistedViews.GetViewSpecList(PersistedViews.MainGroup.Id)
.GetView(Resources.ReportSpecList_GetDefaults_Peptide_Ratio_Results));
//First, programmatically generate the report
SrmDocument doc = ResultsUtil.DeserializeDocument(docPath);
//Attach replicate
var commandLine = new CommandLine();
using (var docContainer = new ResultsTestDocumentContainer(doc, docPath))
{
commandLine.ImportResults(docContainer, replicate, MsDataFileUri.Parse(rawPath), null);
docContainer.WaitForComplete();
docContainer.AssertComplete(); // No errors
doc = docContainer.Document;
}
MemoryDocumentContainer memoryDocumentContainer = new MemoryDocumentContainer();
Assert.IsTrue(memoryDocumentContainer.SetDocument(doc, memoryDocumentContainer.Document));
SkylineDataSchema skylineDataSchema = new SkylineDataSchema(memoryDocumentContainer, SkylineDataSchema.GetLocalizedSchemaLocalizer());
DocumentGridViewContext viewContext = new DocumentGridViewContext(skylineDataSchema);
ViewInfo viewInfo = viewContext.GetViewInfo(PersistedViews.MainGroup.Id.ViewName(reportName));
StringWriter writer = new StringWriter();
IProgressStatus status = new ProgressStatus("Exporting report");
viewContext.Export(CancellationToken.None, null, ref status, viewInfo, writer, viewContext.GetCsvWriter());
var programmaticReport = writer.ToString();
RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--import-replicate-name=" + replicate,
"--report-name=" + reportName,
"--report-format=CSV",
"--report-file=" + outPath);
string reportLines = File.ReadAllText(outPath);
AssertEx.NoDiff(reportLines, programmaticReport);
}
[TestMethod]
public void ConsoleChromatogramExportTest()
{
var testFilesDir = new TestFilesDir(TestContext, ZIP_FILE);
string docPath = testFilesDir.GetTestPath("BSA_Protea_label_free_20100323_meth3_multi.sky");
string outPath = testFilesDir.GetTestPath("Exported_chromatograms.csv");
// Import the first RAW file (or mzML for international)
string rawFile = "ah_20101011y_BSA_MS-MS_only_5-2" + ExtensionTestContext.ExtThermoRaw;
string rawPath = testFilesDir.GetTestPath(rawFile);
const string replicate = "Single";
//Attach replicate
SrmDocument doc = ResultsUtil.DeserializeDocument(docPath);
var commandLine = new CommandLine();
using (var docContainer = new ResultsTestDocumentContainer(doc, docPath))
{
commandLine.ImportResults(docContainer, replicate, MsDataFileUri.Parse(rawPath), null);
docContainer.WaitForComplete();
docContainer.AssertComplete(); // No errors
doc = docContainer.Document;
}
//First, programmatically generate the report
var chromFiles = new[] { rawFile };
var chromExporter = new ChromatogramExporter(doc);
var chromExtractors = new[] { ChromExtractor.summed, ChromExtractor.base_peak };
var chromSources = new[] { ChromSource.ms1, ChromSource.fragment };
var chromBuffer = new StringBuilder();
using (var chromWriter = new StringWriter(chromBuffer))
{
chromExporter.Export(chromWriter, null, chromFiles, LocalizationHelper.CurrentCulture, chromExtractors,
chromSources);
}
CollectionUtil.ForEach(doc.Settings.MeasuredResults.ReadStreams, s => s.CloseStream());
string programmaticReport = chromBuffer.ToString();
RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--import-replicate-name=" + replicate,
"--chromatogram-file=" + outPath,
"--chromatogram-precursors",
"--chromatogram-products",
"--chromatogram-base-peaks",
"--chromatogram-tics");
string chromLines = File.ReadAllText(outPath);
AssertEx.NoDiff(chromLines, programmaticReport);
}
[TestMethod]
public void ConsoleAddDecoysTest()
{
var testFilesDir = new TestFilesDir(TestContext, ZIP_FILE);
string docPath = testFilesDir.GetTestPath("BSA_Protea_label_free_20100323_meth3_multi.sky");
string outPath = testFilesDir.GetTestPath("DecoysAdded.sky");
string output = RunCommand("--in=" + docPath,
"--decoys-add",
"--out=" + outPath);
const int expectedPeptides = 7;
AssertEx.Contains(output, string.Format(Resources.CommandLine_AddDecoys_Added__0__decoy_peptides_using___1___method,
expectedPeptides, DecoyGeneration.REVERSE_SEQUENCE));
output = RunCommand("--in=" + docPath,
"--decoys-add=" + CommandArgs.ARG_DECOYS_ADD_VALUE_REVERSE);
AssertEx.Contains(output, string.Format(Resources.CommandLine_AddDecoys_Added__0__decoy_peptides_using___1___method,
expectedPeptides, DecoyGeneration.REVERSE_SEQUENCE));
output = RunCommand("--in=" + docPath,
"--decoys-add=" + CommandArgs.ARG_DECOYS_ADD_VALUE_SHUFFLE);
AssertEx.Contains(output, string.Format(Resources.CommandLine_AddDecoys_Added__0__decoy_peptides_using___1___method,
expectedPeptides, DecoyGeneration.SHUFFLE_SEQUENCE));
const string badDecoyMethod = "shift";
output = RunCommand("--in=" + docPath,
"--decoys-add=" + badDecoyMethod);
AssertEx.Contains(output, string.Format(Resources.CommandArgs_ParseArgsInternal_Error__Invalid_value___0___for__1___use___2___or___3___, badDecoyMethod,
CommandArgs.ArgText(CommandArgs.ARG_DECOYS_ADD), CommandArgs.ARG_DECOYS_ADD_VALUE_REVERSE, CommandArgs.ARG_DECOYS_ADD_VALUE_SHUFFLE));
output = RunCommand("--in=" + outPath,
"--decoys-add");
AssertEx.Contains(output, Resources.CommandLine_AddDecoys_Error__Attempting_to_add_decoys_to_document_with_decoys_);
int tooManyPeptides = expectedPeptides + 1;
output = RunCommand("--in=" + docPath,
"--decoys-add",
"--decoys-add-count=" + tooManyPeptides);
AssertEx.Contains(output, string.Format(Resources.CommandLine_AddDecoys_Error_The_number_of_peptides,
tooManyPeptides, 7, CommandArgs.ArgText(CommandArgs.ARG_DECOYS_ADD), CommandArgs.ARG_DECOYS_ADD_VALUE_SHUFFLE));
const int expectFewerPeptides = 4;
output = RunCommand("--in=" + docPath,
"--decoys-add",
"--decoys-add-count=" + expectFewerPeptides);
AssertEx.Contains(output, string.Format(Resources.CommandLine_AddDecoys_Added__0__decoy_peptides_using___1___method,
expectFewerPeptides, DecoyGeneration.REVERSE_SEQUENCE));
}
[TestMethod]
public void ConsoleMassListTest()
{
var testFilesDir = new TestFilesDir(TestContext, ZIP_FILE);
string docPath = testFilesDir.GetTestPath("BSA_Protea_label_free_20100323_meth3_multi.sky");
var doc = ResultsUtil.DeserializeDocument(docPath);
// Import the first RAW file (or mzML for international)
string rawPath = testFilesDir.GetTestPath("ah_20101011y_BSA_MS-MS_only_5-2" +
ExtensionTestContext.ExtThermoRaw);
/////////////////////////
// Thermo test
string thermoPath = testFilesDir.GetTestPath("Thermo_test.csv");
string output = RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--exp-translist-instrument=" + ExportInstrumentType.THERMO,
"--exp-file=" + thermoPath);
CheckRunCommandOutputContains(string.Format(Resources.CommandLine_ExportInstrumentFile_List__0__exported_successfully_, "Thermo_test.csv"), output);
Assert.IsTrue(File.Exists(thermoPath));
Assert.AreEqual(doc.MoleculeTransitionCount, File.ReadAllLines(thermoPath).Length);
/////////////////////////
// Agilent test
string agilentPath = testFilesDir.GetTestPath("Agilent_test.csv");
output = RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--exp-translist-instrument=" + ExportInstrumentType.AGILENT,
"--exp-file=" + agilentPath,
"--exp-dwell-time=20");
//check for success
CheckRunCommandOutputContains(string.Format(Resources.CommandLine_ExportInstrumentFile_List__0__exported_successfully_, "Agilent_test.csv"), output);
Assert.IsTrue(File.Exists(agilentPath));
Assert.AreEqual(doc.MoleculeTransitionCount + 1, File.ReadAllLines(agilentPath).Length);
/////////////////////////
// AB Sciex test
string sciexPath = testFilesDir.GetTestPath("AB_Sciex_test.csv");
output = RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--exp-translist-instrument=" + ExportInstrumentType.ABI,
"--exp-file=" + sciexPath,
"--exp-dwell-time=20");
//check for success
CheckRunCommandOutputContains(string.Format(Resources.CommandLine_ExportInstrumentFile_List__0__exported_successfully_, "AB_Sciex_test.csv"), output);
Assert.IsTrue(File.Exists(sciexPath));
Assert.AreEqual(doc.MoleculeTransitionCount, File.ReadAllLines(sciexPath).Length);
/////////////////////////
// Waters test
string watersPath = testFilesDir.GetTestPath("Waters_test.csv");
var cmd = new[] {
"--in=" + docPath,
"--exp-translist-instrument=" + ExportInstrumentType.WATERS,
"--exp-file=" + watersPath,
"--exp-run-length=100"
};
output = RunCommand(cmd);
//check for success
CheckRunCommandOutputContains(string.Format(Resources.CommandLine_ExportInstrumentFile_List__0__exported_successfully_, "Waters_test.csv"), output);
Assert.IsTrue(File.Exists(watersPath));
Assert.AreEqual(doc.MoleculeTransitionCount + 1, File.ReadAllLines(watersPath).Length);
// Run it again as a mixed polarity document
MixedPolarityTest(doc, testFilesDir, docPath, watersPath, cmd, false, false);
}
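// Re-runs the given export command against a small-molecule copy of the document
// in which some precursors are negatively charged, once per --exp-polarity value,
// and verifies the exported output.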
private static void MixedPolarityTest(SrmDocument doc, TestFilesDir testFilesDir, string inPath, string outPath, string[]cmds,
bool precursorsOnly, bool isMethod)
{
var refine = new RefinementSettings();
var docMixed = refine.ConvertToSmallMolecules(doc, testFilesDir.FullPath, RefinementSettings.ConvertToSmallMoleculesMode.formulas,
RefinementSettings.ConvertToSmallMoleculesChargesMode.invert_some); // Convert every other transition group to negative charge
var xml = string.Empty;
AssertEx.RoundTrip(docMixed, ref xml);
var skyExt = Path.GetExtension(inPath) ?? string.Empty;
var docPathMixed = inPath.Replace(skyExt, "_mixed_polarity"+skyExt);
File.WriteAllText(docPathMixed, xml);
var ext = Path.GetExtension(outPath)??string.Empty;
foreach (var polarityFilter in Helpers.GetEnumValues<ExportPolarity>().Reverse())
{
var outname = "polarity_test_" + polarityFilter + ext;
var outPathMixed = testFilesDir.GetTestPath(outname);
var args = new List<string>(cmds.Select(c => c.Replace(inPath, docPathMixed).Replace(outPath, outPathMixed))) { "--exp-polarity=" + polarityFilter };
var output = RunCommand(args.ToArray());
if (polarityFilter == ExportPolarity.separate && !isMethod)
{
outname = outname.Replace(ext, "*" + ext); // Will create multiple files
}
CheckRunCommandOutputContains(
string.Format(isMethod ?
Resources.CommandLine_ExportInstrumentFile_Method__0__exported_successfully_ :
Resources.CommandLine_ExportInstrumentFile_List__0__exported_successfully_, outname), output);
if (polarityFilter == ExportPolarity.separate)
{
PolarityFilterCheck(docMixed, outPathMixed, ExportPolarity.negative, ExportPolarity.separate, precursorsOnly, isMethod);
PolarityFilterCheck(docMixed, outPathMixed, ExportPolarity.positive, ExportPolarity.separate, precursorsOnly, isMethod);
}
else
{
PolarityFilterCheck(docMixed, outPathMixed, polarityFilter, polarityFilter, precursorsOnly, isMethod);
}
}
}
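// Verifies that the exported list (or method directory) exists and contains the
// expected number of entries for the requested polarity filter.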
private static void PolarityFilterCheck(SrmDocument docMixed, string path, ExportPolarity polarityFilter, ExportPolarity mode, bool precursorsOnly, bool isMethod)
{
var expected = 0;
var nPositive = precursorsOnly
? docMixed.MoleculeTransitionGroups.Count(t => t.TransitionGroup.PrecursorCharge > 0)
: docMixed.MoleculeTransitions.Count(t => t.Transition.Charge > 0);
var nNegative = precursorsOnly
? docMixed.MoleculeTransitionGroups.Count(t => t.TransitionGroup.PrecursorCharge < 0)
: docMixed.MoleculeTransitions.Count(t => t.Transition.Charge < 0);
if (polarityFilter != ExportPolarity.positive)
{
expected += nNegative;
}
if (polarityFilter != ExportPolarity.negative)
{
expected += nPositive;
}
var ext = Path.GetExtension(path) ?? string.Empty;
if (mode == ExportPolarity.separate)
{
// Expect a pair of files
path = path.Replace(ext, string.Format("_{0}_{1:0000}{2}", ExportPolarity.negative, 1, ext));
if (isMethod)
{
Assert.IsTrue(Directory.Exists(path));
}
else
{
Assert.IsTrue(File.Exists(path));
Assert.AreEqual(nNegative + 1, File.ReadAllLines(path).Length, polarityFilter.ToString());
}
path = path.Replace(ExportPolarity.negative.ToString(), ExportPolarity.positive.ToString());
expected = nPositive;
}
else if (isMethod)
{
path = path.Replace(ext, "_0001"+ext);
}
if (isMethod)
{
Assert.IsTrue(Directory.Exists(path));
}
else
{
Assert.IsTrue(File.Exists(path));
Assert.AreEqual(expected + 1, File.ReadAllLines(path).Count(l => !string.IsNullOrEmpty(l)), polarityFilter.ToString());
}
}
[TestMethod]
public void ConsoleMethodTest()
{
//Here I'll only test Agilent for now
var commandFilesDir = new TestFilesDir(TestContext, COMMAND_FILE);
/////////////////////////
// Thermo test
// var testFilesDir = new TestFilesDir(TestContext, ZIP_FILE);
// string docPath = testFilesDir.GetTestPath("BSA_Protea_label_free_20100323_meth3_multi.sky");
// string thermoTemplate = commandFilesDir.GetTestPath("20100329_Protea_Peptide_targeted.meth");
// string thermoOut = commandFilesDir.GetTestPath("Thermo_test.meth");
// output = RunCommand("--in=" + docPath,
// "--import-file=" + rawPath,
// "--exp-method-instrument=Thermo LTQ",
// "--exp-template=" + thermoTemplate,
// "--exp-file=" + thermoOut,
// "--exp-strategy=buckets",
// "--exp-max-trans=130",
// "--exp-optimizing=ce",
// "--exp-full-scans");
//
// check for success
// CheckRunCommandOutputContains("successfully.", output);
/////////////////////////
// Agilent test
string docPath2 = commandFilesDir.GetTestPath("WormUnrefined.sky");
string agilentTemplate = commandFilesDir.GetTestPath("43mm-40nL-30min-opt.m");
string agilentOut = commandFilesDir.GetTestPath("Agilent_test.m");
// Try this a few times, because Agilent method building seems to fail under stress
// about 10% of the time.
bool success = false;
string output = "";
for (int i = 0; !success && i < 3; i++)
{
var cmd = new[] {"--in=" + docPath2,
"--exp-method-instrument=Agilent 6400 Series",
"--exp-template=" + agilentTemplate,
"--exp-file=" + agilentOut,
"--exp-dwell-time=20",
"--exp-strategy=buckets",
"--exp-max-trans=75"};
output = RunCommand(cmd);
//check for success
success = output.Contains(string.Format(Resources.CommandLine_ExportInstrumentFile_Method__0__exported_successfully_, "Agilent_test.m"));
// Relax a bit if things aren't going well.
if (!success)
Thread.Sleep(5000);
else
{
try
{
// Run it again as a mixed polarity document
var doc = ResultsUtil.DeserializeDocument(docPath2);
MixedPolarityTest(doc, commandFilesDir, docPath2, agilentOut, cmd, false, true);
}
catch (Exception)
{
success = false; // Allow for retries
}
}
}
if (!success)
{
// ReSharper disable LocalizableElement
Console.WriteLine("Failed to write Agilent method: {0}", output); // Not L10N
// ReSharper restore LocalizableElement
Assert.IsTrue(success);
}
}
[TestMethod]
public void ConsoleExportTrigger()
{
// The special mode for exercising non-proteomic molecules just doesn't make sense with this test
TestSmallMolecules = false;
var testFilesDir = new TestFilesDir(TestContext, ZIP_FILE);
string docPath = testFilesDir.GetTestPath("BSA_Protea_label_free_20100323_meth3_multi.sky");
string failurePath = testFilesDir.GetTestPath("Failure_test.csv");
string output = RunCommand("--in=" + docPath,
"--exp-translist-instrument=" + ExportInstrumentType.WATERS,
"--exp-file=" + failurePath,
"--exp-strategy=single",
"--exp-method-type=triggered",
"--exp-primary-count=x");
//check for warning and error
Assert.AreEqual(2, CountInstances(Resources.CommandLineTest_ConsoleAddFastaTest_Warning, output)); // exp-primary-count and CE not Waters
CheckRunCommandOutputContains(Resources.CommandLineTest_ConsoleAddFastaTest_Error, output); // Waters
Assert.AreEqual(2, CountInstances(ExportInstrumentType.WATERS, output));
var commandFilesDir = new TestFilesDir(TestContext, COMMAND_FILE);
string thermoTemplate = commandFilesDir.GetTestPath("20100329_Protea_Peptide_targeted.meth");
output = RunCommand("--in=" + docPath,
"--exp-method-instrument=" + ExportInstrumentType.THERMO_TSQ,
"--exp-template=" + thermoTemplate,
"--exp-file=" + failurePath,
"--exp-strategy=single",
"--exp-method-type=triggered");
Assert.IsTrue(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Error)); // Thermo TSQ method
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Warning));
Assert.AreEqual(2, CountInstances(ExportInstrumentType.THERMO, output)); // Thermo and Thermo TSQ
Assert.AreEqual(1, CountInstances(ExportInstrumentType.THERMO_TSQ, output));
output = RunCommand("--in=" + docPath,
"--exp-translist-instrument=" + ExportInstrumentType.AGILENT,
"--exp-file=" + failurePath,
"--exp-strategy=single",
"--exp-method-type=triggered");
Assert.AreEqual(1, CountInstances(Resources.CommandLineTest_ConsoleAddFastaTest_Warning, output)); // exp-primary-count and CE not Agilent
Assert.AreEqual(1, CountInstances(ExportInstrumentType.AGILENT, output)); // CE not Agilent
Assert.IsTrue(output.Contains(Resources.CommandLine_ExportInstrumentFile_Error__triggered_acquistion_requires_a_spectral_library_or_imported_results_in_order_to_rank_transitions_)); // No library and no data
// Successful export to Agilent transition list
string triggerPath = testFilesDir.GetTestPath("BSA_Protea_label_free_20100323_meth3_multi_triggered.sky");
string rawPath = testFilesDir.GetTestPath("ah_20101011y_BSA_MS-MS_only_5-2" +
ExtensionTestContext.ExtThermoRaw);
const string replicate = "Single";
string agilentTriggeredPath = testFilesDir.GetTestPath("AgilentTriggered.csv");
output = RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--import-replicate-name=" + replicate,
"--out=" + triggerPath,
"--exp-translist-instrument=" + ExportInstrumentType.AGILENT,
"--exp-file=" + agilentTriggeredPath,
"--exp-strategy=single",
"--exp-method-type=triggered");
Assert.AreEqual(1, CountInstances(Resources.CommandLineTest_ConsoleAddFastaTest_Warning, output)); // exp-primary-count and CE not Agilent
Assert.AreEqual(1, CountInstances(ExportInstrumentType.AGILENT, output)); // CE not Agilent
Assert.IsTrue(output.Contains(Resources.CommandLine_ExportInstrumentFile_Error__The_current_document_contains_peptides_without_enough_information_to_rank_transitions_for_triggered_acquisition_)); // peptides without enough information
//check for success
var doc = ResultsUtil.DeserializeDocument(triggerPath);
var ceRegression = new CollisionEnergyRegression("Agilent", new[] {new ChargeRegressionLine(2, 2, 10)});
doc = doc.ChangeSettings(doc.Settings.ChangeTransitionPrediction(
p => p.ChangeCollisionEnergy(ceRegression)));
doc = (SrmDocument) doc.RemoveChild(doc.Children[1]);
new CommandLine().SaveDocument(doc, triggerPath, Console.Out);
output = RunCommand("--in=" + triggerPath,
"--exp-translist-instrument=" + ExportInstrumentType.AGILENT,
"--exp-file=" + agilentTriggeredPath,
"--exp-strategy=single",
"--exp-method-type=triggered");
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_ExportInstrumentFile_List__0__exported_successfully_, "AgilentTriggered.csv")));
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Error));
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Warning));
Assert.IsTrue(File.Exists(agilentTriggeredPath));
Assert.AreEqual(doc.PeptideTransitionCount + 1, File.ReadAllLines(agilentTriggeredPath).Length);
// Isolation list export
string agilentIsolationPath = testFilesDir.GetTestPath("AgilentIsolationList.csv");
var cmd = new[]
{
"--in=" + docPath,
"--exp-isolationlist-instrument=" + ExportInstrumentType.AGILENT_TOF,
"--exp-strategy=single",
"--exp-file=" + agilentIsolationPath
};
output = RunCommand(cmd);
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_ExportInstrumentFile_List__0__exported_successfully_, "AgilentIsolationList.csv")));
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsoleAddFastaTest_Error));
Assert.IsTrue(File.Exists(agilentIsolationPath));
doc = ResultsUtil.DeserializeDocument(docPath);
Assert.AreEqual(doc.PeptideTransitionGroupCount + 1, File.ReadAllLines(agilentIsolationPath).Length);
// Run it again as a mixed polarity document
MixedPolarityTest(doc, testFilesDir, docPath, agilentIsolationPath, cmd, true, false);
}
private static void AssertErrorCount(int expectedErrorsInOutput, string output, string failureMessage)
{
// Include not-yet-localized messages in the error count
var countErrorsLocalized = Resources.CommandLineTest_ConsoleAddFastaTest_Error.Contains("Error") ? 0 : CountInstances(Resources.CommandLineTest_ConsoleAddFastaTest_Error, output);
var countErrorsEnglish = CountInstances("Error", output);
Assert.AreEqual(expectedErrorsInOutput, countErrorsLocalized + countErrorsEnglish, failureMessage);
}
[TestMethod]
public void ConsolePathCoverage()
{
var testFilesDir = new TestFilesDir(TestContext, ZIP_FILE);
string bogusPath = testFilesDir.GetTestPath("bogus_file.sky");
string docPath = testFilesDir.GetTestPath("BSA_Protea_label_free_20100323_meth3_multi.sky");
string outPath = testFilesDir.GetTestPath("Output_file.sky");
string tsvPath = testFilesDir.GetTestPath("Exported_test_report.csv");
// The special mode for exercising non-proteomic molecules just doesn't make sense with this test
TestSmallMolecules = false;
// Import the first RAW file (or mzML for international)
string rawPath = testFilesDir.GetTestPath("ah_20101011y_BSA_MS-MS_only_5-2" +
ExtensionTestContext.ExtThermoRaw);
//Error: file does not exist
string output = RunCommand("--in=" + bogusPath);
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_OpenSkyFile_Error__The_Skyline_file__0__does_not_exist_, bogusPath)));
//Error: no raw file
output = RunCommand("--in=" + docPath,
"--import-file=" + rawPath + "x",
"--import-replicate-name=Single");
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_CanReadFile_Error__File_does_not_exist___0__,rawPath+"x")));
//Error: no reportfile
output = RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--import-replicate-name=Single",
"--out=" + outPath,
"--report-format=TSV",
"--report-name=" + "Peptide Ratio Results");
Assert.IsTrue(output.Contains(Resources.CommandLine_ExportReport_));
//Error: no such report
output = RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--report-file=" + tsvPath,
"--report-name=" + "Bogus Report");
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_ExportReport_Error__The_report__0__does_not_exist__If_it_has_spaces_in_its_name__use__double_quotes__around_the_entire_list_of_command_parameters_,"Bogus Report")));
//Error: no --in specified with --import-file
output = RunCommand("--import-file=" + rawPath,
"--save");
Assert.IsTrue(output.Contains(Resources.CommandArgs_ParseArgsInternal_Error__Use___in_to_specify_a_Skyline_document_to_open_));
//Error: no --in specified with --report
output = RunCommand("--out=" + outPath,
"--report-file=" + tsvPath,
"--report-name=" + "Bogus Report");
Assert.IsTrue(output.Contains(Resources.CommandArgs_ParseArgsInternal_Error__Use___in_to_specify_a_Skyline_document_to_open_));
//Error: no template
output = RunCommand("--in=" + docPath,
"--exp-method-instrument=" + ExportInstrumentType.THERMO_LTQ,
"--exp-method-type=scheduled",
"--exp-strategy=single",
"--exp-file=" + testFilesDir.GetTestPath("Bogus.meth"));
Assert.IsTrue(output.Contains(Resources.CommandLine_ExportInstrumentFile_Error__A_template_file_is_required_to_export_a_method_));
Assert.IsFalse(output.Contains(Resources.CommandLine_ExportInstrumentFile_No_method_will_be_exported_));
//Error: template does not exist
output = RunCommand("--in=" + docPath,
"--exp-method-instrument=" + ExportInstrumentType.THERMO_LTQ,
"--exp-method-type=scheduled",
"--exp-strategy=single",
"--exp-file=" + testFilesDir.GetTestPath("Bogus.meth"),
"--exp-template=" + testFilesDir.GetTestPath("Bogus_template.meth"));
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_ExportInstrumentFile_Error__The_template_file__0__does_not_exist_, testFilesDir.GetTestPath("Bogus_template.meth"))));
Assert.IsFalse(output.Contains(Resources.CommandLine_ExportInstrumentFile_No_method_will_be_exported_));
//Error: can't schedule instrument type
var commandFilesDir = new TestFilesDir(TestContext, COMMAND_FILE);
string thermoTemplate = commandFilesDir.GetTestPath("20100329_Protea_Peptide_targeted.meth");
output = RunCommand("--in=" + docPath,
"--exp-method-instrument=" + ExportInstrumentType.THERMO_LTQ,
"--exp-method-type=scheduled",
"--exp-strategy=single",
"--exp-file=" + testFilesDir.GetTestPath("Bogus.meth"),
"--exp-template=" + thermoTemplate);
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_ExportInstrumentFile_Error__the_specified_instrument__0__is_not_compatible_with_scheduled_methods_,"Thermo LTQ")));
Assert.IsTrue(output.Contains(Resources.CommandLine_ExportInstrumentFile_No_method_will_be_exported_));
//Error: not all peptides have RT info
const string watersFilename = "Waters_test.csv";
string watersPath = testFilesDir.GetTestPath(watersFilename);
output = RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--exp-translist-instrument=" + ExportInstrumentType.WATERS,
"--exp-file=" + watersPath,
"--exp-method-type=scheduled",
"--exp-run-length=100",
"--exp-optimizing=ce",
"--exp-strategy=protein",
"--exp-max-trans=100",
"--exp-scheduling-replicate=LAST");
Assert.IsTrue(output.Contains(Resources.CommandLine_ExportInstrumentFile_Error__to_export_a_scheduled_method__you_must_first_choose_a_retention_time_predictor_in_Peptide_Settings___Prediction__or_import_results_for_all_peptides_in_the_document_));
Assert.IsTrue(output.Contains(Resources.CommandLine_ExportInstrumentFile_No_list_will_be_exported_));
//check for success. This is merely to cover more paths
string schedulePath = testFilesDir.GetTestPath("BSA_Protea_label_free_20100323_meth3_multi_scheduled.sky");
var doc = ResultsUtil.DeserializeDocument(docPath);
doc = (SrmDocument)doc.RemoveChild(doc.Children[1]);
new CommandLine().SaveDocument(doc, schedulePath, Console.Out);
docPath = schedulePath;
output = RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--exp-translist-instrument=" + ExportInstrumentType.WATERS,
"--exp-file=" + watersPath,
"--exp-method-type=scheduled",
"--exp-run-length=100",
"--exp-optimizing=ce",
"--exp-strategy=protein",
"--exp-max-trans=100",
"--exp-scheduling-replicate=LAST");
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_ExportInstrumentFile_List__0__exported_successfully_, watersFilename)));
//check for success
output = RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--import-replicate-name=Single",
"--exp-translist-instrument=" + ExportInstrumentType.WATERS,
"--exp-file=" + watersPath,
"--exp-method-type=scheduled",
"--exp-run-length=100",
"--exp-optimizing=ce",
"--exp-strategy=buckets",
"--exp-max-trans=10000000",
"--exp-scheduling-replicate=Single");
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_ExportInstrumentFile_List__0__exported_successfully_, "Waters_test.csv")));
//Check a bunch of warnings
output = RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--import-replicate-name=Single",
"--report-format=BOGUS",
"--exp-translist-instrument=BOGUS",
"--exp-method-instrument=BOGUS",
"--exp-strategy=BOGUS",
"--exp-max-trans=BOGUS",
"--exp-optimizing=BOGUS",
"--exp-method-type=BOGUS",
"--exp-polarity=BOGUS",
"--exp-dwell-time=1000000000", //bogus
"--exp-dwell-time=BOGUS",
"--exp-run-length=1000000000",
"--exp-run-length=BOGUS",
"--exp-translist-instrument=" + ExportInstrumentType.WATERS,
"--exp-method-instrument=" + ExportInstrumentType.THERMO_LTQ);
//1 Error for using the above 2 parameters simultaneously
Assert.IsFalse(output.Contains(Resources.CommandLineTest_ConsolePathCoverage_successfully_));
Assert.AreEqual(11, CountInstances(Resources.CommandLineTest_ConsoleAddFastaTest_Warning, output));
Assert.AreEqual(2, CountErrors(output));
//This test uses a broken Skyline file to test the InvalidDataException catch
var brokenFile = commandFilesDir.GetTestPath("Broken_file.sky");
output = RunCommand("--in=" + brokenFile);
AssertEx.Contains(output, string.Format(Resources.CommandLine_OpenSkyFile_Error__There_was_an_error_opening_the_file__0_, brokenFile));
AssertEx.Contains(output, string.Format(Resources.XmlUtil_GetInvalidDataMessage_The_file_contains_an_error_on_line__0__at_column__1__, 2, 7));
//This test uses a broken Skyline file to test the InvalidDataException catch
var invalidFile = commandFilesDir.GetTestPath("InvalidFile.sky");
output = RunCommand("--in=" + invalidFile);
AssertEx.Contains(output, string.Format(Resources.CommandLine_OpenSkyFile_Error__There_was_an_error_opening_the_file__0_, invalidFile));
AssertEx.Contains(output, string.Format(Resources.XmlUtil_GetInvalidDataMessage_The_file_contains_an_error_on_line__0__at_column__1__, 7, 8));
AssertEx.Contains(output, string.Format(Resources.DigestSettings_ValidateIntRange_The_value__1__for__0__must_be_between__2__and__3__, Resources.DigestSettings_Validate_maximum_missed_cleavages, 10, 0, 9));
//Test unexpected parameter formats
//CONSIDER: Maybe some more automatic way to keep these lists up to date.
TestMissingValueFailures(new[]
{
"in",
"out",
"import-file",
"import-replicate-name",
"import-all",
"import-naming-pattern",
"report-name",
"report-file",
"report-format",
// "exp-translist-format",
"exp-dwell-time",
"exp-run-length",
"exp-method-instrument",
"exp-template",
"exp-file",
"exp-polarity",
"exp-strategy",
"exp-method-type",
"exp-max-trans",
"exp-optimizing",
"exp-scheduling-replicate",
"tool-add",
"tool-command",
"tool-arguments",
"tool-initial-dir",
"tool-conflict-resolution",
"tool-report",
"report-add",
"report-conflict-resolution",
"batch-commands",
});
TestUnexpectedValueFailures(new[]
{
"save",
"import-append",
"exp-ignore-proteins",
"exp-add-energy-ramp",
// "exp-full-scans",
"tool-output-to-immediate-window",
"exp-polarity",
});
}
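// Each argument passed to the helpers below should produce exactly one error
// mentioning that argument when its value is missing or unexpected.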
private void TestMissingValueFailures(string[] names)
{
TestNameValueFailures(names, arg => arg);
TestNameValueFailures(names, arg => string.Format("{0}=", arg));
}
private void TestUnexpectedValueFailures(IEnumerable<string> names)
{
TestNameValueFailures(names, arg => string.Format("{0}=true", arg));
}
private void TestNameValueFailures(IEnumerable<string> names, Func<string, string> getCommandLineForArg, bool allowUnlocalizedErrors = false)
{
foreach (var name in names)
{
string arg = string.Format("--{0}", name);
string output = RunCommand(getCommandLineForArg(arg));
Assert.AreEqual(1, CountErrors(output, allowUnlocalizedErrors), string.Format("No error for argument {0}", arg));
Assert.AreEqual(1, CountInstances(arg, output), string.Format("Missing expected argument {0}", arg));
}
}
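// Runs the Skyline command-line processor in-process with the given arguments
// and returns the captured console output.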
private static string RunCommand(params string[] inputArgs)
{
var consoleBuffer = new StringBuilder();
var consoleOutput = new CommandStatusWriter(new StringWriter(consoleBuffer));
CommandLineRunner.RunCommand(inputArgs, consoleOutput);
return consoleBuffer.ToString();
}
// TODO: Test the case where the imported replicate has the wrong path without Lorenzo's data
//[TestMethod]
public void TestLorenzo()
{
var consoleBuffer = new StringBuilder();
var consoleOutput = new CommandStatusWriter(new StringWriter(consoleBuffer));
var testFilesDir = new TestFilesDir(TestContext, COMMAND_FILE);
string docPath = testFilesDir.GetTestPath("VantageQCSkyline.sky");
string tsvPath = testFilesDir.GetTestPath("Exported_test_report.csv");
string dataPath = testFilesDir.GetTestPath("VantageQCSkyline.skyd");
var args = new[]
{
"--in=" + docPath,
"--import-file=" + dataPath,
"--report-name=TestQCReport",
"--report-file=" + tsvPath,
"--report-format=TSV"
};
//There are no tests. This is for debugging.
CommandLineRunner.RunCommand(args, consoleOutput);
}
//[TestMethod]
public void CountInstancesTest()
{
string s = "hello,hello,hello";
Assert.AreEqual(3,CountInstances("hello",s));
s += "hi";
Assert.AreEqual(3,CountInstances("hello",s));
Assert.AreEqual(0,CountInstances("",""));
Assert.AreEqual(0,CountInstances("hi","howdy"));
}
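// Counts how many times the search string occurs in the given text
// (used to count warnings and errors in console output).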
public static int CountInstances(string search, string searchSpace)
{
if (searchSpace.Length == 0)
{
return 0;
}
int count = 0;
int lastIndex = searchSpace.IndexOf(search, StringComparison.Ordinal);
for (; !Equals(-1, lastIndex) && lastIndex + search.Length <= searchSpace.Length; count++)
{
lastIndex = searchSpace.IndexOf(search, StringComparison.Ordinal);
searchSpace = searchSpace.Substring(lastIndex + 1);
lastIndex = searchSpace.IndexOf(search, StringComparison.Ordinal);
}
return count;
}
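// Counts error messages in the output, optionally also counting untranslated
// English "Error" text.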
public static int CountErrors(string searchSpace, bool allowUnlocalized = false)
{
const string enError = "Error";
string localError = Resources.CommandLineTest_ConsoleAddFastaTest_Error;
int count = CountInstances(localError, searchSpace);
if (allowUnlocalized && !Equals(localError, enError))
count += CountInstances(enError, searchSpace);
return count;
}
[TestMethod]
public void ConsoleBadRawFileImportTest()
{
// Run this test only if we can read Thermo's raw files
if(ExtensionTestContext.CanImportThermoRaw &&
ExtensionTestContext.CanImportWatersRaw)
{
const string testZipPath = @"TestA\ImportAllCmdLineTest.zip";
var testFilesDir = new TestFilesDir(TestContext, testZipPath);
// Contents:
// ImportAllCmdLineTest
// -- REP01
// -- CE_Vantage_15mTorr_0001_REP1_01.raw|mzML
// -- CE_Vantage_15mTorr_0001_REP1_02.raw|mzML
// -- REP02
// -- CE_Vantage_15mTorr_0001_REP2_01.raw|mzML
// -- CE_Vantage_15mTorr_0001_REP2_02.raw|mzML
// -- 160109_Mix1_calcurve_070.mzML
// -- 160109_Mix1_calcurve_073.mzML
// -- 160109_Mix1_calcurve_071.raw (Waters .raw directory)
// -- 160109_Mix1_calcurve_074.raw (Waters .raw directory)
// -- bad_file.raw (Should not be imported. Only in ImportAllCmdLineTest.zip)
// -- bad_file_folder
// -- bad_file.raw (Should not be imported. Only in ImportAllCmdLineTest.zip)
// -- FullScan.RAW|mzML (should not be imported)
// -- FullScan_folder
// -- FullScan.RAW|mzML (should not be imported)
var docPath = testFilesDir.GetTestPath("test.sky");
var rawPath = testFilesDir.GetTestPath("bad_file.raw");
var msg = RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--save");
AssertEx.Contains(msg, string.Format(Resources.CommandLine_ImportResultsFile_Warning__Cannot_read_file__0____Ignoring___, rawPath));
// the document should not have changed
SrmDocument doc = ResultsUtil.DeserializeDocument(docPath);
Assert.IsFalse(doc.Settings.HasResults);
msg = RunCommand("--in=" + docPath,
"--import-all=" + testFilesDir.FullPath,
"--import-warn-on-failure",
"--save");
string expected = string.Format(Resources.CommandLine_ImportResultsFile_Warning__Cannot_read_file__0____Ignoring___, rawPath);
AssertEx.Contains(msg, expected);
doc = ResultsUtil.DeserializeDocument(docPath);
Assert.IsTrue(doc.Settings.HasResults, TextUtil.LineSeparate("No results found.", "Output:", msg));
Assert.AreEqual(6, doc.Settings.MeasuredResults.Chromatograms.Count,
string.Format("Expected 6 replicates, found: {0}",
string.Join(", ", doc.Settings.MeasuredResults.Chromatograms.Select(chromSet => chromSet.Name).ToArray())));
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("REP01"));
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("REP02"));
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("160109_Mix1_calcurve_071"));
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("160109_Mix1_calcurve_074"));
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("160109_Mix1_calcurve_070"));
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("160109_Mix1_calcurve_073"));
// We should not have a replicate named "bad_file"
Assert.IsFalse(doc.Settings.MeasuredResults.ContainsChromatogram("bad_file"));
// Or a replicate named "bad_file_folder"
Assert.IsFalse(doc.Settings.MeasuredResults.ContainsChromatogram("bad_file_folder"));
}
}
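        // Verifies that a full-scan (non-SRM) data file fails to import with a warning and is excluded from the document results.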
[TestMethod]
public void ConsoleImportNonSRMFile()
{
bool useRaw = ExtensionTestContext.CanImportThermoRaw && ExtensionTestContext.CanImportWatersRaw;
string extRaw = useRaw
? ExtensionTestContext.ExtThermoRaw
: ".mzML";
string testZipPath = useRaw
? @"TestA\ImportAllCmdLineTest.zip"
: @"TestA\ImportAllCmdLineTestMzml.zip";
var testFilesDir = new TestFilesDir(TestContext, testZipPath);
// Contents:
// ImportAllCmdLineTest
// -- REP01
// -- CE_Vantage_15mTorr_0001_REP1_01.raw|mzML
// -- CE_Vantage_15mTorr_0001_REP1_02.raw|mzML
// -- REP02
// -- CE_Vantage_15mTorr_0001_REP2_01.raw|mzML
// -- CE_Vantage_15mTorr_0001_REP2_02.raw|mzML
// -- 160109_Mix1_calcurve_070.mzML
// -- 160109_Mix1_calcurve_073.mzML
// -- 160109_Mix1_calcurve_071.raw (Waters .raw directory)
// -- 160109_Mix1_calcurve_074.raw (Waters .raw directory)
// -- bad_file.raw (Should not be imported. Only in ImportAllCmdLineTest.zip)
// -- bad_file_folder
// -- bad_file.raw (Should not be imported. Only in ImportAllCmdLineTest.zip)
// -- FullScan.RAW|mzML (should not be imported)
// -- FullScan_folder
// -- FullScan.RAW|mzML (should not be imported)
var docPath = testFilesDir.GetTestPath("test.sky");
var outPath = testFilesDir.GetTestPath("import_nonSRM_file.sky");
var rawPath = testFilesDir.GetTestPath("FullScan" + extRaw);
// Try to import FullScan.RAW|mzML
var msg = RunCommand("--in=" + docPath,
"--import-file=" + rawPath,
"--import-warn-on-failure",
"--out=" + outPath);
CheckRunCommandOutputContains(string.Format(Resources.CommandLine_ImportResultsFile_Warning__Failed_importing_the_results_file__0____Ignoring___, rawPath), msg);
// Read the saved document. FullScan.RAW|mzML should not have been imported
SrmDocument doc = ResultsUtil.DeserializeDocument(outPath);
Assert.IsFalse(doc.Settings.HasResults);
// Import all files in the directory. FullScan.RAW|mzML should not be imported
msg = RunCommand("--in=" + outPath,
"--import-all=" + testFilesDir.FullPath,
"--import-warn-on-failure",
"--save");
CheckRunCommandOutputContains(string.Format(Resources.CommandLine_ImportResultsFile_Warning__Failed_importing_the_results_file__0____Ignoring___, rawPath), msg);
doc = ResultsUtil.DeserializeDocument(outPath);
Assert.IsTrue(doc.Settings.HasResults);
Assert.AreEqual(6, doc.Settings.MeasuredResults.Chromatograms.Count,
string.Format("Expected 6 replicates, found: {0}",
string.Join(", ", doc.Settings.MeasuredResults.Chromatograms.Select(chromSet => chromSet.Name).ToArray())));
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("REP01"));
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("REP02"));
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("160109_Mix1_calcurve_071"));
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("160109_Mix1_calcurve_074"));
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("160109_Mix1_calcurve_070"));
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("160109_Mix1_calcurve_073"));
// We should not have a replicate named "FullScan"
Assert.IsFalse(doc.Settings.MeasuredResults.ContainsChromatogram("FullScan"));
// Or a replicate named "FullScan_folder"
Assert.IsFalse(doc.Settings.MeasuredResults.ContainsChromatogram("FullScan_folder"));
}
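        // Exercises --import-file, --import-all, --import-all-files, --import-replicate-name and
        // --import-naming-pattern, including the error cases for conflicting options and patterns.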
[TestMethod]
public void ConsoleMultiReplicateImportTest()
{
bool useRaw = ExtensionTestContext.CanImportThermoRaw && ExtensionTestContext.CanImportWatersRaw;
string testZipPath = useRaw
? @"TestA\ImportAllCmdLineTest.zip"
: @"TestA\ImportAllCmdLineTestMzml.zip";
string extRaw = useRaw
? ".raw"
: ".mzML";
var testFilesDir = new TestFilesDir(TestContext, testZipPath);
// Contents:
// ImportAllCmdLineTest
// -- REP01
// -- CE_Vantage_15mTorr_0001_REP1_01.raw|mzML
// -- CE_Vantage_15mTorr_0001_REP1_02.raw|mzML
// -- REP02
// -- CE_Vantage_15mTorr_0001_REP2_01.raw|mzML
// -- CE_Vantage_15mTorr_0001_REP2_02.raw|mzML
// -- 160109_Mix1_calcurve_070.mzML
// -- 160109_Mix1_calcurve_073.mzML
// -- 160109_Mix1_calcurve_071.raw (Waters .raw directory)
// -- 160109_Mix1_calcurve_074.raw (Waters .raw directory)
// -- bad_file.raw (Should not be imported. Only in ImportAllCmdLineTest.zip)
// -- bad_file_folder
// -- bad_file.raw (Should not be imported. Only in ImportAllCmdLineTest.zip)
// -- FullScan.RAW|mzML (should not be imported)
// -- FullScan_folder
// -- FullScan.RAW|mzML (should not be imported)
var docPath = testFilesDir.GetTestPath("test.sky");
var outPath0 = testFilesDir.GetTestPath("Imported_multiple0.sky");
FileEx.SafeDelete(outPath0);
var outPath1 = testFilesDir.GetTestPath("Imported_multiple1.sky");
FileEx.SafeDelete(outPath1);
var outPath2 = testFilesDir.GetTestPath("Imported_multiple2.sky");
FileEx.SafeDelete(outPath2);
var outPath3 = testFilesDir.GetTestPath("Imported_multiple3.sky");
FileEx.SafeDelete(outPath3);
var outPath4 = testFilesDir.GetTestPath("Imported_multiple4.sky");
FileEx.SafeDelete(outPath4);
var rawPath = new MsDataFilePath(testFilesDir.GetTestPath(@"REP01\CE_Vantage_15mTorr_0001_REP1_01" + extRaw));
// Test: Cannot use --import-file and --import-all options simultaneously
var msg = RunCommand("--in=" + docPath,
"--import-file=" + rawPath.FilePath,
"--import-replicate-name=Unscheduled01",
"--import-all=" + testFilesDir.FullPath,
"--out=" + outPath1);
Assert.IsTrue(msg.Contains(Resources.CommandArgs_ParseArgsInternal_Error____import_file_and___import_all_options_cannot_be_used_simultaneously_), msg);
// output file should not exist
Assert.IsFalse(File.Exists(outPath1));
// Test: Use --import-replicate-name with --import-all for single-replicate, multi-file import
const string singleName = "Unscheduled01";
msg = RunCommand("--in=" + docPath,
"--import-replicate-name=" + singleName,
"--import-all=" + testFilesDir.GetTestPath("REP01"),
"--out=" + outPath0);
// Used to give this error
// Assert.IsTrue(msg.Contains(Resources.CommandArgs_ParseArgsInternal_Error____import_replicate_name_cannot_be_used_with_the___import_all_option_), msg);
// // output file should not exist
Assert.IsTrue(File.Exists(outPath0), msg);
SrmDocument doc0 = ResultsUtil.DeserializeDocument(outPath0);
Assert.AreEqual(1, doc0.Settings.MeasuredResults.Chromatograms.Count);
Assert.IsTrue(doc0.Settings.MeasuredResults.ContainsChromatogram(singleName));
Assert.AreEqual(2, doc0.Settings.MeasuredResults.Chromatograms[0].MSDataFileInfos.Count);
// Test: Cannot use --import-naming-pattern with --import-file
msg = RunCommand("--in=" + docPath,
"--import-file=" + rawPath.FilePath,
"--import-naming-pattern=prefix_(.*)",
"--out=" + outPath1);
Assert.IsTrue(msg.Contains(Resources.CommandArgs_ParseArgsInternal_Error____import_naming_pattern_cannot_be_used_with_the___import_file_option_), msg);
// output file should not exist
Assert.IsFalse(File.Exists(outPath1));
// Test: invalid regular expression (1)
msg = RunCommand("--in=" + docPath,
"--import-all=" + testFilesDir.FullPath,
"--import-naming-pattern=A",
"--out=" + outPath1);
// output file should not exist
Assert.IsFalse(File.Exists(outPath1));
Assert.IsTrue(msg.Contains(string.Format(Resources.CommandArgs_ParseArgsInternal_Error__Regular_expression___0___does_not_have_any_groups___String, "A")), msg);
// Test: invalid regular expression (2)
msg = RunCommand("--in=" + docPath,
"--import-all=" + testFilesDir.FullPath,
"--import-naming-pattern=invalid",
"--out=" + outPath1);
// output file should not exist
Assert.IsTrue(!File.Exists(outPath1));
Assert.IsTrue(msg.Contains(string.Format(Resources.CommandArgs_ParseArgsInternal_Error__Regular_expression___0___does_not_have_any_groups___String, "invalid")), msg);
// Test: Import files in the "REP01" directory;
// Use a naming pattern that will cause the replicate names of the two files to be the same
msg = RunCommand("--in=" + docPath,
"--import-all=" + testFilesDir.GetTestPath("REP01"),
"--import-naming-pattern=.*_(REP[0-9]+)_(.+)",
"--out=" + outPath1);
Assert.IsFalse(File.Exists(outPath1));
Assert.IsTrue(msg.Contains(string.Format(Resources.CommandLine_ApplyNamingPattern_Error__Duplicate_replicate_name___0___after_applying_regular_expression_,"REP1")), msg);
// Test: Import files in the "REP01" directory; Use a naming pattern
msg = RunCommand("--in=" + docPath,
"--import-all=" + testFilesDir.GetTestPath("REP01"),
"--import-naming-pattern=.*_([0-9]+)",
"--out=" + outPath1);
Assert.IsTrue(File.Exists(outPath1), msg);
SrmDocument doc = ResultsUtil.DeserializeDocument(outPath1);
Assert.AreEqual(2, doc.Settings.MeasuredResults.Chromatograms.Count);
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("01"));
Assert.IsTrue(doc.Settings.MeasuredResults.ContainsChromatogram("02"));
Assert.IsFalse(File.Exists(outPath2));
// Test: Import a single file
// Import REP01\CE_Vantage_15mTorr_0001_REP1_01.raw;
// Use replicate name "REP01"
msg = RunCommand("--in=" + docPath,
"--import-file=" + rawPath.FilePath,
"--import-replicate-name=REP01",
"--out=" + outPath2);
Assert.IsTrue(File.Exists(outPath2), msg);
doc = ResultsUtil.DeserializeDocument(outPath2);
Assert.AreEqual(1, doc.Settings.MeasuredResults.Chromatograms.Count);
int initialFileCount = 0;
foreach (var chromatogram in doc.Settings.MeasuredResults.Chromatograms)
{
initialFileCount += chromatogram.MSDataFilePaths.Count();
}
// Import another single file.
var rawPath2 = MsDataFileUri.Parse(testFilesDir.GetTestPath("160109_Mix1_calcurve_070.mzML"));
msg = RunCommand("--in=" + outPath2,
"--import-file=" + rawPath2.GetFilePath(),
"--import-replicate-name=160109_Mix1_calcurve_070",
"--save");
doc = ResultsUtil.DeserializeDocument(outPath2);
Assert.AreEqual(2, doc.Settings.MeasuredResults.Chromatograms.Count, msg);
ChromatogramSet chromatSet;
int idx;
doc.Settings.MeasuredResults.TryGetChromatogramSet("160109_Mix1_calcurve_070", out chromatSet, out idx);
Assert.IsNotNull(chromatSet, msg);
Assert.IsTrue(chromatSet.MSDataFilePaths.Contains(rawPath2));
// Test: Import all files and sub-folders in test directory
// The document should already contain a replicate named "REP01".
// Only one more file should be added to the "REP01" replicate.
// The document should also already contain replicate "160109_Mix1_calcurve_070".
// There should be notes about ignoring the two files that are already in the document.
msg = RunCommand("--in=" + outPath2,
"--import-all=" + testFilesDir.FullPath,
"--import-warn-on-failure",
"--save");
            // ExtensionTestContext.ExtThermoRaw uses a different case from the file on disk,
            // which happens to make a good test case.
MsDataFilePath rawPathDisk = GetThermoDiskPath(rawPath);
// These messages are due to files that were already in the document.
Assert.IsTrue(msg.Contains(string.Format(Resources.CommandLine_RemoveImportedFiles__0______1___Note__The_file_has_already_been_imported__Ignoring___, "REP01", rawPathDisk)), msg);
Assert.IsTrue(msg.Contains(string.Format(Resources.CommandLine_RemoveImportedFiles__0______1___Note__The_file_has_already_been_imported__Ignoring___, "160109_Mix1_calcurve_070", rawPath2)), msg);
// Assert.IsTrue(msg.Contains(string.Format("160109_Mix1_calcurve_070 -> {0}",rawPath2)), msg);
doc = ResultsUtil.DeserializeDocument(outPath2);
Assert.IsTrue(doc.Settings.HasResults);
Assert.AreEqual(6, doc.Settings.MeasuredResults.Chromatograms.Count,
string.Format("Expected 6 replicates, found: {0}",
string.Join(", ", doc.Settings.MeasuredResults.Chromatograms.Select(chromSet => chromSet.Name).ToArray())));
// count the number of files imported into the document
int totalImportedFiles = 0;
foreach (var chromatogram in doc.Settings.MeasuredResults.Chromatograms)
{
totalImportedFiles += chromatogram.MSDataFilePaths.Count();
}
            // We should have imported 7 more files
Assert.AreEqual(initialFileCount + 7, totalImportedFiles);
// In the "REP01" replicate we should have 2 files
ChromatogramSet chromatogramSet;
int index;
doc.Settings.MeasuredResults.TryGetChromatogramSet("REP01", out chromatogramSet, out index);
Assert.IsNotNull(chromatogramSet);
Assert.IsTrue(chromatogramSet.MSDataFilePaths.Count() == 2);
Assert.IsTrue(chromatogramSet.MSDataFilePaths.Contains(rawPath));
Assert.IsTrue(chromatogramSet.MSDataFilePaths.Contains(
new MsDataFilePath(testFilesDir.GetTestPath(@"REP01\CE_Vantage_15mTorr_0001_REP1_01" +
extRaw))));
Assert.IsTrue(!useRaw || chromatogramSet.MSDataFilePaths.Contains(
GetThermoDiskPath(new MsDataFilePath(testFilesDir.GetTestPath(@"REP01\CE_Vantage_15mTorr_0001_REP1_02" + extRaw)))));
Assert.IsFalse(File.Exists(outPath3));
// Test: Import a single file
// Import 160109_Mix1_calcurve_074.raw;
// Use replicate name "REP01"
var rawPath3 = testFilesDir.GetTestPath("160109_Mix1_calcurve_074" + extRaw);
msg = RunCommand("--in=" + docPath,
"--import-file=" + rawPath3,
"--import-replicate-name=REP01",
"--out=" + outPath3);
Assert.IsTrue(File.Exists(outPath3), msg);
doc = ResultsUtil.DeserializeDocument(outPath3);
Assert.AreEqual(1, doc.Settings.MeasuredResults.Chromatograms.Count);
// Now import all files and sub-folders in test directory.
// This should return an error since the replicate "REP01" that already
// exists in the document has an unexpected file: '160109_Mix1_calcurve_074.raw'.
msg = RunCommand("--in=" + outPath3,
"--import-all=" + testFilesDir.FullPath,
"--save");
Assert.IsTrue(
msg.Contains(
string.Format(
Resources.CommandLine_CheckReplicateFiles_Error__Replicate__0__in_the_document_has_an_unexpected_file__1__,"REP01",
rawPath3)), msg);
// Test: Import non-recursive
// Make sure only files directly in the folder get imported
string badFilePath = testFilesDir.GetTestPath("bad_file" + extRaw);
string badFileMoved = badFilePath + ".save";
if (File.Exists(badFilePath))
File.Move(badFilePath, badFileMoved);
string fullScanPath = testFilesDir.GetTestPath("FullScan" + extRaw);
string fullScanMoved = fullScanPath + ".save";
File.Move(fullScanPath, fullScanMoved);
msg = RunCommand("--in=" + docPath,
"--import-all-files=" + testFilesDir.FullPath,
"--out=" + outPath4);
Assert.IsTrue(File.Exists(outPath4), msg);
doc = ResultsUtil.DeserializeDocument(outPath4);
Assert.IsTrue(doc.Settings.HasResults);
Assert.AreEqual(4, doc.Settings.MeasuredResults.Chromatograms.Count,
string.Format("Expected 4 replicates from files, found: {0}",
string.Join(", ", doc.Settings.MeasuredResults.Chromatograms.Select(chromSet => chromSet.Name).ToArray())));
if (File.Exists(badFileMoved))
File.Move(badFileMoved, badFilePath);
File.Move(fullScanMoved, fullScanPath);
}
//[TestMethod]
// TODO: Uncomment this test when it can clean up before/after itself
public void ConsolePanoramaImportTest()
{
bool useRaw = ExtensionTestContext.CanImportThermoRaw && ExtensionTestContext.CanImportWatersRaw;
string testZipPath = useRaw
? @"TestA\ImportAllCmdLineTest.zip"
: @"TestA\ImportAllCmdLineTestMzml.zip";
string extRaw = useRaw
? ".raw"
: ".mzML";
var testFilesDir = new TestFilesDir(TestContext, testZipPath);
// Contents:
// ImportAllCmdLineTest
// -- REP01
// -- CE_Vantage_15mTorr_0001_REP1_01.raw|mzML
// -- CE_Vantage_15mTorr_0001_REP1_02.raw|mzML
// -- REP02
// -- CE_Vantage_15mTorr_0001_REP2_01.raw|mzML
// -- CE_Vantage_15mTorr_0001_REP2_02.raw|mzML
// -- 160109_Mix1_calcurve_070.mzML
// -- 160109_Mix1_calcurve_073.mzML
// -- 160109_Mix1_calcurve_071.raw (Waters .raw directory)
// -- 160109_Mix1_calcurve_074.raw (Waters .raw directory)
// -- bad_file.raw (Should not be imported. Only in ImportAllCmdLineTest.zip)
// -- bad_file_folder
// -- bad_file.raw (Should not be imported. Only in ImportAllCmdLineTest.zip)
// -- FullScan.RAW|mzML (should not be imported)
// -- FullScan_folder
// -- FullScan.RAW|mzML (should not be imported)
var docPath = testFilesDir.GetTestPath("test.sky");
// Test: Import a file to an empty document and upload to the panorama server
var rawPath = new MsDataFilePath(testFilesDir.GetTestPath(@"REP01\CE_Vantage_15mTorr_0001_REP1_01" + extRaw));
var msg = RunCommand("--in=" + docPath,
"--import-file=" + rawPath.FilePath,
//"--import-on-or-after=1/1/2014",
"--save",
"--panorama-server=https://panoramaweb.org",
"--panorama-folder=/MacCoss/SkylineUploadTest/",
"--panorama-username=skylinetest@proteinms.net",
"--panorama-password=skylinetest");
SrmDocument doc = ResultsUtil.DeserializeDocument(docPath);
Assert.AreEqual(1, doc.Settings.MeasuredResults.Chromatograms.Count);
Assert.IsFalse(msg.Contains("Skipping Panorama import."), msg);
// Test: Import a second file and upload to the panorama server
rawPath = new MsDataFilePath(testFilesDir.GetTestPath(@"REP01\CE_Vantage_15mTorr_0001_REP1_02" + extRaw));
msg = RunCommand("--in=" + docPath,
"--import-file=" + rawPath.FilePath,
"--save",
"--panorama-server=https://panoramaweb.org",
"--panorama-folder=/MacCoss/SkylineUploadTest/",
"--panorama-username=skylinetest@proteinms.net",
"--panorama-password=skylinetest");
doc = ResultsUtil.DeserializeDocument(docPath);
Assert.AreEqual(2, doc.Settings.MeasuredResults.Chromatograms.Count);
Assert.IsFalse(msg.Contains("Skipping Panorama import."), msg);
}
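        // Covers adding external tools from the command line: required arguments, unsupported commands,
        // and title conflict resolution with overwrite or skip.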
[TestMethod]
public void ConsoleAddToolTest()
{
// Get a unique tool title.
string title = GetTitleHelper();
const string command = @"C:\Windows\Notepad.exe";
const string arguments = "$(DocumentDir) Other";
const string initialDirectory = @"C:\";
Settings.Default.ToolList.Clear(); // in case any previous run had trouble
// Test adding a tool.
RunCommand("--tool-add=" + title,
"--tool-command=" + command,
"--tool-arguments=" + arguments,
"--tool-initial-dir=" + initialDirectory);
Assert.IsTrue(Settings.Default.ToolList.Count > 0, "The expected tool was not added to the list.");
int index = Settings.Default.ToolList.Count -1;
ToolDescription tool = Settings.Default.ToolList[index];
Assert.AreEqual(title, tool.Title);
Assert.AreEqual(command,tool.Command);
Assert.AreEqual(arguments,tool.Arguments);
Assert.AreEqual(initialDirectory,tool.InitialDirectory);
// Remove that tool.
Settings.Default.ToolList.RemoveAt(index);
// Test a tool with no Initial Directory and no arguments
RunCommand("--tool-add=" + title,
"--tool-command=" + command);
int index1 = Settings.Default.ToolList.Count - 1;
ToolDescription tool1 = Settings.Default.ToolList[index1];
Assert.AreEqual(title, tool1.Title);
Assert.AreEqual(command, tool1.Command);
Assert.AreEqual("", tool1.Arguments);
Assert.AreEqual("", tool1.InitialDirectory);
// Remove that Tool.
Settings.Default.ToolList.RemoveAt(index1);
// Test failure to add tool
string output = RunCommand("--tool-add=" + title);
Assert.IsTrue(output.Contains(Resources.CommandLine_ImportTool_The_tool_was_not_imported___));
string output2 = RunCommand("--tool-command=" + command);
Assert.IsTrue(output2.Contains(Resources.CommandLine_ImportTool_The_tool_was_not_imported___));
const string badCommand = "test";
string output3 = RunCommand("--tool-add=" + title,"--tool-command=" + badCommand);
Assert.IsTrue(output3.Contains(string.Format(Resources.CommandLine_ImportTool_Error__the_provided_command_for_the_tool__0__is_not_of_a_supported_type___Supported_Types_are___1_, title, "*.exe; *.com; *.pif; *.cmd; *.bat")));
Assert.IsTrue(output3.Contains(Resources.CommandLine_ImportTool_The_tool_was_not_imported___));
// Now test conflicting titles.
// Add the tool.
RunCommand("--tool-add=" + title,
"--tool-command=" + command,
"--tool-arguments=" + arguments,
"--tool-initial-dir=" + initialDirectory);
ToolDescription tool2 = Settings.Default.ToolList[Settings.Default.ToolList.Count - 1];
Assert.AreEqual(title, tool2.Title); // tool with title of title exists.
// Add another tool with the same title.
string output4 = RunCommand("--tool-add=" + title,
"--tool-command=" + command);
Assert.IsTrue(output4.Contains((string.Format(Resources.CommandLine_ImportTool_, "TestTool1"))));
ToolDescription tool3 = Settings.Default.ToolList.Last();
Assert.AreNotEqual("", tool3.Arguments);
Assert.AreNotEqual("", tool3.InitialDirectory);
// Specify overwrite
string output5 = RunCommand("--tool-add=" + title,
"--tool-command=" + command,
"--tool-conflict-resolution=overwrite");
Assert.IsTrue((output5.Contains(string.Format(Resources.CommandLine_ImportTool_Warning__the_tool__0__was_overwritten,"TestTool1"))));
// Check arguments and initialDir were written over.
ToolDescription tool4 = Settings.Default.ToolList.Last();
Assert.AreEqual(title,tool4.Title);
Assert.AreEqual("", tool4.Arguments);
Assert.AreEqual("", tool4.InitialDirectory);
// Specify skip
string output6 = RunCommand("--tool-add=" + title,
"--tool-command=" + command,
"--tool-arguments=thisIsATest",
"--tool-conflict-resolution=skip");
Assert.IsTrue((output6.Contains(string.Format(Resources.CommandLine_ImportTool_Warning__skipping_tool__0__due_to_a_name_conflict_,"TestTool1"))));
// Check Arguments
ToolDescription tool5 = Settings.Default.ToolList.Last();
Assert.AreEqual(title, tool5.Title);
Assert.AreEqual("", tool5.Arguments); // unchanged.
// It now complains in this case.
string output7 = RunCommand( "--tool-arguments=" + arguments,
"--tool-initial-dir=" + initialDirectory);
Assert.IsTrue(output7.Contains(Resources.CommandLine_ImportTool_Error__to_import_a_tool_it_must_have_a_name_and_a_command___Use___tool_add_to_specify_a_name_and_use___tool_command_to_specify_a_command___The_tool_was_not_imported___));
// Test adding a tool.
const string newToolTitle = "TestTitle";
const string reportTitle = "\"Transition Results\"";
RunCommand("--tool-add=" + newToolTitle,
"--tool-command=" + command,
"--tool-arguments=" + arguments,
"--tool-initial-dir=" + initialDirectory,
"--tool-output-to-immediate-window",
"--tool-report=" + reportTitle);
int index3 = Settings.Default.ToolList.Count - 1;
ToolDescription tool6 = Settings.Default.ToolList[index3];
Assert.AreEqual(newToolTitle, tool6.Title);
Assert.AreEqual(command, tool6.Command);
Assert.AreEqual(arguments, tool6.Arguments);
Assert.AreEqual(initialDirectory, tool6.InitialDirectory);
Assert.IsTrue(tool6.OutputToImmediateWindow);
Assert.AreEqual(reportTitle, tool6.ReportTitle);
// Remove that tool.
Settings.Default.ToolList.RemoveAt(index3);
const string importReportArgument = ToolMacros.INPUT_REPORT_TEMP_PATH;
string output8 = RunCommand("--tool-add=" + newToolTitle,
"--tool-command=" + command,
"--tool-arguments=" + importReportArgument,
"--tool-initial-dir=" + initialDirectory,
"--tool-output-to-immediate-window");
Assert.IsTrue(output8.Contains(string.Format(Resources.CommandLine_ImportTool_Error__If__0__is_and_argument_the_tool_must_have_a_Report_Title__Use_the___tool_report_parameter_to_specify_a_report_, "$(InputReportTempPath)")));
const string reportTitle3 = "fakeReport";
string output9 = RunCommand("--tool-add=" + newToolTitle,
"--tool-command=" + command,
"--tool-arguments=" + importReportArgument,
"--tool-initial-dir=" + initialDirectory,
"--tool-output-to-immediate-window",
"--tool-report=" + reportTitle3);
Assert.IsTrue(output9.Contains(string.Format(Resources.CommandLine_ImportTool_Error__Please_import_the_report_format_for__0____Use_the___report_add_parameter_to_add_the_missing_custom_report_, reportTitle3)));
Assert.IsTrue(output9.Contains(Resources.CommandLine_ImportTool_The_tool_was_not_imported___));
}
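        // Covers installing external tools from zip files: invalid input, tool and report conflicts,
        // required packages with the program-path macro, and annotation conflicts.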
[TestMethod]
public void TestInstallFromZip()
{
            // A using clause here would overwrite the failure exception when the test fails
var movedDir = new MovedDirectory(ToolDescriptionHelpers.GetToolsDirectory(), Program.StressTest);
try
{
Settings.Default.ToolList.Clear();
var testFilesDir = new TestFilesDir(TestContext, COMMAND_FILE);
{
// Test bad input
const string badFileName = "BadFilePath";
Assert.IsFalse(File.Exists(badFileName));
const string command = "--tool-add-zip=" + badFileName;
string output = RunCommand(command);
Assert.IsTrue(output.Contains(Resources.CommandLine_ImportToolsFromZip_Error__the_file_specified_with_the___tool_add_zip_command_does_not_exist__Please_verify_the_file_location_and_try_again_));
}
{
string notZip = testFilesDir.GetTestPath("Broken_file.sky");
Assert.IsTrue(File.Exists(notZip));
string command = "--tool-add-zip=" + notZip;
string output = RunCommand(command);
Assert.IsTrue(output.Contains(Resources.CommandLine_ImportToolsFromZip_Error__the_file_specified_with_the___tool_add_zip_command_is_not_a__zip_file__Please_specify_a_valid__zip_file_));
}
{
var uniqueReportZip = testFilesDir.GetTestPath("UniqueReport.zip");
Assert.IsTrue(File.Exists(uniqueReportZip));
string command = "--tool-add-zip=" + uniqueReportZip;
string output = RunCommand(command);
Assert.IsTrue(Settings.Default.ToolList.Count == 1);
ToolDescription newTool = Settings.Default.ToolList.Last();
Assert.AreEqual("HelloWorld", newTool.Title);
Assert.IsTrue(newTool.OutputToImmediateWindow);
Assert.AreEqual("UniqueReport", newTool.ReportTitle);
string path = newTool.ToolDirPath;
Assert.IsTrue(File.Exists(Path.Combine(path, "HelloWorld.exe")));
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_ImportToolsFromZip_Installed_tool__0_,"HelloWorld")));
//Try to add the same tool again. Get conflicting report and tool with no overwrite specified.
string output1 = RunCommand(command);
Assert.IsTrue(output1.Contains(string.Format(Resources.AddZipToolHelper_ShouldOverwrite_Error__There_is_a_conflicting_tool + Resources.AddZipToolHelper_ShouldOverwrite__in_the_file__0_, "UniqueReport.zip")));
Assert.IsTrue(
output1.Contains(
Resources.AddZipToolHelper_ShouldOverwrite_Please_specify__overwrite__or__parallel__with_the___tool_zip_conflict_resolution_command_));
//Now run with overwrite specified.
string output2 = RunCommand(command, "--tool-zip-conflict-resolution=overwrite");
Assert.IsTrue(output2.Contains(string.Format(Resources.AddZipToolHelper_ShouldOverwrite_Overwriting_tool___0_,"HelloWorld")));
//Now install in parallel.
string output3 = RunCommand(command, "--tool-zip-conflict-resolution=parallel");
Assert.IsTrue(output3.Contains(string.Format(Resources.CommandLine_ImportToolsFromZip_Installed_tool__0_, "HelloWorld1")));
ToolDescription newTool1 = Settings.Default.ToolList.Last();
Assert.AreEqual("HelloWorld1", newTool1.Title);
Assert.IsTrue(newTool1.OutputToImmediateWindow);
Assert.AreEqual("UniqueReport", newTool1.ReportTitle);
string path1 = newTool1.ToolDirPath;
Assert.IsTrue(File.Exists(Path.Combine(path1, "HelloWorld.exe")));
//Cleanup.
Settings.Default.ToolList.Clear();
DirectoryEx.SafeDelete(ToolDescriptionHelpers.GetToolsDirectory());
Settings.Default.PersistedViews.RemoveView(PersistedViews.ExternalToolsGroup.Id, "UniqueReport");
Settings.Default.PersistedViews.RemoveView(PersistedViews.ExternalToolsGroup.Id, "UniqueReport1");
}
{
//Test working with packages and ProgramPath Macro.
var testCommandLine = testFilesDir.GetTestPath("TestCommandLine.zip");
Assert.IsTrue(File.Exists(testCommandLine));
string command = "--tool-add-zip=" + testCommandLine;
string output = RunCommand(command);
StringAssert.Contains(output, Resources.AddZipToolHelper_InstallProgram_Error__Package_installation_not_handled_in_SkylineRunner___If_you_have_already_handled_package_installation_use_the___tool_ignore_required_packages_flag);
string output1 = RunCommand(command, "--tool-ignore-required-packages");
StringAssert.Contains(output1, string.Format(
Resources.AddZipToolHelper_FindProgramPath_A_tool_requires_Program__0__Version__1__and_it_is_not_specified_with_the___tool_program_macro_and___tool_program_path_commands__Tool_Installation_Canceled_,
"Bogus",
"2.15.2"));
string path = testFilesDir.GetTestPath("NumberWriter.exe");
string output2 = RunCommand(command, "--tool-ignore-required-packages",
"--tool-program-macro=Bogus,2.15.2",
"--tool-program-path=" + path);
StringAssert.Contains(output2, string.Format(Resources.CommandLine_ImportToolsFromZip_Installed_tool__0_, "TestCommandline"));
ToolDescription newTool = Settings.Default.ToolList.Last();
Assert.AreEqual("TestCommandline", newTool.Title);
Assert.AreEqual("$(ProgramPath(Bogus,2.15.2))", newTool.Command);
Assert.AreEqual("100 12", newTool.Arguments);
ProgramPathContainer ppc = new ProgramPathContainer("Bogus", "2.15.2");
Assert.IsTrue(Settings.Default.ToolFilePaths.ContainsKey(ppc));
Assert.AreEqual(path, Settings.Default.ToolFilePaths[ppc]);
Settings.Default.ToolFilePaths.Remove(ppc);
Settings.Default.ToolList.Clear();
DirectoryEx.SafeDelete(ToolDescriptionHelpers.GetToolsDirectory());
}
{
//Test working with annotations.
var testCommandLine = testFilesDir.GetTestPath("TestAnnotations.zip");
Assert.IsTrue(File.Exists(testCommandLine));
string command = "--tool-add-zip=" + testCommandLine;
string output = RunCommand(command);
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_ImportToolsFromZip_Installed_tool__0_, "AnnotationTest\\Tool1")));
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_ImportToolsFromZip_Installed_tool__0_, "AnnotationTest\\Tool2")));
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_ImportToolsFromZip_Installed_tool__0_, "AnnotationTest\\Tool3")));
Assert.IsTrue(output.Contains(string.Format(Resources.CommandLine_ImportToolsFromZip_Installed_tool__0_, "AnnotationTest\\Tool4")));
}
{
var conflictingAnnotations = testFilesDir.GetTestPath("ConflictAnnotations.zip");
Assert.IsTrue(File.Exists(conflictingAnnotations));
string command = "--tool-add-zip=" + conflictingAnnotations;
string output = RunCommand(command);
Assert.IsTrue(
output.Contains(string.Format(Resources.AddZipToolHelper_ShouldOverwriteAnnotations_There_are_annotations_with_conflicting_names__Please_use_the___tool_zip_overwrite_annotations_command_)));
output = RunCommand(command, "--tool-zip-overwrite-annotations=false");
Assert.IsTrue(output.Contains(string.Format(Resources.AddZipToolHelper_ShouldOverwriteAnnotations_There_are_conflicting_annotations__Keeping_existing_)));
Assert.IsTrue(
output.Contains(
string.Format(
Resources.AddZipToolHelper_ShouldOverwriteAnnotations_Warning__the_annotation__0__may_not_be_what_your_tool_requires_,
"SampleID")));
output = RunCommand(command, "--tool-zip-overwrite-annotations=true");
Assert.IsTrue(output.Contains(string.Format(Resources.AddZipToolHelper_ShouldOverwriteAnnotations_There_are_conflicting_annotations__Overwriting_)));
Assert.IsTrue(output.Contains(string.Format(Resources.AddZipToolHelper_ShouldOverwriteAnnotations_Warning__the_annotation__0__is_being_overwritten,"SampleID")));
Settings.Default.AnnotationDefList = new AnnotationDefList();
Settings.Default.ToolList.Clear();
DirectoryEx.SafeDelete(ToolDescriptionHelpers.GetToolsDirectory());
}
}
finally
{
try { movedDir.Dispose(); }
// ReSharper disable once EmptyGeneralCatchClause
catch (Exception) {}
}
}
// TODO: Don removed this test because it was failing in multiple runs under TestRunner
//[TestMethod]
public void ConsoleAddSkyrTest()
{
int initialNumber = Settings.Default.ReportSpecList.Count;
// Assumes the title TextREportexam is a unique title.
// Add test.skyr which only has one report type named TextREportexam
var commandFilesDir = new TestFilesDir(TestContext, COMMAND_FILE);
var skyrFile = commandFilesDir.GetTestPath("test.skyr");
string output = RunCommand("--report-add=" + skyrFile);
Assert.AreEqual(initialNumber+1, Settings.Default.ReportSpecList.Count);
Assert.AreEqual("TextREportexam", Settings.Default.ReportSpecList.Last().GetKey());
Assert.IsTrue(output.Contains("Success"));
var skyrAdded = Settings.Default.ReportSpecList.Last();
// Attempt to add the same skyr again.
string output2 = RunCommand("--report-add=" + skyrFile);
Assert.IsTrue(output2.Contains("Error"));
            // Deliberately check reference equality to show it is the same object, unchanged
Assert.IsTrue(ReferenceEquals(skyrAdded, Settings.Default.ReportSpecList.Last()));
// Specify skip
string output4 = RunCommand("--report-add=" + skyrFile,
"--report-conflict-resolution=skip");
Assert.IsTrue(output4.Contains("skipping"));
            // Deliberately check reference equality to show it is the same object, unchanged
Assert.IsTrue(ReferenceEquals(skyrAdded, Settings.Default.ReportSpecList.Last()));
// Specify overwrite
string output3 = RunCommand("--report-add=" + skyrFile,
"--report-conflict-resolution=overwrite");
Assert.IsTrue(output3.Contains("overwriting"));
            // Deliberately check reference equality to show it is not the same object, i.e. it changed
Assert.IsFalse(ReferenceEquals(skyrAdded, Settings.Default.ReportSpecList.Last()));
}
// TODO: Don removed this test because it was failing in multiple runs under TestRunner
//[TestMethod]
public void ConsoleRunCommandsTest()
{
int toolListCount = Settings.Default.ToolList.Count;
var commandFilesDir = new TestFilesDir(TestContext, COMMAND_FILE);
var commandsToRun = commandFilesDir.GetTestPath("ToolList2.txt");
string output = RunCommand("--batch-commands=" + commandsToRun);
Assert.IsTrue(output.Contains("NeWtOOl was added to the Tools Menu"));
Assert.IsTrue(output.Contains("iHope was added to the Tools Menu"));
Assert.IsTrue(output.Contains("thisWorks was added to the Tools Menu"));
Assert.IsTrue(output.Contains("FirstTry was added to the Tools Menu"));
Assert.IsTrue(Settings.Default.ToolList.Any(t => t.Title == "NeWtOOl" && t.Command == @"C:\Windows\Notepad.exe" && t.Arguments == "$(DocumentDir)" && t.InitialDirectory == @"C:\"));
Assert.IsTrue(Settings.Default.ToolList.Any(t => t.Title == "iHope" && t.Command == @"C:\Windows\Notepad.exe"));
Assert.IsTrue(Settings.Default.ToolList.Any(t => t.Title == "thisWorks"));
Assert.IsTrue(Settings.Default.ToolList.Any(t => t.Title == "FirstTry"));
Assert.AreEqual(toolListCount+4, Settings.Default.ToolList.Count);
// run the same command again. this time each should be skipped.
string output2 = RunCommand("--batch-commands=" + commandsToRun);
Assert.IsFalse(output2.Contains("NeWtOOl was added to the Tools Menu"));
Assert.IsFalse(output2.Contains("iHope was added to the Tools Menu"));
Assert.IsFalse(output2.Contains("thisWorks was added to the Tools Menu"));
Assert.IsFalse(output2.Contains("FirstTry was added to the Tools Menu"));
Assert.IsTrue(Settings.Default.ToolList.Any(t => t.Title == "NeWtOOl" && t.Command == @"C:\Windows\Notepad.exe" && t.Arguments == "$(DocumentDir)" && t.InitialDirectory == @"C:\"));
Assert.IsTrue(Settings.Default.ToolList.Any(t => t.Title == "iHope" && t.Command == @"C:\Windows\Notepad.exe"));
Assert.IsTrue(Settings.Default.ToolList.Any(t => t.Title == "thisWorks"));
Assert.IsTrue(Settings.Default.ToolList.Any(t => t.Title == "FirstTry"));
// the number of tools is unchanged.
Assert.AreEqual(toolListCount + 4, Settings.Default.ToolList.Count);
}
[TestMethod]
public void ConsoleExportToolsTest()
{
Settings.Default.ToolList.Clear();
// Get a unique tool title.
string title = GetTitleHelper();
const string command = @"C:\Windows\Notepad.exe";
const string arguments = "$(DocumentDir) Other";
const string initialDirectory = @"C:\";
// Test adding a tool.
RunCommand("--tool-add=" + title,
"--tool-command=" + command,
"--tool-arguments=" + arguments,
"--tool-initial-dir=" + initialDirectory);
// Test adding a tool.
const string newToolTitle = "TestTitle";
const string reportTitle = "\"Transition Results\"";
RunCommand("--tool-add=" + newToolTitle,
"--tool-command=" + command,
"--tool-arguments=" + arguments,
"--tool-initial-dir=" + initialDirectory,
"--tool-output-to-immediate-window",
"--tool-report=" + reportTitle);
string filePath = Path.GetTempFileName();
RunCommand("--tool-list-export=" + filePath);
using (StreamReader sr = new StreamReader(filePath))
{
string line1 = sr.ReadLine();
Assert.IsTrue(line1!=null);
Assert.IsTrue(line1.Contains(string.Format("--tool-add=\"{0}\"",title)));
Assert.IsTrue(line1.Contains(string.Format("--tool-command=\"{0}\"",command)));
Assert.IsTrue(line1.Contains(string.Format("--tool-arguments=\"{0}\"", arguments)));
Assert.IsTrue(line1.Contains(string.Format("--tool-initial-dir=\"{0}\"", initialDirectory)));
Assert.IsTrue(line1.Contains("--tool-conflict-resolution=skip"));
Assert.IsTrue(line1.Contains("--tool-report=\"\""));
string line2 = sr.ReadLine();
Assert.IsTrue(line2 != null);
Assert.IsTrue(line2.Contains(string.Format("--tool-add=\"{0}\"", newToolTitle)));
Assert.IsTrue(line2.Contains(string.Format("--tool-command=\"{0}\"", command)));
Assert.IsTrue(line2.Contains(string.Format("--tool-arguments=\"{0}\"", arguments)));
Assert.IsTrue(line2.Contains(string.Format("--tool-initial-dir=\"{0}\"", initialDirectory)));
Assert.IsTrue(line2.Contains("--tool-conflict-resolution=skip"));
Assert.IsTrue(line2.Contains(string.Format("--tool-report=\"{0}\"",reportTitle)));
Assert.IsTrue(line2.Contains("--tool-output-to-immediate-window"));
}
FileEx.SafeDelete(filePath);
}
[TestMethod]
public void ConsoleParserTest()
{
// Assert.AreEqual(new[] { "--test=foo bar", "--new" }, CommandLine.ParseArgs("\"--test=foo bar\" --new"));
// The above line of code would not pass so this other form works better.
// Test case "--test=foo bar" --new
string[] expected1 = { "--test=foo bar", "--new" };
string[] actual1 = CommandLine.ParseArgs("\"--test=foo bar\" --new");
Assert.AreEqual(expected1[0], actual1[0]);
Assert.AreEqual(expected1[1], actual1[1]);
// Or even better. A function that does the same assertion as above.
Assert.IsTrue(ParserTestHelper(new[] { "--test=foo bar", "--new" }, CommandLine.ParseArgs("\"--test=foo bar\" --new")));
// Test case --test="foo bar" --new
string[] expected2 = {"--test=foo bar", "--new"};
string[] actual2 = CommandLine.ParseArgs("--test=\"foo bar\" --new");
Assert.AreEqual(expected2[0],actual2[0]);
Assert.AreEqual(expected2[1],actual2[1]);
Assert.IsTrue(ParserTestHelper(new[] { "--test=foo bar", "--new" }, CommandLine.ParseArgs("--test=\"foo bar\" --new")));
// Test case --test="i said ""foo bar""" -new
string[] expected3 = { "--test=i said \"foo bar\"", "--new" };
string[] actual3 = CommandLine.ParseArgs("--test=\"i said \"\"foo bar\"\"\" --new");
Assert.AreEqual(expected3[0], actual3[0]);
Assert.AreEqual(expected3[1], actual3[1]);
Assert.IsTrue(ParserTestHelper(new[] { "--test=i said \"foo bar\"", "--new" }, CommandLine.ParseArgs("--test=\"i said \"\"foo bar\"\"\" --new")));
// Test case "--test=foo --new --bar"
Assert.IsTrue(ParserTestHelper(new[] { "--test=foo --new --bar" }, CommandLine.ParseArgs("\"--test=foo --new --bar\"")));
// Test case --test="" --new --bar
Assert.IsTrue(ParserTestHelper(new[] { "--test=", "--new", "--bar" }, CommandLine.ParseArgs("--test=\"\" --new --bar")));
// Test case of all spaces
string[] test = CommandLine.ParseArgs(" ");
Assert.IsTrue(ParserTestHelper(new string[] {}, test));
}
[TestMethod]
public void CommandLineArrayParserTest()
{
// Test case [] = "" - an empty array
Assert.AreEqual(string.Empty, CommandLine.JoinArgs(new string[0]));
// Test case [a,b,c] = "a b c" - a simple array with no spaces
Assert.AreEqual("a b c", CommandLine.JoinArgs(new [] {"a", "b", "c"}));
// Test case [a b, c, d] = ""a b" c d" - multiword string at beginning of array
Assert.AreEqual("\"a b\" c d", CommandLine.JoinArgs(new [] {"a b", "c", "d"}));
// Test case [a, b, c d] = "a b "c d"" - multiword string at end of array
Assert.AreEqual("a b \"c d\"", CommandLine.JoinArgs(new [] { "a", "b", "c d" }));
// Test case [a, b c d, e] = " a "b c d" e" - multiword string at middle of array
Assert.AreEqual("a \"b c d\" e", CommandLine.JoinArgs(new [] { "a", "b c d", "e" }));
// Test case [a, b c, d e f, g, h i] = "a "b c" "d e f" g "h i"" - multiple multiword strings
Assert.AreEqual("a \"b c\" \"d e f\" g \"h i\"", CommandLine.JoinArgs(new [] { "a", "b c", "d e f", "g" , "h i" }));
// Test case [a "b" c] = "a "b" c" - nested quotes
Assert.AreEqual("\"a \"b\" c\"", CommandLine.JoinArgs(new [] {"a \"b\" c"}));
// Test case [a bc] = "a bc" - tabbed whitespace only
Assert.AreEqual("\"a\tbc\"", CommandLine.JoinArgs(new [] {"a\tbc"}));
// Test case [a,,c] = "a "" c" - empty string
Assert.AreEqual("a \"\" c", CommandLine.JoinArgs(new [] {"a", string.Empty, "c"}));
}
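        // Verifies the errors produced for missing or invalid Panorama arguments, using a stub client
        // to simulate server, user and folder validation failures.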
[TestMethod]
public void ConsolePanoramaArgsTest()
{
var testFilesDir = new TestFilesDir(TestContext, ZIP_FILE);
string docPath = testFilesDir.GetTestPath("BSA_Protea_label_free_20100323_meth3_multi.sky");
// Error: missing panorama args
var output = RunCommand("--in=" + docPath,
"--panorama-server=https://panoramaweb.org");
Assert.IsTrue(
output.Contains(string.Format(Resources.CommandArgs_PanoramaArgsComplete_plural_,
TextUtil.LineSeparate("--panorama-username", "--panorama-password", "--panorama-folder"))));
output = RunCommand("--in=" + docPath,
"--panorama-server=https://panoramaweb.org",
"--panorama-username=user",
"--panorama-password=passwd");
Assert.IsTrue(
output.Contains(string.Format(Resources.CommandArgs_PanoramaArgsComplete_, "--panorama-folder")));
// Error: invalid server URL
const string badServer = "bad server url";
output = RunCommand("--in=" + docPath,
"--panorama-server=" + badServer,
"--panorama-username=user",
"--panorama-password=passwd",
"--panorama-folder=folder");
Assert.IsTrue(output.Contains(string.Format(
Resources.EditServerDlg_OkDialog_The_text__0__is_not_a_valid_server_name_, badServer
)));
var buffer = new StringBuilder();
var helper = new CommandArgs.PanoramaHelper(new StringWriter(buffer));
// Error: Unknown server
var serverUri = PanoramaUtil.ServerNameToUri("unknown.server-state.com");
var client = new TestPanoramaClient() { MyServerState = ServerState.unknown, ServerUri = serverUri };
helper.ValidateServer(client, null, null);
Assert.IsTrue(
buffer.ToString()
.Contains(
string.Format(Resources.EditServerDlg_OkDialog_Unknown_error_connecting_to_the_server__0__,
serverUri.AbsoluteUri)));
buffer.Clear();
// Error: Not a Panorama Server
serverUri = PanoramaUtil.ServerNameToUri("www.google.com");
client = new TestPanoramaClient() {MyPanoramaState = PanoramaState.other, ServerUri = serverUri};
helper.ValidateServer(client, null, null);
Assert.IsTrue(
buffer.ToString()
.Contains(
string.Format(Resources.EditServerDlg_OkDialog_The_server__0__is_not_a_Panorama_server,
serverUri.AbsoluteUri)));
buffer.Clear();
// Error: Invalid user
serverUri = PanoramaUtil.ServerNameToUri(PanoramaUtil.PANORAMA_WEB);
client = new TestPanoramaClient() { MyUserState = UserState.nonvalid, ServerUri = serverUri };
helper.ValidateServer(client, "invalid", "user");
Assert.IsTrue(
buffer.ToString()
.Contains(
Resources
.EditServerDlg_OkDialog_The_username_and_password_could_not_be_authenticated_with_the_panorama_server));
buffer.Clear();
// Error: unknown exception
client = new TestPanoramaClientThrowsException();
helper.ValidateServer(client, null, null);
Assert.IsTrue(
buffer.ToString()
.Contains(
string.Format(Resources.PanoramaHelper_ValidateServer_, "GetServerState threw an exception")));
buffer.Clear();
// Error: folder does not exist
client = new TestPanoramaClient() { MyFolderState = FolderState.notfound, ServerUri = serverUri };
var server = helper.ValidateServer(client, "user", "password");
var folder = "folder/not/found";
helper.ValidateFolder(client, server, folder);
Assert.IsTrue(
buffer.ToString()
.Contains(
string.Format(
Resources.PanoramaUtil_VerifyFolder_Folder__0__does_not_exist_on_the_Panorama_server__1_,
folder, client.ServerUri)));
buffer.Clear();
// Error: no permissions on folder
client = new TestPanoramaClient() { MyFolderState = FolderState.nopermission, ServerUri = serverUri };
folder = "no/permissions";
helper.ValidateFolder(client, server, folder);
Assert.IsTrue(
buffer.ToString()
.Contains(
string.Format(
Resources.PanoramaUtil_VerifyFolder_User__0__does_not_have_permissions_to_upload_to_the_Panorama_folder__1_,
"user", folder)));
buffer.Clear();
// Error: not a Panorama folder
client = new TestPanoramaClient() { MyFolderState = FolderState.notpanorama, ServerUri = serverUri };
folder = "not/panorama";
helper.ValidateFolder(client, server, folder);
Assert.IsTrue(
buffer.ToString()
.Contains(string.Format(Resources.PanoramaUtil_VerifyFolder__0__is_not_a_Panorama_folder,
folder)));
}
private static string GetTitleHelper()
{
int i = 1;
do
{
if (Settings.Default.ToolList.All(item => item.Title != (string.Format("TestTool{0}", i))))
{
return string.Format("TestTool{0}", i);
}
i++;
} while (true);
}
        // Compare two string arrays: they must have the same length, and each actual string must equal the expected one.
        private static bool ParserTestHelper (string[] actual, string[] expected )
        {
            if (actual.Length != expected.Length)
            {
                return false;
            }
            for (int i = 0; i < actual.Length; i++)
            {
                if (!actual[i].Equals(expected[i]))
                {
                    return false;
                }
            }
            return true;
}
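        // When raw-file reading is enabled, normalizes the extension to lower-case ".raw" so the path matches the file on disk.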
private static MsDataFilePath GetThermoDiskPath(MsDataFilePath pathToRaw)
{
return ExtensionTestContext.CanImportThermoRaw && ExtensionTestContext.CanImportWatersRaw
? pathToRaw.SetFilePath(Path.ChangeExtension(pathToRaw.FilePath, "raw"))
: pathToRaw;
}
private static void CheckRunCommandOutputContains(string expectedMessage, string actualMessage)
{
Assert.IsTrue(actualMessage.Contains(expectedMessage),
string.Format("Expected RunCommand result message containing \n\"{0}\",\ngot\n\"{1}\"\ninstead.", expectedMessage, actualMessage));
}
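        // Stub IPanoramaClient whose responses are controlled through the My*State properties; defaults to a fully valid server.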
private class TestPanoramaClient : IPanoramaClient
{
public Uri ServerUri { get; set; }
public ServerState MyServerState { get; set; }
public PanoramaState MyPanoramaState { get; set; }
public UserState MyUserState { get; set; }
public FolderState MyFolderState { get; set; }
public TestPanoramaClient()
{
MyServerState = ServerState.available;
MyPanoramaState = PanoramaState.panorama;
MyUserState = UserState.valid;
MyFolderState = FolderState.valid;
}
public virtual ServerState GetServerState()
{
return MyServerState;
}
public PanoramaState IsPanorama()
{
return MyPanoramaState;
}
public UserState IsValidUser(string username, string password)
{
return MyUserState;
}
public FolderState IsValidFolder(string folderPath, string username, string password)
{
return MyFolderState;
}
}
private class TestPanoramaClientThrowsException : TestPanoramaClient
{
public override ServerState GetServerState()
{
throw new Exception("GetServerState threw an exception");
}
}
}
} | 1 | 12,383 | Obviously, don't merge this to master. | ProteoWizard-pwiz | .cs |
@@ -160,6 +160,18 @@ module RSpec::Core
end
end
+ describe "#exit_early" do
+ it "returns the passed exit code" do
+ expect(reporter.exit_early(42)).to eq(42)
+ end
+
+ it "reports zero examples" do
+ allow(reporter).to receive(:report)
+ reporter.exit_early(42)
+ expect(reporter).to have_received(:report).with(0)
+ end
+ end
+
describe "#report" do
it "supports one arg (count)" do
reporter.report(1) {} | 1 | module RSpec::Core
RSpec.describe Reporter do
include FormatterSupport
let(:config) { Configuration.new }
let(:world) { World.new(config) }
let(:reporter) { Reporter.new config }
let(:start_time) { Time.now }
let(:example) { super() }
describe "finish" do
let(:formatter) { double("formatter") }
%w[start_dump dump_pending dump_failures dump_summary close].map(&:to_sym).each do |message|
it "sends #{message} to the formatter(s) that respond to message" do
reporter.register_listener formatter, message
expect(formatter.as_null_object).to receive(message)
reporter.finish
end
it "doesnt notify formatters about messages they dont implement" do
expect { reporter.finish }.to_not raise_error
end
end
it "dumps the failure summary after the profile and deprecation summary so failures don't scroll off the screen and get missed" do
config.profile_examples = 10
formatter = instance_double("RSpec::Core::Formatter::ProgressFormatter")
reporter.register_listener(formatter, :dump_summary, :dump_profile, :deprecation_summary)
expect(formatter).to receive(:deprecation_summary).ordered
expect(formatter).to receive(:dump_profile).ordered
expect(formatter).to receive(:dump_summary).ordered
reporter.finish
end
it "allows the profiler to be used without being manually setup" do
config.profile_examples = true
expect {
reporter.finish
}.to_not raise_error
end
end
describe 'start' do
before { config.start_time = start_time }
it 'notifies the formatter of start with example count' do
formatter = double("formatter")
reporter.register_listener formatter, :start
expect(formatter).to receive(:start) do |notification|
expect(notification.count).to eq 3
expect(notification.load_time).to eq 5
end
reporter.start 3, (start_time + 5)
end
      it 'notifies the formatter of the seed used before notifying of start' do
formatter = double("formatter")
reporter.register_listener formatter, :seed
reporter.register_listener formatter, :start
expect(formatter).to receive(:seed).ordered.with(
an_object_having_attributes(:seed => config.seed, :seed_used? => config.seed_used?)
)
expect(formatter).to receive(:start).ordered
reporter.start 1
end
end
context "given one formatter" do
it "passes messages to that formatter" do
formatter = double("formatter", :example_started => nil)
reporter.register_listener formatter, :example_started
example = new_example
expect(formatter).to receive(:example_started) do |notification|
expect(notification.example).to eq example
end
reporter.example_started(example)
end
it "passes messages to the formatter in the correct order" do
order = []
formatter = double("formatter")
allow(formatter).to receive(:example_group_started) { |n| order << "Started: #{n.group.description}" }
allow(formatter).to receive(:example_started) { |n| order << "Started Example" }
allow(formatter).to receive(:example_finished) { |n| order << "Finished Example" }
allow(formatter).to receive(:example_passed) { |n| order << "Passed" }
allow(formatter).to receive(:example_pending) { |n| order << "Pending" }
allow(formatter).to receive(:example_failed) { |n| order << "Failed" }
allow(formatter).to receive(:example_group_finished) { |n| order << "Finished: #{n.group.description}" }
reporter.register_listener formatter, :example_group_started, :example_group_finished,
:example_started, :example_finished,
:example_passed, :example_failed, :example_pending
group = RSpec.describe("root")
group.describe("context 1") do
example("passing example") {}
example("pending example", :skip => true) { }
end
group.describe("context 2") do
example("failed example") { fail }
end
group.run(reporter)
expect(order).to eq([
"Started: root",
"Started: context 1",
"Started Example",
"Finished Example",
"Passed",
"Started Example",
"Finished Example",
"Pending",
"Finished: context 1",
"Started: context 2",
"Started Example",
"Finished Example",
"Failed",
"Finished: context 2",
"Finished: root"
])
end
end
context "given an example group with no examples" do
it "does not pass example_group_started or example_group_finished to formatter" do
formatter = double("formatter")
expect(formatter).not_to receive(:example_group_started)
expect(formatter).not_to receive(:example_group_finished)
reporter.register_listener formatter, :example_group_started, :example_group_finished
group = RSpec.describe("root")
group.run(reporter)
end
end
context "given multiple formatters" do
it "passes messages to all formatters" do
formatters = (1..2).map { double("formatter", :example_started => nil) }
example = new_example
formatters.each do |formatter|
expect(formatter).to receive(:example_started) do |notification|
expect(notification.example).to eq example
end
reporter.register_listener formatter, :example_started
end
reporter.example_started(example)
end
end
describe "#report" do
it "supports one arg (count)" do
reporter.report(1) {}
end
it "yields itself" do
yielded = nil
reporter.report(3) { |r| yielded = r }
expect(yielded).to eq(reporter)
end
end
describe "#register_listener" do
let(:listener) { double("listener", :start => nil) }
before { reporter.register_listener listener, :start }
it 'will register the listener to specified notifications' do
expect(reporter.registered_listeners :start).to eq [listener]
end
it 'will match string notification names' do
reporter.register_listener listener, "stop"
expect(reporter.registered_listeners :stop).to eq [listener]
end
it 'will send notifications when a subscribed event is triggered' do
expect(listener).to receive(:start) do |notification|
expect(notification.count).to eq 42
end
reporter.start 42
end
it 'will ignore duplicated listeners' do
reporter.register_listener listener, :start
expect(listener).to receive(:start).once
reporter.start 42
end
end
describe "#publish" do
let(:listener) { double("listener", :custom => nil) }
before do
reporter.register_listener listener, :custom, :start
end
it 'will send custom events to registered listeners' do
expect(listener).to receive(:custom).with(RSpec::Core::Notifications::NullNotification)
reporter.publish :custom
end
it 'will raise when encountering RSpec standard events' do
expect { reporter.publish :start }.to raise_error(
StandardError,
a_string_including("not internal RSpec ones")
)
end
it 'will ignore event names sent as strings' do
expect(listener).not_to receive(:custom)
reporter.publish "custom"
end
it 'will provide a custom notification object based on the options hash' do
expect(listener).to receive(:custom).with(
an_object_having_attributes(:my_data => :value)
)
reporter.publish :custom, :my_data => :value
end
end
describe "#abort_with" do
before { allow(reporter).to receive(:exit!) }
it "publishes the message and notifies :close" do
listener = double("Listener")
reporter.register_listener(listener, :message, :close)
stream = StringIO.new
allow(listener).to receive(:message) { |n| stream << n.message }
allow(listener).to receive(:close) { stream.close }
reporter.register_listener(listener)
reporter.abort_with("Booom!", 1)
expect(stream).to have_attributes(:string => "Booom!").and be_closed
end
it "exits with the provided exit code" do
reporter.abort_with("msg", 13)
expect(reporter).to have_received(:exit!).with(13)
end
end
describe "timing" do
before do
config.start_time = start_time
end
it "uses RSpec::Core::Time as to not be affected by changes to time in examples" do
formatter = double(:formatter)
reporter.register_listener formatter, :dump_summary
reporter.start 1
allow(Time).to receive_messages(:now => ::Time.utc(2012, 10, 1))
duration = nil
allow(formatter).to receive(:dump_summary) do |notification|
duration = notification.duration
end
reporter.finish
expect(duration).to be < 0.2
end
it "captures the load time so it can report it later" do
formatter = instance_double("ProgressFormatter")
reporter.register_listener formatter, :dump_summary
reporter.start 3, (start_time + 5)
expect(formatter).to receive(:dump_summary) do |notification|
expect(notification.load_time).to eq(5)
end
reporter.finish
end
end
describe "#notify_non_example_exception" do
it "sends a `message` notification that contains the formatted exception details" do
formatter_out = StringIO.new
formatter = Formatters::ProgressFormatter.new(formatter_out)
reporter.register_listener formatter, :message
line = __LINE__ + 1
exception = 1 / 0 rescue $!
reporter.notify_non_example_exception(exception, "NonExample Context")
expect(formatter_out.string).to start_with(<<-EOS.gsub(/^ +\|/, '').chomp)
|
|NonExample Context
|Failure/Error: exception = 1 / 0 rescue $!
|
|ZeroDivisionError:
| divided by 0
|# #{Metadata.relative_path(__FILE__)}:#{line}
EOS
end
it "records the fact that a non example failure has occurred" do
expect {
reporter.notify_non_example_exception(Exception.new, "NonExample Context")
}.to change(world, :non_example_failure).from(a_falsey_value).to(true)
end
end
end
end
| 1 | 17,088 | Nearly there! This should assert on the output, take the result of "start" and "finish" as an example :) | rspec-rspec-core | rb |
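The review note for this patch asks the new example to assert on the reporter's actual output rather than only exercising the calls, pointing at the existing "start"/"finish" flow. A minimal sketch of that idea, reusing the StringIO + ProgressFormatter pattern this spec already uses (the example name and the expected summary string are illustrative, not from the patch):

it "writes the summary to the formatter output" do
  formatter_out = StringIO.new
  formatter = Formatters::ProgressFormatter.new(formatter_out)
  reporter.register_listener formatter, :dump_summary
  reporter.start 1
  reporter.finish
  expect(formatter_out.string).to include("0 examples, 0 failures")
end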
@@ -63,11 +63,11 @@ func NewDriverSub() *driverSub {
return ds
}
-func (s *driverSub) ReceiveBatch(ctx context.Context) ([]*driver.Message, error) {
+func (s *driverSub) ReceiveBatch(ctx context.Context, maxMessages int) ([]*driver.Message, error) {
for {
select {
case <-s.sem:
- ms := s.grabQueue()
+ ms := s.grabQueue(maxMessages)
if len(ms) != 0 {
return ms, nil
} | 1 | // Copyright 2018 The Go Cloud Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub_test
import (
"context"
"errors"
"fmt"
"math/rand"
"sync"
"testing"
"github.com/google/go-cloud/internal/pubsub"
"github.com/google/go-cloud/internal/pubsub/driver"
"github.com/google/go-cloud/internal/retry"
)
type driverTopic struct {
subs []*driverSub
}
func (t *driverTopic) SendBatch(ctx context.Context, ms []*driver.Message) error {
for _, s := range t.subs {
select {
case <-s.sem:
s.q = append(s.q, ms...)
s.sem <- struct{}{}
case <-ctx.Done():
return ctx.Err()
}
}
return nil
}
func (t *driverTopic) Close() error {
return nil
}
func (s *driverTopic) IsRetryable(error) bool { return false }
type driverSub struct {
sem chan struct{}
// Normally this queue would live on a separate server in the cloud.
q []*driver.Message
}
func NewDriverSub() *driverSub {
ds := &driverSub{
sem: make(chan struct{}, 1),
}
ds.sem <- struct{}{}
return ds
}
func (s *driverSub) ReceiveBatch(ctx context.Context) ([]*driver.Message, error) {
for {
select {
case <-s.sem:
ms := s.grabQueue()
if len(ms) != 0 {
return ms, nil
}
case <-ctx.Done():
return nil, ctx.Err()
default:
}
}
}
func (s *driverSub) grabQueue() []*driver.Message {
defer func() { s.sem <- struct{}{} }()
if len(s.q) > 0 {
ms := s.q
s.q = nil
return ms
}
return nil
}
func (s *driverSub) SendAcks(ctx context.Context, ackIDs []driver.AckID) error {
return nil
}
func (s *driverSub) Close() error {
return nil
}
func (s *driverSub) IsRetryable(error) bool { return false }
func TestSendReceive(t *testing.T) {
ctx := context.Background()
ds := NewDriverSub()
dt := &driverTopic{
subs: []*driverSub{ds},
}
topic := pubsub.NewTopic(dt)
defer topic.Close()
m := &pubsub.Message{Body: []byte("user signed up")}
if err := topic.Send(ctx, m); err != nil {
t.Fatal(err)
}
sub := pubsub.NewSubscription(ds)
defer sub.Close()
m2, err := sub.Receive(ctx)
if err != nil {
t.Fatal(err)
}
if string(m2.Body) != string(m.Body) {
t.Fatalf("received message has body %q, want %q", m2.Body, m.Body)
}
}
func TestConcurrentReceivesGetAllTheMessages(t *testing.T) {
howManyToSend := int(1e3)
ctx, cancel := context.WithCancel(context.Background())
dt := &driverTopic{}
// Make a subscription and start goroutines to receive from it.
var wg sync.WaitGroup
wg.Add(howManyToSend)
ds := NewDriverSub()
dt.subs = append(dt.subs, ds)
s := pubsub.NewSubscription(ds)
defer s.Close()
var mu sync.Mutex
receivedMsgs := make(map[string]int)
for i := 0; i < 10; i++ {
go func() {
for {
m, err := s.Receive(ctx)
if err != nil {
if isCanceled(err) {
return
}
t.Fatal(err)
}
mu.Lock()
receivedMsgs[string(m.Body)]++
mu.Unlock()
wg.Done()
}
}()
}
// Send messages.
topic := pubsub.NewTopic(dt)
defer topic.Close()
sentMsgs := make(map[string]int)
for i := 0; i < howManyToSend; i++ {
bod := fmt.Sprintf("%d", rand.Int())
m := &pubsub.Message{Body: []byte(bod)}
sentMsgs[string(m.Body)]++
if err := topic.Send(ctx, m); err != nil {
t.Fatal(err)
}
}
// Wait for all the goroutines to finish processing all the messages.
wg.Wait()
cancel()
// Check that all the messages were received.
sum := 0
for _, n := range receivedMsgs {
sum += n
}
if sum != howManyToSend {
t.Errorf("received %d messages, want %d", sum, howManyToSend)
}
for k, v := range sentMsgs {
v2 := receivedMsgs[k]
if v2 != v {
t.Errorf("got %d for %q, want %d", v2, k, v)
}
}
}
func TestCancelSend(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ds := NewDriverSub()
dt := &driverTopic{
subs: []*driverSub{ds},
}
topic := pubsub.NewTopic(dt)
defer topic.Close()
m := &pubsub.Message{}
// Intentionally break the driver subscription by acquiring its semaphore.
// Now topic.Send will have to wait for cancellation.
<-ds.sem
cancel()
if err := topic.Send(ctx, m); err == nil {
t.Error("got nil, want cancellation error")
}
}
func TestCancelReceive(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ds := NewDriverSub()
s := pubsub.NewSubscription(ds)
defer s.Close()
cancel()
// Without cancellation, this Receive would hang.
if _, err := s.Receive(ctx); err == nil {
t.Error("got nil, want cancellation error")
}
}
func TestRetryTopic(t *testing.T) {
// Test that Send is retried if the driver returns a retryable error.
ft := &failTopic{}
top := pubsub.NewTopic(ft)
err := top.Send(context.Background(), &pubsub.Message{})
if err != nil {
t.Errorf("Send: got %v, want nil", err)
}
if got, want := ft.calls, nRetryCalls+1; got != want {
t.Errorf("calls: got %d, want %d", got, want)
}
}
var errRetry = errors.New("retry")
func isRetryable(err error) bool {
return err == errRetry
}
const nRetryCalls = 2
type failTopic struct {
driver.Topic
calls int
}
func (t *failTopic) SendBatch(ctx context.Context, ms []*driver.Message) error {
t.calls++
if t.calls <= nRetryCalls {
return errRetry
}
return nil
}
func (t *failTopic) IsRetryable(err error) bool { return isRetryable(err) }
func TestRetryReceive(t *testing.T) {
fs := &failSub{}
sub := pubsub.NewSubscription(fs)
_, err := sub.Receive(context.Background())
if err != nil {
t.Errorf("Receive: got %v, want nil", err)
}
if got, want := fs.calls, nRetryCalls+1; got != want {
t.Errorf("calls: got %d, want %d", got, want)
}
}
type failSub struct {
driver.Subscription
calls int
}
func (t *failSub) ReceiveBatch(ctx context.Context) ([]*driver.Message, error) {
t.calls++
if t.calls <= nRetryCalls {
return nil, errRetry
}
return []*driver.Message{{Body: []byte("")}}, nil
}
func (t *failSub) IsRetryable(err error) bool { return isRetryable(err) }
// TODO(jba): add a test for retry of SendAcks.
func isCanceled(err error) bool {
if cerr, ok := err.(*retry.ContextError); ok {
err = cerr.CtxErr
}
return err == context.Canceled
}
| 1 | 12,338 | This loop is going to burn CPU if len(s.q) == 0. Maybe a small sleep? | google-go-cloud | go |
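The review note flags that ReceiveBatch spins at full CPU whenever the queue is empty. One way to realize the suggested "small sleep" is to pause on both empty paths so the loop stops busy-waiting (the 10ms value is an arbitrary illustrative choice, and "time" would need to be added to the imports):

func (s *driverSub) ReceiveBatch(ctx context.Context, maxMessages int) ([]*driver.Message, error) {
	for {
		select {
		case <-s.sem:
			ms := s.grabQueue(maxMessages)
			if len(ms) != 0 {
				return ms, nil
			}
			// Queue was empty; back off briefly before polling again.
			time.Sleep(10 * time.Millisecond)
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
			time.Sleep(10 * time.Millisecond)
		}
	}
}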
@@ -30,7 +30,10 @@ import com.pingcap.tikv.exception.TiClientInternalException;
import com.pingcap.tikv.key.Key;
import com.pingcap.tikv.util.ConcreteBackOffer;
import com.pingcap.tikv.util.Pair;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
import org.apache.log4j.Logger;
import org.tikv.kvproto.Metapb.Peer;
import org.tikv.kvproto.Metapb.Store; | 1 | /*
*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.pingcap.tikv.region;
import static com.pingcap.tikv.codec.KeyUtils.formatBytes;
import static com.pingcap.tikv.util.KeyRangeUtils.makeRange;
import com.google.common.collect.RangeMap;
import com.google.common.collect.TreeRangeMap;
import com.google.protobuf.ByteString;
import com.pingcap.tikv.ReadOnlyPDClient;
import com.pingcap.tikv.TiSession;
import com.pingcap.tikv.exception.GrpcException;
import com.pingcap.tikv.exception.TiClientInternalException;
import com.pingcap.tikv.key.Key;
import com.pingcap.tikv.util.ConcreteBackOffer;
import com.pingcap.tikv.util.Pair;
import java.util.*;
import org.apache.log4j.Logger;
import org.tikv.kvproto.Metapb.Peer;
import org.tikv.kvproto.Metapb.Store;
import org.tikv.kvproto.Metapb.StoreState;
public class RegionManager {
private static final Logger logger = Logger.getLogger(RegionManager.class);
private RegionCache cache;
private final ReadOnlyPDClient pdClient;
  // To avoid double retrieval, we use the async version of gRPC:
  // when an RPC has not yet returned, we wait for the previous call to finish instead of calling again
public RegionManager(ReadOnlyPDClient pdClient) {
this.cache = new RegionCache(pdClient);
this.pdClient = pdClient;
}
public static class RegionCache {
private final Map<Long, TiRegion> regionCache;
private final Map<Long, Store> storeCache;
private final RangeMap<Key, Long> keyToRegionIdCache;
private final ReadOnlyPDClient pdClient;
public RegionCache(ReadOnlyPDClient pdClient) {
regionCache = new HashMap<>();
storeCache = new HashMap<>();
keyToRegionIdCache = TreeRangeMap.create();
this.pdClient = pdClient;
}
public synchronized TiRegion getRegionByKey(ByteString key) {
Long regionId;
regionId = keyToRegionIdCache.get(Key.toRawKey(key));
if (logger.isDebugEnabled()) {
logger.debug(String.format("getRegionByKey key[%s] -> ID[%s]", formatBytes(key), regionId));
}
if (regionId == null) {
        logger.debug("Key not found in keyToRegionIdCache:" + formatBytes(key));
TiRegion region = pdClient.getRegionByKey(ConcreteBackOffer.newGetBackOff(), key);
if (!putRegion(region)) {
throw new TiClientInternalException("Invalid Region: " + region.toString());
}
return region;
}
TiRegion region = regionCache.get(regionId);
if (logger.isDebugEnabled()) {
logger.debug(String.format("getRegionByKey ID[%s] -> Region[%s]", regionId, region));
}
return region;
}
private synchronized boolean putRegion(TiRegion region) {
if (logger.isDebugEnabled()) {
logger.debug("putRegion: " + region);
}
regionCache.put(region.getId(), region);
keyToRegionIdCache.put(makeRange(region.getStartKey(), region.getEndKey()), region.getId());
return true;
}
private synchronized TiRegion getRegionById(long regionId) {
TiRegion region = regionCache.get(regionId);
if (logger.isDebugEnabled()) {
logger.debug(String.format("getRegionByKey ID[%s] -> Region[%s]", regionId, region));
}
if (region == null) {
region = pdClient.getRegionByID(ConcreteBackOffer.newGetBackOff(), regionId);
if (!putRegion(region)) {
throw new TiClientInternalException("Invalid Region: " + region.toString());
}
}
return region;
}
/** Removes region associated with regionId from regionCache. */
public synchronized void invalidateRegion(long regionId) {
try {
if (logger.isDebugEnabled()) {
logger.debug(String.format("invalidateRegion ID[%s]", regionId));
}
TiRegion region = regionCache.get(regionId);
keyToRegionIdCache.remove(makeRange(region.getStartKey(), region.getEndKey()));
} catch (Exception ignore) {
} finally {
regionCache.remove(regionId);
}
}
public synchronized void invalidateAllRegionForStore(long storeId) {
List<TiRegion> regionToRemove = new ArrayList<>();
for (TiRegion r : regionCache.values()) {
if (r.getLeader().getStoreId() == storeId) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("invalidateAllRegionForStore Region[%s]", r));
}
regionToRemove.add(r);
}
}
// remove region
for (TiRegion r : regionToRemove) {
regionCache.remove(r.getId());
keyToRegionIdCache.remove(makeRange(r.getStartKey(), r.getEndKey()));
}
}
public synchronized void invalidateStore(long storeId) {
storeCache.remove(storeId);
}
public synchronized Store getStoreById(long id) {
try {
Store store = storeCache.get(id);
if (store == null) {
store = pdClient.getStore(ConcreteBackOffer.newGetBackOff(), id);
}
if (store.getState().equals(StoreState.Tombstone)) {
return null;
}
storeCache.put(id, store);
return store;
} catch (Exception e) {
throw new GrpcException(e);
}
}
}
public TiSession getSession() {
return pdClient.getSession();
}
public TiRegion getRegionByKey(ByteString key) {
return cache.getRegionByKey(key);
}
public TiRegion getRegionById(long regionId) {
return cache.getRegionById(regionId);
}
public Pair<TiRegion, Store> getRegionStorePairByKey(ByteString key) {
TiRegion region = cache.getRegionByKey(key);
if (region == null) {
throw new TiClientInternalException("Region not exist for key:" + formatBytes(key));
}
if (!region.isValid()) {
throw new TiClientInternalException("Region invalid: " + region.toString());
}
Peer leader = region.getLeader();
long storeId = leader.getStoreId();
return Pair.create(region, cache.getStoreById(storeId));
}
public Pair<TiRegion, Store> getRegionStorePairByRegionId(long id) {
TiRegion region = cache.getRegionById(id);
if (!region.isValid()) {
throw new TiClientInternalException("Region invalid: " + region.toString());
}
Peer leader = region.getLeader();
long storeId = leader.getStoreId();
return Pair.create(region, cache.getStoreById(storeId));
}
public Store getStoreById(long id) {
return cache.getStoreById(id);
}
public void onRegionStale(long regionId) {
cache.invalidateRegion(regionId);
}
public boolean updateLeader(long regionId, long storeId) {
TiRegion r = cache.regionCache.get(regionId);
if (r != null) {
if (!r.switchPeer(storeId)) {
// failed to switch leader, possibly region is outdated, we need to drop region cache from
// regionCache
logger.warn("Cannot find peer when updating leader (" + regionId + "," + storeId + ")");
// drop region cache using verId
cache.invalidateRegion(regionId);
return false;
}
}
return true;
}
/**
* Clears all cache when a TiKV server does not respond
*
* @param regionId region's id
* @param storeId TiKV store's id
*/
public void onRequestFail(long regionId, long storeId) {
cache.invalidateRegion(regionId);
cache.invalidateAllRegionForStore(storeId);
}
public void invalidateStore(long storeId) {
cache.invalidateStore(storeId);
}
public void invalidateRegion(long regionId) {
cache.invalidateRegion(regionId);
}
}
| 1 | 9,206 | can you give a reason why session should be contained in RegionManager? | pingcap-tispark | java |
@@ -37,6 +37,16 @@ func TestAuthenticate_WhenSignatureIsCorrect(t *testing.T) {
assert.Exactly(t, originalSignerID, signerID, "Original signer should be extracted")
}
+func TestAuthenticate_WhenBase64MessageSignatureIsCorrect(t *testing.T) {
+ message := []byte("MystVpnSessionId:Boop!")
+ signature := SignatureBase64("V6ifmvLuAT+hbtLBX/0xm3C0afywxTIdw1HqLmA4onpwmibHbxVhl50Gr3aRUZMqw1WxkfSIVdhpbCluHGBKsgE=")
+
+ extractor := &extractor{}
+ signerID, err := extractor.Extract(message, signature)
+ assert.NoError(t, err)
+ assert.Exactly(t, originalSignerID, signerID, "Original signer should be extracted")
+}
+
func TestAuthenticate_WhenSignatureIsEmpty(t *testing.T) {
message := []byte("Boop!")
signature := SignatureHex("") | 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package identity
import (
"github.com/stretchr/testify/assert"
"testing"
)
var (
originalSignerID = FromAddress("0x53a835143c0ef3bbcbfa796d7eb738ca7dd28f68")
hijackedSignerID = FromAddress("0xded9913d38bfe94845b9e21fd32f43d0240e2f34")
)
func TestAuthenticate_WhenSignatureIsCorrect(t *testing.T) {
message := []byte("Boop!")
signature := SignatureHex("1f89542f406b2d638fe09cd9912d0b8c0b5ebb4aef67d52ab046973e34fb430a1953576cd19d140eddb099aea34b2985fbd99e716d3b2f96a964141fdb84b32000")
extractor := &extractor{}
signerID, err := extractor.Extract(message, signature)
assert.NoError(t, err)
assert.Exactly(t, originalSignerID, signerID, "Original signer should be extracted")
}
func TestAuthenticate_WhenSignatureIsEmpty(t *testing.T) {
message := []byte("Boop!")
signature := SignatureHex("")
extractor := &extractor{}
signerID, err := extractor.Extract(message, signature)
assert.EqualError(t, err, "empty signature")
assert.Exactly(t, Identity{}, signerID)
}
func TestAuthenticate_WhenSignatureIsMalformed(t *testing.T) {
message := []byte("Boop!")
signature := SignatureHex("7369676e6564")
extractor := &extractor{}
signerID, err := extractor.Extract(message, signature)
assert.EqualError(t, err, "invalid signature length")
assert.Exactly(t, Identity{}, signerID)
}
func TestAuthenticate_WhenMessageIsChanged(t *testing.T) {
message := []byte("Boop changed!")
signature := SignatureHex("1f89542f406b2d638fe09cd9912d0b8c0b5ebb4aef67d52ab046973e34fb430a1953576cd19d140eddb099aea34b2985fbd99e716d3b2f96a964141fdb84b32000")
extractor := &extractor{}
signerID, err := extractor.Extract(message, signature)
assert.NoError(t, err)
assert.NotEqual(t, originalSignerID, signerID, "Original signer should not be extracted")
assert.Exactly(t, hijackedSignerID, signerID, "Another signer extracted")
}
| 1 | 11,061 | You are comparing two signerIDs here, error message should be "Signers should match" | mysteriumnetwork-node | go |
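The review note says the new assertion compares two signer identities, so its failure message should say so. The test from the patch with the suggested wording:

func TestAuthenticate_WhenBase64MessageSignatureIsCorrect(t *testing.T) {
	message := []byte("MystVpnSessionId:Boop!")
	signature := SignatureBase64("V6ifmvLuAT+hbtLBX/0xm3C0afywxTIdw1HqLmA4onpwmibHbxVhl50Gr3aRUZMqw1WxkfSIVdhpbCluHGBKsgE=")

	extractor := &extractor{}
	signerID, err := extractor.Extract(message, signature)
	assert.NoError(t, err)
	assert.Exactly(t, originalSignerID, signerID, "Signers should match")
}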
@@ -57,6 +57,15 @@ type IAMRoleCredentials struct {
type TaskIAMRoleCredentials struct {
ARN string
IAMRoleCredentials IAMRoleCredentials
+ lock sync.RWMutex
+}
+
+// GetIAMRoleCredentials returns the IAM role credentials in the task IAM role struct
+func (role *TaskIAMRoleCredentials) GetIAMRoleCredentials() IAMRoleCredentials {
+ role.lock.RLock()
+ defer role.lock.RUnlock()
+
+ return role.IAMRoleCredentials
}
// GenerateCredentialsEndpointRelativeURI generates the relative URI for the | 1 | // Copyright 2014-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package credentials
import (
"fmt"
"sync"
"github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs"
"github.com/aws/aws-sdk-go/aws"
)
const (
// CredentialsIDQueryParameterName is the name of GET query parameter for the task ID.
CredentialsIDQueryParameterName = "id"
// CredentialsPath is the path to the credentials handler.
CredentialsPath = V2CredentialsPath
V1CredentialsPath = "/v1/credentials"
V2CredentialsPath = "/v2/credentials"
// credentialsEndpointRelativeURIFormat defines the relative URI format
// for the credentials endpoint. The place holders are the API Path and
// credentials ID
credentialsEndpointRelativeURIFormat = v2CredentialsEndpointRelativeURIFormat
v1CredentialsEndpointRelativeURIFormat = "%s?" + CredentialsIDQueryParameterName + "=%s"
v2CredentialsEndpointRelativeURIFormat = "%s/%s"
)
// IAMRoleCredentials is used to save credentials sent by ACS
type IAMRoleCredentials struct {
CredentialsID string `json:"-"`
RoleArn string `json:"RoleArn"`
AccessKeyID string `json:"AccessKeyId"`
SecretAccessKey string `json:"SecretAccessKey"`
SessionToken string `json:"Token"`
// Expiration is a string instead of a timestamp. This is to avoid any loss of context
// while marshalling/unmarshalling this field in the agent. The agent just echo's
// whatever is sent by the backend.
Expiration string `json:"Expiration"`
}
// TaskIAMRoleCredentials wraps the task arn and the credentials object for the same
type TaskIAMRoleCredentials struct {
ARN string
IAMRoleCredentials IAMRoleCredentials
}
// GenerateCredentialsEndpointRelativeURI generates the relative URI for the
// credentials endpoint, for a given task id.
func (roleCredentials *IAMRoleCredentials) GenerateCredentialsEndpointRelativeURI() string {
return fmt.Sprintf(credentialsEndpointRelativeURIFormat, CredentialsPath, roleCredentials.CredentialsID)
}
// credentialsManager implements the Manager interface. It is used to
// save credentials sent from ACS and to retrieve credentials from
// the credentials endpoint
type credentialsManager struct {
// idToTaskCredentials maps credentials id to its corresponding TaskIAMRoleCredentials object
idToTaskCredentials map[string]*TaskIAMRoleCredentials
taskCredentialsLock sync.RWMutex
}
// IAMRoleCredentialsFromACS translates ecsacs.IAMRoleCredentials object to
// api.IAMRoleCredentials
func IAMRoleCredentialsFromACS(roleCredentials *ecsacs.IAMRoleCredentials) IAMRoleCredentials {
return IAMRoleCredentials{
CredentialsID: aws.StringValue(roleCredentials.CredentialsId),
SessionToken: aws.StringValue(roleCredentials.SessionToken),
RoleArn: aws.StringValue(roleCredentials.RoleArn),
AccessKeyID: aws.StringValue(roleCredentials.AccessKeyId),
SecretAccessKey: aws.StringValue(roleCredentials.SecretAccessKey),
Expiration: aws.StringValue(roleCredentials.Expiration),
}
}
// NewManager creates a new credentials manager object
func NewManager() Manager {
return &credentialsManager{
idToTaskCredentials: make(map[string]*TaskIAMRoleCredentials),
}
}
// SetTaskCredentials adds or updates credentials in the credentials manager
func (manager *credentialsManager) SetTaskCredentials(taskCredentials TaskIAMRoleCredentials) error {
manager.taskCredentialsLock.Lock()
defer manager.taskCredentialsLock.Unlock()
credentials := taskCredentials.IAMRoleCredentials
// Validate that credentials id is not empty
if credentials.CredentialsID == "" {
return fmt.Errorf("CredentialsId is empty")
}
// Validate that task arn is not empty
if taskCredentials.ARN == "" {
return fmt.Errorf("task ARN is empty")
}
// Check if credentials exists for the given credentials id
taskCredentialsInMap, ok := manager.idToTaskCredentials[credentials.CredentialsID]
if !ok {
// No existing credentials, create a new one
taskCredentialsInMap = &TaskIAMRoleCredentials{}
}
*taskCredentialsInMap = taskCredentials
manager.idToTaskCredentials[credentials.CredentialsID] = taskCredentialsInMap
return nil
}
// GetTaskCredentials retrieves credentials for a given credentials id
func (manager *credentialsManager) GetTaskCredentials(id string) (TaskIAMRoleCredentials, bool) {
manager.taskCredentialsLock.RLock()
defer manager.taskCredentialsLock.RUnlock()
taskCredentials, ok := manager.idToTaskCredentials[id]
if !ok {
return TaskIAMRoleCredentials{}, ok
}
return *taskCredentials, ok
}
// RemoveCredentials removes credentials from the credentials manager
func (manager *credentialsManager) RemoveCredentials(id string) {
manager.taskCredentialsLock.Lock()
defer manager.taskCredentialsLock.Unlock()
delete(manager.idToTaskCredentials, id)
}
| 1 | 16,456 | Who's acquiring this lock for writes? | aws-amazon-ecs-agent | go |
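The review question points at the new RWMutex: the patch adds a read-locked getter, but nothing in it ever takes the lock for writing. A sketch of the kind of write path that would justify the lock (the method name is hypothetical, not from the patch):

// SetIAMRoleCredentials updates the credentials under the write lock,
// pairing with the RLock taken in GetIAMRoleCredentials.
func (role *TaskIAMRoleCredentials) SetIAMRoleCredentials(credentials IAMRoleCredentials) {
	role.lock.Lock()
	defer role.lock.Unlock()

	role.IAMRoleCredentials = credentials
}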
@@ -248,10 +248,10 @@ PublicKey.fromX = function(odd, x) {
* @param {String} [compressed] - If the public key is compressed
* @returns {null|Error} An error if exists
*/
-PublicKey.getValidationError = function(data, compressed) {
+PublicKey.getValidationError = function(data) {
var error;
try {
- new PublicKey(data, compressed);
+ new PublicKey(data);
} catch (e) {
error = e;
} | 1 | 'use strict';
var Point = require('./crypto/point');
var BN = require('./crypto/bn');
var Address = require('./address');
/**
*
* Instantiate a PublicKey from a 'PrivateKey', 'Point', 'string', 'Buffer'.
*
* @example
*
* // instantiate from a private key
* var key = PublicKey(privateKey, true);
*
* // export to as a DER hex encoded string
* var exported = key.toString();
*
* // import the public key
* var imported = PublicKey.fromString(exported);
*
* @param {String} data - The encoded data in various formats
* @param {String} [compressed] - If the public key is compressed
* @returns {PublicKey} A new valid instance of an PublicKey
* @constructor
*/
var PublicKey = function PublicKey(data, compressed) {
if (!(this instanceof PublicKey)) {
return new PublicKey(data, compressed);
}
if (!data) {
throw new TypeError('First argument is required, please include public key data.');
}
var info = {
compressed: typeof(compressed) !== 'undefined' ? compressed : true
};
// detect type of data
if (data instanceof Point) {
info.point = data;
} else if (typeof(data) === 'string'){
info = PublicKey._transformDER(new Buffer(data, 'hex' ));
} else if (data instanceof Buffer || data instanceof Uint8Array){
info = PublicKey._transformDER(data);
} else if (data.constructor && (data.constructor.name &&
data.constructor.name === 'PrivateKey')) {
info = PublicKey._transformPrivateKey(data);
} else {
throw new TypeError('First argument is an unrecognized data format.');
}
// validation
info.point.validate();
Object.defineProperty(this, 'point', {
configurable: false,
value: info.point
});
Object.defineProperty(this, 'compressed', {
configurable: false,
value: info.compressed
});
return this;
};
/**
* Internal function to transform a private key into a public key point
*
* @param {PrivateKey} privkey - An instance of PrivateKey
* @returns {Object} An object with keys: point and compressed
* @private
*/
PublicKey._transformPrivateKey = function(privkey) {
var info = {};
if (!privkey.constructor ||
(privkey.constructor.name && privkey.constructor.name !== 'PrivateKey')) {
throw new TypeError('Must be an instance of PrivateKey');
}
info.point = Point.getG().mul(privkey.bn);
info.compressed = privkey.compressed;
return info;
};
/**
* Internal function to transform DER into a public key point
*
* @param {Buffer} buf - An hex encoded buffer
* @returns {Object} An object with keys: point and compressed
* @private
*/
PublicKey._transformDER = function(buf){
var info = {};
if (!(buf instanceof Buffer) && !(buf instanceof Uint8Array)){
throw new TypeError('Must be a hex buffer of DER encoded public key');
}
var x;
var y;
var xbuf;
var ybuf;
if (buf[0] === 0x04) {
xbuf = buf.slice(1, 33);
ybuf = buf.slice(33, 65);
if (xbuf.length !== 32 || ybuf.length !== 32 || buf.length !== 65) {
throw new TypeError('Length of x and y must be 32 bytes');
}
x = BN(xbuf);
y = BN(ybuf);
info.point = Point(x, y);
info.compressed = false;
} else if (buf[0] === 0x03) {
xbuf = buf.slice(1);
x = BN(xbuf);
info = PublicKey._transformX(true, x);
info.compressed = true;
  } else if (buf[0] === 0x02) {
xbuf = buf.slice(1);
x = BN(xbuf);
info = PublicKey._transformX(false, x);
info.compressed = true;
} else {
throw new TypeError('Invalid DER format public key');
}
return info;
};
/**
*
* Internal function to transform X into a public key point
*
* @param {Boolean} odd - If the point is above or below the x axis
* @param {Point} x - The x point
* @returns {Object} An object with keys: point and compressed
* @private
*/
PublicKey._transformX = function(odd, x){
var info = {};
if (typeof odd !== 'boolean') {
throw new TypeError('Must specify whether y is odd or not (true or false)');
}
info.point = Point.fromX(odd, x);
return info;
};
/**
*
* Instantiate a PublicKey from JSON
*
* @param {String} json - A JSON string of DER encoded public key
* @returns {PublicKey} A new valid instance of PublicKey
*/
PublicKey.fromJSON = function(json) {
var buf = new Buffer(json, 'hex');
var info = PublicKey._transformDER(buf);
return new PublicKey(info.point, info.compressed);
};
/**
*
* Instantiate a PublicKey from a PrivateKey
*
* @param {PrivateKey} privkey - An instance of PrivateKey
* @returns {PublicKey} A new valid instance of PublicKey
*/
PublicKey.fromPrivateKey = function(privkey) {
var info = PublicKey._transformPrivateKey(privkey);
return new PublicKey(info.point, info.compressed);
};
/**
*
* Instantiate a PublicKey from a Buffer
*
* @param {Buffer} buf - A DER hex buffer
* @returns {PublicKey} A new valid instance of PublicKey
*/
PublicKey.fromBuffer = function(buf) {
var info = PublicKey._transformDER(buf);
return new PublicKey(info.point, info.compressed);
};
/**
*
* Instantiate a PublicKey from a Point
*
* @param {Point} point - A Point instance
* @returns {PublicKey} A new valid instance of PublicKey
*/
PublicKey.fromPoint = function(point, compressed){
if (!(point instanceof Point)) {
throw new TypeError('First argument must be an instance of Point.');
}
return new PublicKey(point, compressed);
};
/**
*
* Instantiate a PublicKey from a DER Buffer
*
* @param {Buffer} buf - A DER Buffer
* @returns {PublicKey} A new valid instance of PublicKey
*/
PublicKey.fromDER = function(buf) {
var info = PublicKey._transformDER(buf);
return new PublicKey(info.point, info.compressed);
};
/**
*
* Instantiate a PublicKey from a DER hex encoded string
*
* @param {String} str - A DER hex string
* @param {String} [encoding] - The type of string encoding
* @returns {PublicKey} A new valid instance of PublicKey
*/
PublicKey.fromString = function(str, encoding) {
var buf = new Buffer(str, encoding || 'hex');
var info = PublicKey._transformDER(buf);
return new PublicKey(info.point, info.compressed);
};
/**
*
* Instantiate a PublicKey from an X Point
*
* @param {Boolean} odd - If the point is above or below the x axis
* @param {Point} x - The x point
* @returns {PublicKey} A new valid instance of PublicKey
*/
PublicKey.fromX = function(odd, x) {
var info = PublicKey._transformX(odd, x);
return new PublicKey(info.point, info.compressed);
};
/**
*
* Check if there would be any errors when initializing a PublicKey
*
* @param {String} data - The encoded data in various formats
* @param {String} [compressed] - If the public key is compressed
* @returns {null|Error} An error if exists
*/
PublicKey.getValidationError = function(data, compressed) {
var error;
try {
new PublicKey(data, compressed);
} catch (e) {
error = e;
}
return error;
};
/**
*
* Check if the parameters are valid
*
* @param {String} data - The encoded data in various formats
* @param {String} [compressed] - If the public key is compressed
* @returns {Boolean} If the public key would be valid
*/
PublicKey.isValid = function(data, compressed) {
return !PublicKey.getValidationError(data, compressed);
};
/**
*
* Will output the PublicKey to JSON
*
* @returns {String} A hex encoded string
*/
PublicKey.prototype.toJSON = function() {
return this.toBuffer().toString('hex');
};
/**
*
* Will output the PublicKey to a Buffer
*
* @returns {Buffer} A DER hex encoded buffer
*/
PublicKey.prototype.toBuffer = function() {
var compressed = typeof this.compressed === 'undefined' ? true : this.compressed;
return this.toDER(compressed);
};
/**
*
* Will output the PublicKey to a DER Buffer
*
* @returns {Buffer} A DER hex encoded buffer
*/
PublicKey.prototype.toDER = function(compressed) {
compressed = typeof(compressed) !== 'undefined' ? compressed : this.compressed;
if (typeof compressed !== 'boolean') {
throw new TypeError('Must specify whether the public key is compressed or not (true or false)');
}
var x = this.point.getX();
var y = this.point.getY();
var xbuf = x.toBuffer({size: 32});
var ybuf = y.toBuffer({size: 32});
var prefix;
if (!compressed) {
prefix = new Buffer([0x04]);
return Buffer.concat([prefix, xbuf, ybuf]);
} else {
var odd = ybuf[ybuf.length - 1] % 2;
if (odd) {
prefix = new Buffer([0x03]);
} else {
prefix = new Buffer([0x02]);
}
return Buffer.concat([prefix, xbuf]);
}
};
/**
*
* Will return an address for the public key
*
* @returns {Address} An address generated from the public key
*/
PublicKey.prototype.toAddress = function(network) {
return Address.fromPublicKey(this, network);
};
/**
*
* Will output the PublicKey to a DER encoded hex string
*
* @returns {String} A DER hex encoded string
*/
PublicKey.prototype.toString = function() {
var compressed = typeof this.compressed === 'undefined' ? true : this.compressed;
return this.toDER(compressed).toString('hex');
};
/**
*
* Will return a string formatted for the console
*
* @returns {String} Public key
*/
PublicKey.prototype.inspect = function() {
return '<PublicKey: ' + this.toString() + ', compressed: '+this.compressed+'>';
};
module.exports = PublicKey;
| 1 | 13,238 | This throws a linter error, don't use object creation for side effects. I feel like it's kinda messed up that the errors are thrown by the constructor and not that the constructor will throw them | bitpay-bitcore | js |
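The review note is about the bare `new PublicKey(data)` statement in getValidationError, which the linter flags as object creation used only for its side effects. One way to keep the behaviour without a bare `new` statement — the constructor already guards `this` and returns an instance when called as a plain function (a sketch, not the change made in the patch):

PublicKey.getValidationError = function(data) {
  try {
    // Called without `new`, the constructor's `instanceof` guard still builds
    // and validates an instance, so no `new` expression is used purely for
    // its side effects.
    PublicKey(data);
  } catch (e) {
    return e;
  }
};

This only quiets the linter, though; the reviewer's larger point — that validation happens by the constructor throwing — would need a separate, non-throwing validation path to address.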
@@ -43,6 +43,18 @@ describe Trail do
end
end
+ describe ".by_topic" do
+ it "returns trails sorted by their topic's name" do
+ create(:trail, topic: create(:topic, name: "A"))
+ create(:trail, topic: create(:topic, name: "C"))
+ create(:trail, topic: create(:topic, name: "B"))
+
+ result = Trail.by_topic.map(&:topic_name)
+
+ expect(result).to eq %w(A B C)
+ end
+ end
+
describe ".completed_for" do
it "shows completed trails for a user" do
_incomplete = create(:trail) | 1 | require "rails_helper"
describe Trail do
it { should validate_presence_of(:name) }
it { should validate_presence_of(:description) }
it { should validate_presence_of(:topic) }
it { should belong_to(:topic) }
it { should have_many(:repositories).dependent(:destroy) }
it { should have_many(:statuses).dependent(:destroy) }
it { should have_many(:steps).dependent(:destroy) }
it { should have_many(:exercises).through(:steps) }
it { should have_many(:videos).through(:steps) }
describe ".most_recent_published" do
it "returns more recent trails first" do
create :trail, published: true, created_at: 2.day.ago, name: "two"
create :trail, published: true, created_at: 1.days.ago, name: "one"
create :trail, published: true, created_at: 3.days.ago, name: "three"
result = Trail.most_recent_published
expect(result.map(&:name)).to eq(%w(one two three))
end
it "only returns published trails" do
create :trail, published: true, name: "two"
create :trail, published: true, name: "one"
create :trail, published: false, name: "unpublished"
result = Trail.most_recent_published
expect(result.map(&:name)).to match_array(%w(one two))
end
end
context "self.published" do
it "returns published trails" do
_unpublished = create(:trail, published: false)
published = create(:trail, published: true)
expect(Trail.published).to eq([published])
end
end
describe ".completed_for" do
it "shows completed trails for a user" do
_incomplete = create(:trail)
completed = create(:trail)
user = create(:user)
create(
:status,
completeable: completed,
user: user,
state: Status::COMPLETE
)
result = Trail.completed_for(user)
expect(result).to match_array([completed])
end
end
describe "#steps_remaining_for" do
it "returns the number of exercises the user hasn't completed" do
user = create(:user)
other_user = create(:user)
exercises = create_list(:exercise, 3)
videos = create_list(:video, 2)
trail = create(:trail, exercises: exercises, videos: videos)
exercises.first.statuses.create!(user: user, state: Status::COMPLETE)
exercises.second.statuses.create!(user: user, state: Status::IN_PROGRESS)
exercises.first.statuses.create!(
user: other_user,
state: Status::COMPLETE
)
videos.first.statuses.create!(user: user, state: Status::COMPLETE)
videos.second.statuses.create!(user: user, state: Status::IN_PROGRESS)
videos.second.statuses.create!(user: other_user, state: Status::COMPLETE)
result = trail.steps_remaining_for(user)
expect(result).to eq(3)
end
it "returns the total number of steps for a user who hasn't started" do
user = create(:user)
exercises = create_list(:exercise, 2)
trail = create(:trail, exercises: exercises)
result = trail.steps_remaining_for(user)
expect(result).to eq(2)
end
end
describe "#find" do
it "finds its to_param value" do
trail = create(:trail)
result = Trail.find(trail.to_param)
expect(result).to eq(trail)
end
end
describe "#to_param" do
it "returns a value based on its name" do
trail = create(:trail, name: "Example Trail")
result = trail.to_param
expect(result).to eq("example-trail")
end
end
describe "#update_state_for" do
it "updates the status of an unstarted trail" do
user = create(:user)
exercise = create(:exercise)
trail = create(:trail, exercises: [exercise])
trail.update_state_for(user)
trail_status = trail.statuses.where(user: user).most_recent
expect(trail_status).to be_nil
end
it "updates the status of an in-progress trail" do
user = create(:user)
exercise = create(:exercise)
create(
:status,
completeable: exercise,
user: user,
state: Status::IN_PROGRESS
)
trail = create(:trail, exercises: [exercise])
trail.update_state_for(user)
trail_state = trail.statuses.where(user: user).most_recent.state
expect(trail_state).to eq Status::IN_PROGRESS
end
it "updates the status of a complete trail" do
user = create(:user)
trail = trail_with_exercise_states(user, Status::COMPLETE, nil)
exercise = trail.exercises.last
create(
:status,
completeable: exercise,
user: user,
state: Status::COMPLETE
)
trail.update_state_for(user)
trail_state = trail.statuses.where(user: user).most_recent.state
expect(trail_state).to eq Status::COMPLETE
end
end
describe "#exercises" do
it "should be in order of the step position" do
trail = create(:trail)
second_step = create(:step, trail: trail, position: 2)
first_step = create(:step, trail: trail, position: 1)
expect(trail.exercises).
to eq([first_step.completeable, second_step.completeable])
end
end
describe "#completeables" do
it "should return all videos and exercises for that trail" do
trail = create(:trail)
video = create(:video)
exercise = create(:exercise)
create(:step, completeable: video, trail: trail)
create(:step, completeable: exercise, trail: trail)
expect(trail.completeables).to eq [video, exercise]
end
end
describe "#step_ids=" do
it "should preserve ordering" do
trail = create(:trail)
steps = create_list(:step, 3, trail: trail)
expect(trail.steps(true).map(&:id)).to eq(steps.map(&:id))
trail.step_ids = [steps[0], steps[2], steps[1]].map(&:id)
trail.save!
expect(trail.steps(true).map(&:id)).to eq(
[steps[0], steps[2], steps[1]].map(&:id)
)
end
end
describe "#teachers" do
it "returns unique teachers from its video steps" do
only_first = create(:user, name: "only_first")
only_second = create(:user, name: "only_second")
both = create(:user, name: "both")
trail = create(:trail)
other_trail_teacher = create(:user, name: "other_trail")
other_trail = create(:trail)
create(
:step,
trail: trail,
completeable: create(
:video,
teachers: [teacher(both), teacher(only_first)]
)
)
create(
:step,
trail: trail,
completeable: create(
:video,
teachers: [teacher(both), teacher(only_second)]
)
)
create(
:step,
trail: other_trail,
completeable: create(
:video,
teachers: [teacher(other_trail_teacher)]
)
)
result = trail.teachers
expect(result.map(&:name)).to match_array(%w(only_first only_second both))
end
def teacher(user)
Teacher.create!(user: user)
end
end
describe "#included_in_plan?" do
context "for a plan with trails" do
it "returns true" do
plan = build_stubbed(:plan, includes_trails: true)
trail = build_stubbed(:trail)
expect(trail).to be_included_in_plan(plan)
end
end
context "for a plan without trails" do
it "returns false" do
plan = build_stubbed(:plan, includes_trails: false)
trail = build_stubbed(:trail)
expect(trail).not_to be_included_in_plan(plan)
end
end
end
def trail_with_exercise_states(user, *states)
exercises =
states.map { |state| create_exercise_with_state(state, user: user) }
create(:trail, exercises: exercises)
end
def create_exercise_with_state(state, user:)
create(:exercise).tap do |exercise|
if state.present?
exercise.statuses.create!(user: user, state: state)
end
end
end
end
| 1 | 14,959 | Should there be a `trail.topic_name` method so we don't annoy Demeter? | thoughtbot-upcase | rb |
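The spec in the patch maps over `&:topic_name`, and the review note asks for the matching model-side method so callers stop reaching through `trail.topic.name`. A sketch of the usual delegation (the superclass is whatever Trail already inherits from; only the `delegate` line is the point):

class Trail < ActiveRecord::Base
  # Exposes trail.topic_name without violating the Law of Demeter.
  delegate :name, to: :topic, prefix: true
end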
@@ -281,7 +281,7 @@ def _file_to_data_frame(ext, path, target, handler_args):
return dataframe
-def build_package(username, package, yaml_path, checks_path=None, dry_run=False, env='default'):
+def build_package(team, username, package, yaml_path, checks_path=None, dry_run=False, env='default'):
"""
Builds a package from a given Yaml file and installs it locally.
| 1 | """
parse build file, serialize package
"""
from collections import defaultdict, Iterable
import importlib
import json
from types import ModuleType
import os
import re
from pandas.errors import ParserError
from six import iteritems, itervalues, string_types
import yaml
from tqdm import tqdm
from .const import DEFAULT_BUILDFILE, PACKAGE_DIR_NAME, PARSERS, RESERVED
from .core import PackageFormat, BuildException, exec_yaml_python, load_yaml
from .hashing import digest_file, digest_string
from .package import Package, ParquetLib
from .store import PackageStore, VALID_NAME_RE, StoreException
from .util import FileWithReadProgress
from . import check_functions as qc # pylint:disable=W0611
def _have_pyspark():
"""
Check if we're running Pyspark
"""
if _have_pyspark.flag is None:
try:
if Package.get_parquet_lib() is ParquetLib.SPARK:
import pyspark # pylint:disable=W0612
_have_pyspark.flag = True
else:
_have_pyspark.flag = False
except ImportError:
_have_pyspark.flag = False
return _have_pyspark.flag
_have_pyspark.flag = None
def _path_hash(path, transform, kwargs):
"""
Generate a hash of source file path + transform + args
"""
sortedargs = ["%s:%r:%s" % (key, value, type(value))
for key, value in sorted(iteritems(kwargs))]
srcinfo = "{path}:{transform}:{{{kwargs}}}".format(path=os.path.abspath(path),
transform=transform,
kwargs=",".join(sortedargs))
return digest_string(srcinfo)
def _is_internal_node(node):
is_leaf = not node or node.get(RESERVED['file'])
return not is_leaf
def _pythonize_name(name):
safename = re.sub('[^A-Za-z0-9]+', '_', name).strip('_')
if safename and safename[0].isdigit():
safename = "n%s" % safename
if not VALID_NAME_RE.match(safename):
raise BuildException("Unable to determine a Python-legal name for %r" % name)
return safename
def _run_checks(dataframe, checks, checks_contents, nodename, rel_path, target, env='default'):
_ = env # TODO: env support for checks
print("Running data integrity checks...")
checks_list = re.split(r'[,\s]+', checks.strip())
unknown_checks = set(checks_list) - set(checks_contents)
if unknown_checks:
raise BuildException("Unknown check(s) '%s' for %s @ %s" %
(", ".join(list(unknown_checks)), rel_path, target))
for check in checks_list:
res = exec_yaml_python(checks_contents[check], dataframe, nodename, rel_path, target)
if not res and res is not None:
raise BuildException("Data check failed: %s on %s @ %s" % (
check, rel_path, target))
def _build_node(build_dir, package, name, node, fmt, target='pandas', checks_contents=None,
dry_run=False, env='default', ancestor_args={}):
"""
Parameters
----------
ancestor_args : dict
any transform inherited from an ancestor
plus any inherited handler kwargs
Users can thus define kwargs that affect entire subtrees
(e.g. transform: csv for 500 .txt files)
and overriding of ancestor or peer values.
Child transform or kwargs override ancestor k:v pairs.
"""
if _is_internal_node(node):
# NOTE: YAML parsing does not guarantee key order
# fetch local transform and kwargs values; we do it using ifs
# to prevent `key: None` from polluting the update
local_args = {}
if node.get(RESERVED['transform']):
local_args[RESERVED['transform']] = node.get(RESERVED['transform'])
if node.get(RESERVED['kwargs']):
local_args[RESERVED['kwargs']] = node.get(RESERVED['kwargs'])
group_args = ancestor_args.copy()
group_args.update({ k: v for k, v in iteritems(local_args) })
# if it's not a reserved word it's a group that we can descend
groups = { k: v for k, v in iteritems(node) if k not in RESERVED }
for child_name, child_table in groups.items():
if not isinstance(child_name, str) or not VALID_NAME_RE.match(child_name):
raise StoreException("Invalid node name: %r" % child_name)
_build_node(build_dir, package, name + '/' + child_name, child_table, fmt,
checks_contents=checks_contents, dry_run=dry_run, env=env, ancestor_args=group_args)
else: # leaf node
# handle group leaf nodes (empty groups)
if not node:
if not dry_run:
package.save_group(name)
return
# handle remaining leaf nodes types
rel_path = node.get(RESERVED['file'])
if not rel_path:
raise BuildException("Leaf nodes must define a %s key" % RESERVED['file'])
path = os.path.join(build_dir, rel_path)
# get either the locally defined transform or inherit from an ancestor
transform = node.get(RESERVED['transform']) or ancestor_args.get(RESERVED['transform'])
ID = 'id' # pylint:disable=C0103
if transform:
transform = transform.lower()
if (transform not in PARSERS) and (transform != ID):
raise BuildException("Unknown transform '%s' for %s @ %s" %
(transform, rel_path, target))
else: # guess transform if user doesn't provide one
_, ext = splitext_no_dot(rel_path)
transform = ext
if transform not in PARSERS:
transform = ID
print("Inferring 'transform: %s' for %s" % (transform, rel_path))
# TODO: parse/check environments:
# environments = node.get(RESERVED['environments'])
checks = node.get(RESERVED['checks'])
if transform == ID:
#TODO move this to a separate function
if checks:
with open(path, 'r') as fd:
data = fd.read()
_run_checks(data, checks, checks_contents, name, rel_path, target, env=env)
if not dry_run:
print("Copying %s..." % path)
package.save_file(path, name, rel_path)
else:
# copy so we don't modify shared ancestor_args
handler_args = dict(ancestor_args.get(RESERVED['kwargs'], {}))
# local kwargs win the update
handler_args.update(node.get(RESERVED['kwargs'], {}))
# Check Cache
store = PackageStore()
path_hash = _path_hash(path, transform, handler_args)
source_hash = digest_file(path)
cachedobjs = []
if os.path.exists(store.cache_path(path_hash)):
with open(store.cache_path(path_hash), 'r') as entry:
cache_entry = json.load(entry)
if cache_entry['source_hash'] == source_hash:
cachedobjs = cache_entry['obj_hashes']
assert isinstance(cachedobjs, list)
# Check to see that cached objects actually exist in the store
if cachedobjs and all(os.path.exists(store.object_path(obj)) for obj in cachedobjs):
# Use existing objects instead of rebuilding
package.save_cached_df(cachedobjs, name, rel_path, transform, target, fmt)
else:
# read source file into DataFrame
print("Serializing %s..." % path)
if _have_pyspark():
dataframe = _file_to_spark_data_frame(transform, path, target, handler_args)
else:
dataframe = _file_to_data_frame(transform, path, target, handler_args)
if checks:
# TODO: test that design works for internal nodes... e.g. iterating
# over the children and getting/checking the data, err msgs, etc.
_run_checks(dataframe, checks, checks_contents, name, rel_path, target, env=env)
# serialize DataFrame to file(s)
if not dry_run:
print("Saving as binary dataframe...")
obj_hashes = package.save_df(dataframe, name, rel_path, transform, target, fmt)
# Add to cache
cache_entry = dict(
source_hash=source_hash,
obj_hashes=obj_hashes
)
with open(store.cache_path(path_hash), 'w') as entry:
json.dump(cache_entry, entry)
def _remove_keywords(d):
"""
copy the dict, filter_keywords
Parameters
----------
d : dict
"""
return { k:v for k, v in iteritems(d) if k not in RESERVED }
def _file_to_spark_data_frame(ext, path, target, handler_args):
from pyspark import sql as sparksql
_ = target # TODO: why is this unused?
ext = ext.lower() # ensure that case doesn't matter
logic = PARSERS.get(ext)
kwargs = dict(logic['kwargs'])
kwargs.update(handler_args)
spark = sparksql.SparkSession.builder.getOrCreate()
dataframe = None
reader = None
# FIXME: Add json support?
if logic['attr'] == "read_csv":
sep = kwargs.get('sep')
reader = spark.read.format("csv").option("header", "true")
if sep:
reader = reader.option("delimiter", sep)
dataframe = reader.load(path)
for col in dataframe.columns:
pcol = _pythonize_name(col)
if col != pcol:
dataframe = dataframe.withColumnRenamed(col, pcol)
else:
dataframe = _file_to_data_frame(ext, path, target, handler_args)
return dataframe
def _file_to_data_frame(ext, path, target, handler_args):
_ = target # TODO: why is this unused?
logic = PARSERS.get(ext)
the_module = importlib.import_module(logic['module'])
if not isinstance(the_module, ModuleType):
raise BuildException("Missing required module: %s." % logic['module'])
# allow user to specify handler kwargs and override default kwargs
kwargs = logic['kwargs'].copy()
kwargs.update(handler_args)
failover = logic.get('failover', None)
handler = getattr(the_module, logic['attr'], None)
if handler is None:
raise BuildException("Invalid handler: %r" % logic['attr'])
dataframe = None
try_again = False
try:
size = os.path.getsize(path)
with tqdm(total=size, unit='B', unit_scale=True) as progress:
def _callback(count):
progress.update(count)
with FileWithReadProgress(path, _callback) as fd:
dataframe = handler(fd, **kwargs)
except (UnicodeDecodeError, ParserError) as error:
if failover:
warning = "Warning: failed fast parse on input %s.\n" % path
warning += "Switching to Python engine."
print(warning)
try_again = True
else:
raise error
except ValueError as error:
raise BuildException(str(error))
if try_again:
failover_args = {}
failover_args.update(failover)
failover_args.update(kwargs)
dataframe = handler(path, **failover_args)
# cast object columns to strings
# TODO does pyarrow finally support objects?
for name, col in dataframe.iteritems():
if col.dtype == 'object':
dataframe[name] = col.astype(str)
return dataframe
def build_package(username, package, yaml_path, checks_path=None, dry_run=False, env='default'):
"""
Builds a package from a given Yaml file and installs it locally.
Returns the name of the package.
"""
def find(key, value):
"""
find matching nodes recursively;
only descend iterables that aren't strings
"""
if isinstance(value, Iterable) and not isinstance(value, string_types):
for k, v in iteritems(value):
if k == key:
yield v
elif isinstance(v, dict):
for result in find(key, v):
yield result
elif isinstance(v, list):
for item in v:
for result in find(key, item):
yield result
build_data = load_yaml(yaml_path)
# default to 'checks.yml' if build.yml contents: contains checks, but
# there's no inlined checks: defined by build.yml
if (checks_path is None and list(find('checks', build_data['contents'])) and
'checks' not in build_data):
checks_path = 'checks.yml'
checks_contents = load_yaml(checks_path, optional=True)
elif checks_path is not None:
checks_contents = load_yaml(checks_path)
else:
checks_contents = None
build_package_from_contents(username, package, os.path.dirname(yaml_path), build_data,
checks_contents=checks_contents, dry_run=dry_run, env=env)
def build_package_from_contents(username, package, build_dir, build_data,
checks_contents=None, dry_run=False, env='default'):
contents = build_data.get('contents', {})
if not isinstance(contents, dict):
raise BuildException("'contents' must be a dictionary")
pkgformat = build_data.get('format', PackageFormat.default.value)
if not isinstance(pkgformat, str):
raise BuildException("'format' must be a string")
try:
pkgformat = PackageFormat(pkgformat)
except ValueError:
raise BuildException("Unsupported format: %r" % pkgformat)
# HDF5 no longer supported.
if pkgformat is PackageFormat.HDF5:
raise BuildException("HDF5 format is no longer supported; please use PARQUET instead.")
# inline checks take precedence
checks_contents = {} if checks_contents is None else checks_contents
checks_contents.update(build_data.get('checks', {}))
store = PackageStore()
newpackage = store.create_package(username, package, dry_run=dry_run)
_build_node(build_dir, newpackage, '', contents, pkgformat,
checks_contents=checks_contents, dry_run=dry_run, env=env)
if not dry_run:
newpackage.save_contents()
def splitext_no_dot(filename):
"""
Wrap os.path.splitext to return the name and the extension
without the '.' (e.g., csv instead of .csv)
"""
name, ext = os.path.splitext(filename)
ext = ext.lower()
return name, ext.strip('.')
def generate_contents(startpath, outfilename=DEFAULT_BUILDFILE):
"""
Generate a build file (yaml) based on the contents of a
directory tree.
"""
def _ignored_name(name):
return (
name.startswith('.') or
name == PACKAGE_DIR_NAME or
name.endswith('~') or
name == outfilename
)
def _generate_contents(dir_path):
safename_duplicates = defaultdict(list)
for name in os.listdir(dir_path):
if _ignored_name(name):
continue
path = os.path.join(dir_path, name)
if os.path.isdir(path):
nodename = name
ext = None
elif os.path.isfile(path):
nodename, ext = splitext_no_dot(name)
else:
continue
safename = _pythonize_name(nodename)
safename_duplicates[safename].append((name, nodename, ext))
safename_to_name = {}
for safename, duplicates in iteritems(safename_duplicates):
for name, nodename, ext in duplicates:
if len(duplicates) > 1 and ext:
new_safename = _pythonize_name(name) # Name with ext
else:
new_safename = safename
existing_name = safename_to_name.get(new_safename)
if existing_name is not None:
raise BuildException(
"Duplicate node names. %r was renamed to %r, which overlaps with %r" % (
name, new_safename, existing_name)
)
safename_to_name[new_safename] = name
contents = {}
for safename, name in iteritems(safename_to_name):
path = os.path.join(dir_path, name)
if os.path.isdir(path):
data = _generate_contents(path)
else:
rel_path = os.path.relpath(path, startpath)
data = dict(file=rel_path)
contents[safename] = data
return contents
return dict(
contents=_generate_contents(startpath)
)
def generate_build_file(startpath, outfilename=DEFAULT_BUILDFILE):
"""
Generate a build file (yaml) based on the contents of a
directory tree.
"""
buildfilepath = os.path.join(startpath, outfilename)
if os.path.exists(buildfilepath):
raise BuildException("Build file %s already exists." % buildfilepath)
contents = generate_contents(startpath, outfilename)
with open(buildfilepath, 'w') as outfile:
yaml.dump(contents, outfile, default_flow_style=False)
return buildfilepath
| 1 | 15,703 | instead of passing "None" it would be more readable to create a constant e.g. TEAM_PUBLIC = None and then call build_package(TEAM_PUBLIC, ...) | quiltdata-quilt | py |
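A sketch of the review suggestion — giving the "no team" case a name instead of passing a bare None through build_package (the constant name comes from the comment; putting it in const.py, which build.py already imports from, is an assumption):

# const.py (or wherever shared constants live)
TEAM_PUBLIC = None  # sentinel for "not a team package"

# call site
build_package(TEAM_PUBLIC, username, package, yaml_path)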
@@ -9,6 +9,8 @@ gc_disable();
// show all errors
error_reporting(-1);
+require_once __DIR__ . '/Psalm/Internal/exception_handler.php';
+
$valid_short_options = [
'h',
'v', | 1 | <?php
require_once('command_functions.php');
use Psalm\Config;
use Psalm\Internal\Analyzer\ProjectAnalyzer;
gc_disable();
// show all errors
error_reporting(-1);
$valid_short_options = [
'h',
'v',
'c:',
'r:',
];
$valid_long_options = [
'clear-cache',
'config:',
'find-dead-code',
'help',
'root:',
'use-ini-defaults',
'version',
'tcp:',
'tcp-server',
'disable-on-change::',
'enable-autocomplete',
];
$args = array_slice($argv, 1);
$psalm_proxy = array_search('--language-server', $args, true);
if ($psalm_proxy !== false) {
unset($args[$psalm_proxy]);
}
array_map(
/**
* @param string $arg
*
* @return void
*/
function ($arg) use ($valid_long_options, $valid_short_options) {
if (substr($arg, 0, 2) === '--' && $arg !== '--') {
$arg_name = preg_replace('/=.*$/', '', substr($arg, 2));
if (!in_array($arg_name, $valid_long_options, true)
&& !in_array($arg_name . ':', $valid_long_options, true)
&& !in_array($arg_name . '::', $valid_long_options, true)
) {
fwrite(
STDERR,
'Unrecognised argument "--' . $arg_name . '"' . PHP_EOL
. 'Type --help to see a list of supported arguments' . PHP_EOL
);
error_log('Bad argument');
exit(1);
}
} elseif (substr($arg, 0, 2) === '-' && $arg !== '-' && $arg !== '--') {
$arg_name = preg_replace('/=.*$/', '', substr($arg, 1));
if (!in_array($arg_name, $valid_short_options, true)
&& !in_array($arg_name . ':', $valid_short_options, true)
) {
fwrite(
STDERR,
'Unrecognised argument "-' . $arg_name . '"' . PHP_EOL
. 'Type --help to see a list of supported arguments' . PHP_EOL
);
error_log('Bad argument');
exit(1);
}
}
},
$args
);
// get options from command line
$options = getopt(implode('', $valid_short_options), $valid_long_options);
if (!array_key_exists('use-ini-defaults', $options)) {
ini_set('display_errors', '1');
ini_set('display_startup_errors', '1');
ini_set('memory_limit', (string) (4 * 1024 * 1024 * 1024));
}
if (array_key_exists('help', $options)) {
$options['h'] = false;
}
if (array_key_exists('version', $options)) {
$options['v'] = false;
}
if (isset($options['config'])) {
$options['c'] = $options['config'];
}
if (isset($options['c']) && is_array($options['c'])) {
fwrite(STDERR, 'Too many config files provided' . PHP_EOL);
exit(1);
}
if (array_key_exists('h', $options)) {
echo <<<HELP
Usage:
psalm-language-server [options]
Options:
-h, --help
Display this help message
-v, --version
Display the Psalm version
-c, --config=psalm.xml
Path to a psalm.xml configuration file. Run psalm --init to create one.
-r, --root
If running Psalm globally you'll need to specify a project root. Defaults to cwd
--find-dead-code
Look for dead code
--clear-cache
Clears all cache files that the language server uses for this specific project
--use-ini-defaults
Use PHP-provided ini defaults for memory and error display
--tcp=url
Use TCP mode (by default Psalm uses STDIO)
--tcp-server
Use TCP in server mode (default is client)
--disable-on-change[=line-number-threshold]
If added, the language server will not respond to onChange events.
You can also specify a line count over which Psalm will not run on-change events.
--enable-autocomplete[=BOOL]
Enables or disables autocomplete on methods and properties. Default is true.
HELP;
exit;
}
if (getcwd() === false) {
fwrite(STDERR, 'Cannot get current working directory' . PHP_EOL);
exit(1);
}
if (isset($options['root'])) {
$options['r'] = $options['root'];
}
$current_dir = (string)getcwd() . DIRECTORY_SEPARATOR;
if (isset($options['r']) && is_string($options['r'])) {
$root_path = realpath($options['r']);
if (!$root_path) {
fwrite(
STDERR,
'Could not locate root directory ' . $current_dir . DIRECTORY_SEPARATOR . $options['r'] . PHP_EOL
);
exit(1);
}
$current_dir = $root_path . DIRECTORY_SEPARATOR;
}
$vendor_dir = getVendorDir($current_dir);
$first_autoloader = requireAutoloaders($current_dir, isset($options['r']), $vendor_dir);
if (array_key_exists('v', $options)) {
echo 'Psalm ' . PSALM_VERSION . PHP_EOL;
exit;
}
if (ini_get('pcre.jit') === '1'
&& PHP_OS === 'Darwin'
&& version_compare(PHP_VERSION, '7.3.0') >= 0
&& version_compare(PHP_VERSION, '7.4.0') < 0
) {
die(\Psalm\Internal\Fork\Pool::MAC_PCRE_MESSAGE . PHP_EOL . PHP_EOL);
}
$ini_handler = new \Psalm\Internal\Fork\PsalmRestarter('PSALM');
$ini_handler->disableExtension('grpc');
// If Xdebug is enabled, restart without it
$ini_handler->check();
setlocale(LC_CTYPE, 'C');
$path_to_config = get_path_to_config($options);
if (isset($options['tcp'])) {
if (!is_string($options['tcp'])) {
fwrite(STDERR, 'tcp url should be a string' . PHP_EOL);
exit(1);
}
}
$find_dead_code = isset($options['find-dead-code']);
$config = initialiseConfig($path_to_config, $current_dir, \Psalm\Report::TYPE_CONSOLE, $first_autoloader);
if ($config->resolve_from_config_file) {
$current_dir = $config->base_dir;
chdir($current_dir);
}
$config->setServerMode();
if (isset($options['clear-cache'])) {
$cache_directory = $config->getCacheDirectory();
Config::removeCacheDirectory($cache_directory);
echo 'Cache directory deleted' . PHP_EOL;
exit;
}
$providers = new Psalm\Internal\Provider\Providers(
new Psalm\Internal\Provider\FileProvider,
new Psalm\Internal\Provider\ParserCacheProvider($config),
new Psalm\Internal\Provider\FileStorageCacheProvider($config),
new Psalm\Internal\Provider\ClassLikeStorageCacheProvider($config),
new Psalm\Internal\Provider\FileReferenceCacheProvider($config)
);
$project_analyzer = new ProjectAnalyzer(
$config,
$providers
);
if (isset($options['disable-on-change'])) {
$project_analyzer->onchange_line_limit = (int) $options['disable-on-change'];
}
$project_analyzer->provide_completion = !isset($options['enable-autocomplete'])
|| !is_string($options['enable-autocomplete'])
|| strtolower($options['enable-autocomplete']) !== 'false';
$config->visitComposerAutoloadFiles($project_analyzer);
if ($find_dead_code) {
$project_analyzer->getCodebase()->reportUnusedCode();
}
$project_analyzer->server($options['tcp'] ?? null, isset($options['tcp-server']) ? true : false);
| 1 | 8,063 | Would it make sense to include this file via composer files-autoloading instead? | vimeo-psalm | php |
@@ -348,10 +348,12 @@ public class ExpectedConditions {
final String text) {
return new ExpectedCondition<Boolean>() {
+ private String elementText = null;
+
@Override
public Boolean apply(WebDriver driver) {
try {
- String elementText = element.getText();
+ elementText = element.getText();
return elementText.contains(text);
} catch (StaleElementReferenceException e) {
return null; | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.support.ui;
import com.google.common.base.Joiner;
import org.openqa.selenium.Alert;
import org.openqa.selenium.By;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.NoAlertPresentException;
import org.openqa.selenium.NoSuchElementException;
import org.openqa.selenium.NoSuchFrameException;
import org.openqa.selenium.StaleElementReferenceException;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.WebElement;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Canned {@link ExpectedCondition}s which are generally useful within webdriver tests.
*/
public class ExpectedConditions {
private static final Logger log = Logger.getLogger(ExpectedConditions.class.getName());
private ExpectedConditions() {
// Utility class
}
/**
* An expectation for checking the title of a page.
*
* @param title the expected title, which must be an exact match
* @return true when the title matches, false otherwise
*/
public static ExpectedCondition<Boolean> titleIs(final String title) {
return new ExpectedCondition<Boolean>() {
private String currentTitle = "";
@Override
public Boolean apply(WebDriver driver) {
currentTitle = driver.getTitle();
return title.equals(currentTitle);
}
@Override
public String toString() {
return String.format("title to be \"%s\". Current title: \"%s\"", title, currentTitle);
}
};
}
/**
* An expectation for checking that the title contains a case-sensitive substring
*
* @param title the fragment of title expected
* @return true when the title matches, false otherwise
*/
public static ExpectedCondition<Boolean> titleContains(final String title) {
return new ExpectedCondition<Boolean>() {
private String currentTitle = "";
@Override
public Boolean apply(WebDriver driver) {
currentTitle = driver.getTitle();
return currentTitle != null && currentTitle.contains(title);
}
@Override
public String toString() {
return String.format("title to contain \"%s\". Current title: \"%s\"", title, currentTitle);
}
};
}
/**
* An expectation for the URL of the current page to be a specific url.
*
* @param url the url that the page should be on
* @return <code>true</code> when the URL is what it should be
*/
public static ExpectedCondition<Boolean> urlToBe(final String url) {
return new ExpectedCondition<Boolean>() {
private String currentUrl = "";
@Override
public Boolean apply(WebDriver driver) {
currentUrl = driver.getCurrentUrl();
return currentUrl != null && currentUrl.equals(url);
}
@Override
public String toString() {
return String.format("url to be \"%s\". Current url: \"%s\"", url, currentUrl);
}
};
}
/**
* An expectation for the URL of the current page to contain specific text.
*
* @param fraction the fraction of the url that the page should be on
* @return <code>true</code> when the URL contains the text
*/
public static ExpectedCondition<Boolean> urlContains(final String fraction) {
return new ExpectedCondition<Boolean>() {
private String currentUrl = "";
@Override
public Boolean apply(WebDriver driver) {
currentUrl = driver.getCurrentUrl();
return currentUrl != null && currentUrl.contains(fraction);
}
@Override
public String toString() {
return String.format("url to contain \"%s\". Current url: \"%s\"", fraction, currentUrl);
}
};
}
/**
* Expectation for the URL to match a specific regular expression
*
* @param regex the regular expression that the URL should match
* @return <code>true</code> if the URL matches the specified regular expression
*/
public static ExpectedCondition<Boolean> urlMatches(final String regex) {
return new ExpectedCondition<Boolean>() {
private String currentUrl;
private Pattern pattern;
private Matcher matcher;
@Override
public Boolean apply(WebDriver driver) {
currentUrl = driver.getCurrentUrl();
pattern = Pattern.compile(regex);
matcher = pattern.matcher(currentUrl);
return matcher.find();
}
@Override
public String toString() {
return String
.format("url to match the regex \"%s\". Current url: \"%s\"", regex, currentUrl);
}
};
}
/**
* An expectation for checking that an element is present on the DOM of a page. This does not
* necessarily mean that the element is visible.
*
* @param locator used to find the element
* @return the WebElement once it is located
*/
public static ExpectedCondition<WebElement> presenceOfElementLocated(final By locator) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver driver) {
return driver.findElement(locator);
}
@Override
public String toString() {
return "presence of element located by: " + locator;
}
};
}
/**
* An expectation for checking that an element is present on the DOM of a page and visible.
* Visibility means that the element is not only displayed but also has a height and width that is
* greater than 0.
*
* @param locator used to find the element
* @return the WebElement once it is located and visible
*/
public static ExpectedCondition<WebElement> visibilityOfElementLocated(final By locator) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver driver) {
try {
return elementIfVisible(driver.findElement(locator));
} catch (StaleElementReferenceException | NoSuchElementException e) {
// Returns null because the element is no longer or not present in DOM.
return null;
}
}
@Override
public String toString() {
return "visibility of element located by " + locator;
}
};
}
/**
* An expectation for checking that all elements present on the web page that match the locator
* are visible. Visibility means that the elements are not only displayed but also have a height
* and width that is greater than 0.
*
* @param locator used to find the element
* @return the list of WebElements once they are located
*/
public static ExpectedCondition<List<WebElement>> visibilityOfAllElementsLocatedBy(
final By locator) {
return new ExpectedCondition<List<WebElement>>() {
@Override
public List<WebElement> apply(WebDriver driver) {
List<WebElement> elements = driver.findElements(locator);
for (WebElement element : elements) {
if (!element.isDisplayed()) {
return null;
}
}
return elements.size() > 0 ? elements : null;
}
@Override
public String toString() {
return "visibility of all elements located by " + locator;
}
};
}
/**
* An expectation for checking that all elements present on the web page that match the locator
* are visible. Visibility means that the elements are not only displayed but also have a height
* and width that is greater than 0.
*
* @param elements list of WebElements
* @return the list of WebElements once they are located
*/
public static ExpectedCondition<List<WebElement>> visibilityOfAllElements(
final WebElement... elements) {
return visibilityOfAllElements(Arrays.asList(elements));
}
/**
* An expectation for checking that all elements present on the web page that match the locator
* are visible. Visibility means that the elements are not only displayed but also have a height
* and width that is greater than 0.
*
* @param elements list of WebElements
* @return the list of WebElements once they are located
*/
public static ExpectedCondition<List<WebElement>> visibilityOfAllElements(
final List<WebElement> elements) {
return new ExpectedCondition<List<WebElement>>() {
@Override
public List<WebElement> apply(WebDriver driver) {
for (WebElement element : elements) {
if (!element.isDisplayed()) {
return null;
}
}
return elements.size() > 0 ? elements : null;
}
@Override
public String toString() {
return "visibility of all " + elements;
}
};
}
/**
* An expectation for checking that an element, known to be present on the DOM of a page, is
* visible. Visibility means that the element is not only displayed but also has a height and
* width that is greater than 0.
*
* @param element the WebElement
* @return the (same) WebElement once it is visible
*/
public static ExpectedCondition<WebElement> visibilityOf(final WebElement element) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver driver) {
return elementIfVisible(element);
}
@Override
public String toString() {
return "visibility of " + element;
}
};
}
/**
* @return the given element if it is visible and has non-zero size, otherwise null.
*/
private static WebElement elementIfVisible(WebElement element) {
return element.isDisplayed() ? element : null;
}
/**
* An expectation for checking that there is at least one element present on a web page.
*
* @param locator used to find the element
* @return the list of WebElements once they are located
*/
public static ExpectedCondition<List<WebElement>> presenceOfAllElementsLocatedBy(
final By locator) {
return new ExpectedCondition<List<WebElement>>() {
@Override
public List<WebElement> apply(WebDriver driver) {
List<WebElement> elements = driver.findElements(locator);
return elements.size() > 0 ? elements : null;
}
@Override
public String toString() {
return "presence of any elements located by " + locator;
}
};
}
/**
* An expectation for checking if the given text is present in the specified element.
*
* @param element the WebElement
* @param text to be present in the element
* @return true once the element contains the given text
*/
public static ExpectedCondition<Boolean> textToBePresentInElement(final WebElement element,
final String text) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
String elementText = element.getText();
return elementText.contains(text);
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return String.format("text ('%s') to be present in element %s", text, element);
}
};
}
/**
* An expectation for checking if the given text is present in the element that matches the given
* locator.
*
* @param locator used to find the element
* @param text to be present in the element found by the locator
* @return true once the first element located by locator contains the given text
*/
public static ExpectedCondition<Boolean> textToBePresentInElementLocated(final By locator,
final String text) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
String elementText = driver.findElement(locator).getText();
return elementText.contains(text);
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return String.format("text ('%s') to be present in element found by %s",
text, locator);
}
};
}
/**
* An expectation for checking if the given text is present in the specified elements value
* attribute.
*
* @param element the WebElement
* @param text to be present in the element's value attribute
* @return true once the element's value attribute contains the given text
*/
public static ExpectedCondition<Boolean> textToBePresentInElementValue(final WebElement element,
final String text) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
String elementText = element.getAttribute("value");
if (elementText != null) {
return elementText.contains(text);
}
return false;
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return String.format("text ('%s') to be the value of element %s", text, element);
}
};
}
/**
* An expectation for checking if the given text is present in the specified elements value
* attribute.
*
* @param locator used to find the element
* @param text to be present in the value attribute of the element found by the locator
* @return true once the value attribute of the first element located by locator contains the
* given text
*/
public static ExpectedCondition<Boolean> textToBePresentInElementValue(final By locator,
final String text) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
String elementText = driver.findElement(locator).getAttribute("value");
if (elementText != null) {
return elementText.contains(text);
}
return false;
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return String.format("text ('%s') to be the value of element located by %s",
text, locator);
}
};
}
/**
* An expectation for checking whether the given frame is available to switch to. <p> If the frame
* is available it switches the given driver to the specified frame.
*
* @param frameLocator used to find the frame (id or name)
* @return WebDriver instance after frame has been switched
*/
public static ExpectedCondition<WebDriver> frameToBeAvailableAndSwitchToIt(
final String frameLocator) {
return new ExpectedCondition<WebDriver>() {
@Override
public WebDriver apply(WebDriver driver) {
try {
return driver.switchTo().frame(frameLocator);
} catch (NoSuchFrameException e) {
return null;
}
}
@Override
public String toString() {
return "frame to be available: " + frameLocator;
}
};
}
/**
* An expectation for checking whether the given frame is available to switch to. <p> If the frame
* is available it switches the given driver to the specified frame.
*
* @param locator used to find the frame
* @return WebDriver instance after frame has been switched
*/
public static ExpectedCondition<WebDriver> frameToBeAvailableAndSwitchToIt(final By locator) {
return new ExpectedCondition<WebDriver>() {
@Override
public WebDriver apply(WebDriver driver) {
try {
return driver.switchTo().frame(driver.findElement(locator));
} catch (NoSuchFrameException e) {
return null;
}
}
@Override
public String toString() {
return "frame to be available: " + locator;
}
};
}
/**
* An expectation for checking whether the given frame is available to switch to. <p> If the frame
* is available it switches the given driver to the specified frameIndex.
*
* @param frameLocator used to find the frame (index)
* @return WebDriver instance after frame has been switched
*/
public static ExpectedCondition<WebDriver> frameToBeAvailableAndSwitchToIt(
final int frameLocator) {
return new ExpectedCondition<WebDriver>() {
@Override
public WebDriver apply(WebDriver driver) {
try {
return driver.switchTo().frame(frameLocator);
} catch (NoSuchFrameException e) {
return null;
}
}
@Override
public String toString() {
return "frame to be available: " + frameLocator;
}
};
}
/**
* An expectation for checking whether the given frame is available to switch to. <p> If the frame
* is available it switches the given driver to the specified webelement.
*
* @param frameLocator used to find the frame (webelement)
* @return WebDriver instance after frame has been switched
*/
public static ExpectedCondition<WebDriver> frameToBeAvailableAndSwitchToIt(
final WebElement frameLocator) {
return new ExpectedCondition<WebDriver>() {
@Override
public WebDriver apply(WebDriver driver) {
try {
return driver.switchTo().frame(frameLocator);
} catch (NoSuchFrameException e) {
return null;
}
}
@Override
public String toString() {
return "frame to be available: " + frameLocator;
}
};
}
/**
* An expectation for checking that an element is either invisible or not present on the DOM.
*
* @param locator used to find the element
* @return true if the element is not displayed or the element doesn't exist or stale element
*/
public static ExpectedCondition<Boolean> invisibilityOfElementLocated(final By locator) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
return !(driver.findElement(locator).isDisplayed());
} catch (NoSuchElementException e) {
// Returns true because the element is not present in DOM. The
// try block checks if the element is present but is invisible.
return true;
} catch (StaleElementReferenceException e) {
// Returns true because stale element reference implies that element
// is no longer visible.
return true;
}
}
@Override
public String toString() {
return "element to no longer be visible: " + locator;
}
};
}
/**
* An expectation for checking that an element with text is either invisible or not present on the
* DOM.
*
* @param locator used to find the element
* @param text of the element
* @return true if no such element, stale element or displayed text not equal that provided
*/
public static ExpectedCondition<Boolean> invisibilityOfElementWithText(final By locator,
final String text) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
return !driver.findElement(locator).getText().equals(text);
} catch (NoSuchElementException e) {
// Returns true because the element with text is not present in DOM. The
// try block checks if the element is present but is invisible.
return true;
} catch (StaleElementReferenceException e) {
// Returns true because stale element reference implies that element
// is no longer visible.
return true;
}
}
@Override
public String toString() {
return String.format("element containing '%s' to no longer be visible: %s",
text, locator);
}
};
}
/**
* An expectation for checking an element is visible and enabled such that you can click it.
*
* @param locator used to find the element
* @return the WebElement once it is located and clickable (visible and enabled)
*/
public static ExpectedCondition<WebElement> elementToBeClickable(final By locator) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver driver) {
WebElement element = visibilityOfElementLocated(locator).apply(driver);
try {
if (element != null && element.isEnabled()) {
return element;
}
return null;
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return "element to be clickable: " + locator;
}
};
}
/**
* An expectation for checking an element is visible and enabled such that you can click it.
*
* @param element the WebElement
* @return the (same) WebElement once it is clickable (visible and enabled)
*/
public static ExpectedCondition<WebElement> elementToBeClickable(final WebElement element) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver driver) {
WebElement visibleElement = visibilityOf(element).apply(driver);
try {
if (visibleElement != null && visibleElement.isEnabled()) {
return visibleElement;
}
return null;
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return "element to be clickable: " + element;
}
};
}
/**
* Wait until an element is no longer attached to the DOM.
*
* @param element The element to wait for.
* @return false if the element is still attached to the DOM, true otherwise.
*/
public static ExpectedCondition<Boolean> stalenessOf(final WebElement element) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver ignored) {
try {
// Calling any method forces a staleness check
element.isEnabled();
return false;
} catch (StaleElementReferenceException expected) {
return true;
}
}
@Override
public String toString() {
return String.format("element (%s) to become stale", element);
}
};
}
/**
* Wrapper for a condition, which allows for elements to update by redrawing.
*
* This works around the problem of conditions which have two parts: find an element and then
* check for some condition on it. For these conditions it is possible that an element is located
* and then subsequently it is redrawn on the client. When this happens a {@link
* StaleElementReferenceException} is thrown when the second part of the condition is checked.
*
* @param condition ExpectedCondition to wrap
* @param <T> return type of the condition provided
* @return the result of the provided condition
*/
public static <T> ExpectedCondition<T> refreshed(final ExpectedCondition<T> condition) {
return new ExpectedCondition<T>() {
@Override
public T apply(WebDriver driver) {
try {
return condition.apply(driver);
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return String.format("condition (%s) to be refreshed", condition);
}
};
}
/**
* An expectation for checking if the given element is selected.
*
* @param element WebElement to be selected
* @return true once the element is selected
*/
public static ExpectedCondition<Boolean> elementToBeSelected(final WebElement element) {
return elementSelectionStateToBe(element, true);
}
/**
* An expectation for checking if the given element is selected.
*
* @param element WebElement to be selected
* @param selected boolean state of the selection state of the element
* @return true once the element's selection stated is that of selected
*/
public static ExpectedCondition<Boolean> elementSelectionStateToBe(final WebElement element,
final boolean selected) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
return element.isSelected() == selected;
}
@Override
public String toString() {
return String.format("element (%s) to %sbe selected", element, (selected ? "" : "not "));
}
};
}
public static ExpectedCondition<Boolean> elementToBeSelected(final By locator) {
return elementSelectionStateToBe(locator, true);
}
public static ExpectedCondition<Boolean> elementSelectionStateToBe(final By locator,
final boolean selected) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
WebElement element = driver.findElement(locator);
return element.isSelected() == selected;
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return String.format("element found by %s to %sbe selected",
locator, (selected ? "" : "not "));
}
};
}
public static ExpectedCondition<Alert> alertIsPresent() {
return new ExpectedCondition<Alert>() {
@Override
public Alert apply(WebDriver driver) {
try {
return driver.switchTo().alert();
} catch (NoAlertPresentException e) {
return null;
}
}
@Override
public String toString() {
return "alert to be present";
}
};
}
public static ExpectedCondition<Boolean> numberOfWindowsToBe(final int expectedNumberOfWindows) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
return driver.getWindowHandles().size() == expectedNumberOfWindows;
} catch (WebDriverException e) {
return null;
}
}
@Override
public String toString() {
return "number of open windows to be " + expectedNumberOfWindows;
}
};
}
/**
* An expectation with the logical opposite condition of the given condition.
*
* Note that if the Condition you are inverting throws an exception that is caught by the Ignored
* Exceptions, the inversion will not take place and lead to confusing results.
*
* @param condition ExpectedCondition to be inverted
* @return true once the condition is satisfied
*/
public static ExpectedCondition<Boolean> not(final ExpectedCondition<?> condition) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
Object result = condition.apply(driver);
return result == null || result.equals(Boolean.FALSE);
}
@Override
public String toString() {
return "condition to not be valid: " + condition;
}
};
}
/**
* An expectation for checking WebElement with given locator has attribute with a specific value
*
* @param locator used to find the element
* @param attribute used to define css or html attribute
* @param value used as expected attribute value
* @return Boolean true when element has css or html attribute with the value
*/
public static ExpectedCondition<Boolean> attributeToBe(final By locator, final String attribute,
final String value) {
return new ExpectedCondition<Boolean>() {
private String currentValue = null;
@Override
public Boolean apply(WebDriver driver) {
WebElement element = driver.findElement(locator);
currentValue = element.getAttribute(attribute);
if (currentValue == null||currentValue.isEmpty()) {
currentValue = element.getCssValue(attribute);
}
return value.equals(currentValue);
}
@Override
public String toString() {
return String.format("element found by %s to have value \"%s\". Current value: \"%s\"",
locator, value, currentValue);
}
};
}
/**
* An expectation for checking WebElement with given locator has specific text
*
* @param locator used to find the element
* @param value used as expected text
* @return Boolean true when element has text value equal to @value
*/
public static ExpectedCondition<Boolean> textToBe(final By locator, final String value) {
return new ExpectedCondition<Boolean>() {
private String currentValue = null;
@Override
public Boolean apply(WebDriver driver) {
try {
currentValue = driver.findElement(locator).getText();
return currentValue.equals(value);
} catch (Exception e) {
return false;
}
}
@Override
public String toString() {
return String.format("element found by %s to have text \"%s\". Current text: \"%s\"",
locator, value, currentValue);
}
};
}
/**
* An expectation for checking WebElement with given locator has text with a value as a part of
* it
*
* @param locator used to find the element
* @param pattern used as expected text matcher pattern
* @return Boolean true when element has text value containing @value
*/
public static ExpectedCondition<Boolean> textMatches(final By locator, final Pattern pattern) {
return new ExpectedCondition<Boolean>() {
private String currentValue = null;
@Override
public Boolean apply(WebDriver driver) {
try {
currentValue = driver.findElement(locator).getText();
return pattern.matcher(currentValue).find();
} catch (Exception e) {
return false;
}
}
@Override
public String toString() {
return String
.format("text found by %s to match pattern \"%s\". Current text: \"%s\"",
locator, pattern.pattern(), currentValue);
}
};
}
/**
* An expectation for checking number of WebElements with given locator being more than defined number
*
* @param locator used to find the element
* @param number used to define minimum number of elements
* @return Boolean true when size of elements list is more than defined
*/
public static ExpectedCondition<List<WebElement>> numberOfElementsToBeMoreThan(final By locator,
final Integer number) {
return new ExpectedCondition<List<WebElement>>() {
private Integer currentNumber = 0;
@Override
public List<WebElement> apply(WebDriver webDriver) {
List<WebElement> elements = webDriver.findElements(locator);
currentNumber = elements.size();
return currentNumber > number ? elements : null;
}
@Override
public String toString() {
return String.format("number of elements found by %s to be more than \"%s\". Current number: \"%s\"",
locator, number, currentNumber);
}
};
}
/**
* An expectation for checking number of WebElements with given locator being less than defined
* number
*
* @param locator used to find the element
* @param number used to define maximum number of elements
* @return Boolean true when size of elements list is less than defined
*/
public static ExpectedCondition<List<WebElement>> numberOfElementsToBeLessThan(final By locator,
final Integer number) {
return new ExpectedCondition<List<WebElement>>() {
private Integer currentNumber = 0;
@Override
public List<WebElement> apply(WebDriver webDriver) {
List<WebElement> elements = webDriver.findElements(locator);
currentNumber = elements.size();
return currentNumber < number ? elements : null;
}
@Override
public String toString() {
return String.format("number of elements found by %s to be less than \"%s\". Current number: \"%s\"",
locator, number, currentNumber);
}
};
}
/**
* An expectation for checking number of WebElements with given locator
*
* @param locator used to find the element
* @param number used to define number of elements
* @return Boolean true when size of elements list is equal to defined
*/
public static ExpectedCondition<List<WebElement>> numberOfElementsToBe(final By locator,
final Integer number) {
return new ExpectedCondition<List<WebElement>>() {
private Integer currentNumber = 0;
@Override
public List<WebElement> apply(WebDriver webDriver) {
List<WebElement> elements = webDriver.findElements(locator);
currentNumber = elements.size();
return currentNumber.equals(number) ? elements : null;
}
@Override
public String toString() {
return String
.format("number of elements found by %s to be \"%s\". Current number: \"%s\"",
locator, number, currentNumber);
}
};
}
/**
* An expectation for checking given WebElement has attribute with a specific value
*
* @param element used to check its parameters
* @param attribute used to define css or html attribute
* @param value used as expected attribute value
* @return Boolean true when element has css or html attribute with the value
*/
public static ExpectedCondition<Boolean> attributeToBe(final WebElement element,
final String attribute,
final String value) {
return new ExpectedCondition<Boolean>() {
private String currentValue = null;
@Override
public Boolean apply(WebDriver driver) {
currentValue = element.getAttribute(attribute);
if (currentValue == null || currentValue.isEmpty()) {
currentValue = element.getCssValue(attribute);
}
return value.equals(currentValue);
}
@Override
public String toString() {
return String.format(attribute + " to be \"%s\". Current " + attribute + ": \"%s\"", value,
currentValue);
}
};
}
/**
* An expectation for checking WebElement with given locator has attribute which contains specific
* value
*
* @param element used to check its parameters
* @param attribute used to define css or html attribute
* @param value used as expected attribute value
* @return Boolean true when element has css or html attribute which contains the value
*/
public static ExpectedCondition<Boolean> attributeContains(final WebElement element,
final String attribute,
final String value) {
return new ExpectedCondition<Boolean>() {
private String currentValue = null;
@Override
public Boolean apply(WebDriver driver) {
return getAttributeOrCssValue(element, attribute)
.map(seen -> seen.contains(value))
.orElse(false);
}
@Override
public String toString() {
return String.format("value to contain \"%s\". Current value: \"%s\"", value, currentValue);
}
};
}
/**
* An expectation for checking WebElement with given locator has attribute which contains specific
* value
*
* @param locator used to define WebElement to check its parameters
* @param attribute used to define css or html attribute
* @param value used as expected attribute value
* @return Boolean true when element has css or html attribute which contains the value
*/
public static ExpectedCondition<Boolean> attributeContains(final By locator,
final String attribute,
final String value) {
return new ExpectedCondition<Boolean>() {
private String currentValue = null;
@Override
public Boolean apply(WebDriver driver) {
return getAttributeOrCssValue(driver.findElement(locator), attribute)
.map(seen -> seen.contains(value))
.orElse(false);
}
@Override
public String toString() {
return String.format("value found by %s to contain \"%s\". Current value: \"%s\"",
locator, value, currentValue);
}
};
}
/**
* An expectation for checking WebElement any non empty value for given attribute
*
* @param element used to check its parameters
* @param attribute used to define css or html attribute
* @return Boolean true when element has css or html attribute with non empty value
*/
public static ExpectedCondition<Boolean> attributeToBeNotEmpty(final WebElement element,
final String attribute) {
return driver -> getAttributeOrCssValue(element, attribute).isPresent();
}
private static Optional<String> getAttributeOrCssValue(WebElement element, String name) {
String value = element.getAttribute(name);
if (value == null || value.isEmpty()) {
value = element.getCssValue(name);
}
if (value == null || value.isEmpty()) {
return Optional.empty();
}
return Optional.of(value);
}
/**
* An expectation for checking child WebElement as a part of parent element to be visible
*
* @param parent used to check parent element. For example table with locator
* By.id("fish")
* @param childLocator used to find the ultimate child element.
* @return visible nested element
*/
public static ExpectedCondition<List<WebElement>> visibilityOfNestedElementsLocatedBy(
final By parent,
final By childLocator) {
return new ExpectedCondition<List<WebElement>>() {
@Override
public List<WebElement> apply(WebDriver driver) {
WebElement current = driver.findElement(parent);
List<WebElement> allChildren = current.findElements(childLocator);
// The original code only checked the first element. Fair enough.
if (!allChildren.isEmpty() && allChildren.get(0).isDisplayed()) {
return allChildren;
}
return null;
}
@Override
public String toString() {
return String.format("visibility of elements located by %s -> %s", parent, childLocator);
}
};
}
/**
* An expectation for checking child WebElement as a part of parent element to be visible
*
* @param element used as parent element. For example table with locator By.xpath("//table")
* @param childLocator used to find child element. For example td By.xpath("./tr/td")
* @return visible subelement
*/
public static ExpectedCondition<List<WebElement>> visibilityOfNestedElementsLocatedBy(
final WebElement element, final By childLocator) {
return new ExpectedCondition<List<WebElement>>() {
@Override
public List<WebElement> apply(WebDriver webDriver) {
List<WebElement> allChildren = element.findElements(childLocator);
// The original code only checked the visibility of the first element.
if (!allChildren.isEmpty() && allChildren.get(0).isDisplayed()) {
return allChildren;
}
return null;
}
@Override
public String toString() {
return String.format("visibility of element located by %s -> %s", element, childLocator);
}
};
}
/**
* An expectation for checking child WebElement as a part of parent element to present
*
* @param locator used to check parent element. For example table with locator
* By.xpath("//table")
* @param childLocator used to find child element. For example td By.xpath("./tr/td")
* @return subelement
*/
public static ExpectedCondition<WebElement> presenceOfNestedElementLocatedBy(
final By locator,
final By childLocator) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver webDriver) {
return webDriver.findElement(locator).findElement(childLocator);
}
@Override
public String toString() {
return String.format("visibility of element located by %s -> %s", locator, childLocator);
}
};
}
/**
* An expectation for checking child WebElement as a part of parent element to be present
*
* @param element used as parent element
* @param childLocator used to find child element. For example td By.xpath("./tr/td")
* @return subelement
*/
public static ExpectedCondition<WebElement> presenceOfNestedElementLocatedBy(
final WebElement element,
final By childLocator) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver webDriver) {
return element.findElement(childLocator);
}
@Override
public String toString() {
return String.format("visibility of element located by %s", childLocator);
}
};
}
/**
* An expectation for checking child WebElement as a part of parent element to present
*
* @param parent used to check parent element. For example table with locator
* By.xpath("//table")
* @param childLocator used to find child element. For example td By.xpath("./tr/td")
* @return subelement
*/
public static ExpectedCondition<List<WebElement>> presenceOfNestedElementsLocatedBy(
final By parent,
final By childLocator) {
return new ExpectedCondition<List<WebElement>>() {
@Override
public List<WebElement> apply(WebDriver driver) {
List<WebElement> allChildren = driver.findElement(parent).findElements(childLocator);
return allChildren.isEmpty() ? null : allChildren;
}
@Override
public String toString() {
return String.format("visibility of element located by %s -> %s", parent, childLocator);
}
};
}
/**
* An expectation for checking all elements from given list to be invisible
*
* @param elements used to check their invisibility
* @return Boolean true when all elements are not visible anymore
*/
public static ExpectedCondition<Boolean> invisibilityOfAllElements(
final WebElement... elements) {
return invisibilityOfAllElements(Arrays.asList(elements));
}
/**
* An expectation for checking all elements from given list to be invisible
*
* @param elements used to check their invisibility
* @return Boolean true when all elements are not visible anymore
*/
public static ExpectedCondition<Boolean> invisibilityOfAllElements(
final List<WebElement> elements) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver webDriver) {
return elements.stream().allMatch(ExpectedConditions::isInvisible);
}
@Override
public String toString() {
return "invisibility of all elements " + elements;
}
};
}
/**
* An expectation for checking the element to be invisible
*
* @param element used to check its invisibility
* @return Boolean true when elements is not visible anymore
*/
public static ExpectedCondition<Boolean> invisibilityOf(final WebElement element) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver webDriver) {
return isInvisible(element);
}
@Override
public String toString() {
return "invisibility of " + element;
}
};
}
private static boolean isInvisible(final WebElement element) {
try {
return !element.isDisplayed();
} catch (StaleElementReferenceException | NoSuchElementException ignored) {
// We can assume a stale element isn't displayed.
return true;
}
}
/**
* An expectation with the logical or condition of the given list of conditions.
*
* Each condition is checked until at least one of them returns true or not null.
*
* @param conditions ExpectedCondition is a list of alternative conditions
* @return true once one of conditions is satisfied
*/
public static ExpectedCondition<Boolean> or(final ExpectedCondition<?>... conditions) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
RuntimeException lastException = null;
for (ExpectedCondition<?> condition : conditions) {
try {
Object result = condition.apply(driver);
if (result != null) {
if (result instanceof Boolean) {
if (Boolean.TRUE.equals(result)) {
return true;
}
} else {
return true;
}
}
} catch (RuntimeException e) {
lastException = e;
}
}
if (lastException != null) {
throw lastException;
}
return false;
}
@Override
public String toString() {
StringBuilder message = new StringBuilder("at least one condition to be valid: ");
Joiner.on(" || ").appendTo(message, conditions);
return message.toString();
}
};
}
/**
* An expectation with the logical and condition of the given list of conditions.
*
* Each condition is checked until all of them return true or not null
*
* @param conditions ExpectedCondition is a list of alternative conditions
* @return true once all conditions are satisfied
*/
public static ExpectedCondition<Boolean> and(final ExpectedCondition<?>... conditions) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
for (ExpectedCondition<?> condition : conditions) {
Object result = condition.apply(driver);
if (result instanceof Boolean) {
if (Boolean.FALSE.equals(result)) {
return false;
}
}
if (result == null) {
return false;
}
}
return true;
}
@Override
public String toString() {
StringBuilder message = new StringBuilder("all conditions to be valid: ");
Joiner.on(" && ").appendTo(message, conditions);
return message.toString();
}
};
}
/**
* An expectation to check if js executable.
*
* Useful when you know that there should be a Javascript value or something at the stage.
*
* @param javaScript used as executable script
* @return true once javaScript executed without errors
*/
public static ExpectedCondition<Boolean> javaScriptThrowsNoExceptions(final String javaScript) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
((JavascriptExecutor) driver).executeScript(javaScript);
return true;
} catch (WebDriverException e) {
return false;
}
}
@Override
public String toString() {
return String.format("js %s to be executable", javaScript);
}
};
}
/**
* An expectation for String value from javascript
*
* @param javaScript as executable js line
* @return true once js return string
*/
public static ExpectedCondition<Object> jsReturnsValue(final String javaScript) {
return new ExpectedCondition<Object>() {
@Override
public Object apply(WebDriver driver) {
try {
Object value = ((JavascriptExecutor) driver).executeScript(javaScript);
if (value instanceof List) {
return ((List<?>) value).isEmpty() ? null : value;
}
if (value instanceof String) {
return ((String) value).isEmpty() ? null : value;
}
return value;
} catch (WebDriverException e) {
return null;
}
}
@Override
public String toString() {
return String.format("js %s to be executable", javaScript);
}
};
}
}
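
A minimal sketch of the statefulness concern raised in the review note for this row. It assumes the patched variant of `textToBePresentInElement` from the diff above, where `apply()` copies the element text into an instance field (presumably so a later, unshown hunk can include it in `toString()`); the class name, `demo` method, and the `statusField`/"done" values are hypothetical placeholders, not part of the Selenium codebase.

```java
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.ui.ExpectedCondition;
import org.openqa.selenium.support.ui.ExpectedConditions;
import org.openqa.selenium.support.ui.WebDriverWait;

class StatefulConditionReuseSketch {
    // 'driver' and 'statusField' are placeholders; the point is only the reuse
    // of a single condition instance across two separate waits.
    static void demo(WebDriver driver, WebElement statusField) {
        ExpectedCondition<Boolean> hasDone =
                ExpectedConditions.textToBePresentInElement(statusField, "done");

        // First wait: with the patched condition, apply() copies the element's
        // current text into the condition's elementText field.
        new WebDriverWait(driver, 5).until(hasDone);

        driver.navigate().refresh(); // statusField may now be stale

        // Second wait, same instance: if getText() keeps throwing
        // StaleElementReferenceException, apply() returns null without ever
        // reassigning elementText, so a timeout message built from toString()
        // would report text captured during the first wait, not this one.
        new WebDriverWait(driver, 5).until(hasDone);
    }
}
```

Other conditions in this same file (for example `titleIs` with its `currentTitle` field) already carry this kind of diagnostic state, so the trade-off is between richer failure messages and keeping each `ExpectedCondition` a pure, safely reusable `Function`.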
| 1 | 18,003 | Each `ExpectedCondition` implements `java.util.Function` These are expected to be stateless. This condition will leak previous `elementText` on the second usage, which doesn't seem ideal. | SeleniumHQ-selenium | js |
@@ -1,9 +1,10 @@
package sql_test
import (
- "github.com/influxdata/flux/plan"
"testing"
+ "github.com/influxdata/flux/plan"
+
"github.com/DATA-DOG/go-sqlmock"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts" | 1 | package sql_test
import (
"github.com/influxdata/flux/plan"
"testing"
"github.com/DATA-DOG/go-sqlmock"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
flux "github.com/influxdata/flux"
_ "github.com/influxdata/flux/builtin" // We need to import the builtins for the tests to work.
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/execute/executetest"
"github.com/influxdata/flux/querytest"
"github.com/influxdata/flux/stdlib/influxdata/influxdb"
fsql "github.com/influxdata/flux/stdlib/sql"
"github.com/influxdata/flux/values"
)
func TestSqlTo(t *testing.T) {
tests := []querytest.NewQueryTestCase{
{
Name: "from with database",
Raw: `import "sql" from(bucket: "mybucket") |> sql.to(driverName:"sqlmock", dataSourceName:"root@/db", table:"TestTable")`,
Want: &flux.Spec{
Operations: []*flux.Operation{
{
ID: "from0",
Spec: &influxdb.FromOpSpec{
Bucket: "mybucket",
},
},
{
ID: "toSQL1",
Spec: &fsql.ToSQLOpSpec{
DriverName: "sqlmock",
DataSourceName: "root@/db",
Table: "TestTable",
},
},
},
Edges: []flux.Edge{
{Parent: "from0", Child: "toSQL1"},
},
},
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
querytest.NewQueryTestHelper(t, tc)
})
}
}
func TestToSQL_Process(t *testing.T) {
driverName := "sqlmock"
dsn := "root@/db"
_, _, _ = sqlmock.NewWithDSN(dsn)
type wanted struct {
Table []*executetest.Table
ColumnNames []string
ValueStrings [][]string
ValueArgs [][]interface{}
}
testCases := []struct {
name string
spec *fsql.ToSQLProcedureSpec
data flux.Table
want wanted
}{
{
name: "coltable with name in _measurement",
spec: &fsql.ToSQLProcedureSpec{
Spec: &fsql.ToSQLOpSpec{
DriverName: driverName,
DataSourceName: dsn,
Table: "TestTable2",
},
},
data: executetest.MustCopyTable(&executetest.Table{
ColMeta: []flux.ColMeta{
{Label: "_time", Type: flux.TTime},
{Label: "_measurement", Type: flux.TString},
{Label: "_value", Type: flux.TFloat},
{Label: "fred", Type: flux.TString},
},
Data: [][]interface{}{
{execute.Time(11), "a", 2.0, "one"},
{execute.Time(21), "a", 2.0, "one"},
{execute.Time(21), "b", 1.0, "seven"},
{execute.Time(31), "a", 3.0, "nine"},
{execute.Time(41), "c", 4.0, "elevendyone"},
},
}),
want: wanted{
Table: []*executetest.Table{{
ColMeta: []flux.ColMeta{
{Label: "_time", Type: flux.TTime},
{Label: "_measurement", Type: flux.TString},
{Label: "_value", Type: flux.TFloat},
{Label: "fred", Type: flux.TString},
},
Data: [][]interface{}{
{execute.Time(11), "a", 2.0, "one"},
{execute.Time(21), "a", 2.0, "one"},
{execute.Time(21), "b", 1.0, "seven"},
{execute.Time(31), "a", 3.0, "nine"},
{execute.Time(41), "c", 4.0, "elevendyone"},
},
}},
ColumnNames: []string{"_time", "_measurement", "_value", "fred"},
ValueStrings: [][]string{{"(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)"}},
ValueArgs: [][]interface{}{{
values.Time(int64(execute.Time(11))).Time(), "a", 2.0, "one",
values.Time(int64(execute.Time(21))).Time(), "a", 2.0, "one",
values.Time(int64(execute.Time(21))).Time(), "b", 1.0, "seven",
values.Time(int64(execute.Time(31))).Time(), "a", 3.0, "nine",
values.Time(int64(execute.Time(41))).Time(), "c", 4.0, "elevendyone"}},
},
},
{
name: "coltable with ints",
spec: &fsql.ToSQLProcedureSpec{
Spec: &fsql.ToSQLOpSpec{
DriverName: driverName,
DataSourceName: dsn,
Table: "TestTable2",
},
},
data: executetest.MustCopyTable(&executetest.Table{
ColMeta: []flux.ColMeta{
{Label: "_time", Type: flux.TTime},
{Label: "_measurement", Type: flux.TString},
{Label: "_value", Type: flux.TInt},
{Label: "fred", Type: flux.TString},
},
Data: [][]interface{}{
{execute.Time(11), "a", int64(2), "one"},
{execute.Time(21), "a", int64(2), "one"},
{execute.Time(21), "b", int64(1), "seven"},
{execute.Time(31), "a", int64(3), "nine"},
{execute.Time(41), "c", int64(4), "elevendyone"},
},
}),
want: wanted{
Table: []*executetest.Table{{
ColMeta: []flux.ColMeta{
{Label: "_time", Type: flux.TTime},
{Label: "_measurement", Type: flux.TString},
{Label: "_value", Type: flux.TInt},
{Label: "fred", Type: flux.TString},
},
Data: [][]interface{}{
{execute.Time(11), "a", int64(2), "one"},
{execute.Time(21), "a", int64(2), "one"},
{execute.Time(21), "b", int64(1), "seven"},
{execute.Time(31), "a", int64(3), "nine"},
{execute.Time(41), "c", int64(4), "elevendyone"},
},
}},
ColumnNames: []string{"_time", "_measurement", "_value", "fred"},
ValueStrings: [][]string{{"(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)"}},
ValueArgs: [][]interface{}{{
values.Time(int64(execute.Time(11))).Time(), "a", int64(2), "one",
values.Time(int64(execute.Time(21))).Time(), "a", int64(2), "one",
values.Time(int64(execute.Time(21))).Time(), "b", int64(1), "seven",
values.Time(int64(execute.Time(31))).Time(), "a", int64(3), "nine",
values.Time(int64(execute.Time(41))).Time(), "c", int64(4), "elevendyone"}},
},
},
{
name: "coltable with uints",
spec: &fsql.ToSQLProcedureSpec{
Spec: &fsql.ToSQLOpSpec{
DriverName: driverName,
DataSourceName: dsn,
Table: "TestTable2",
},
},
data: executetest.MustCopyTable(&executetest.Table{
ColMeta: []flux.ColMeta{
{Label: "_time", Type: flux.TTime},
{Label: "_measurement", Type: flux.TString},
{Label: "_value", Type: flux.TUInt},
{Label: "fred", Type: flux.TString},
},
Data: [][]interface{}{
{execute.Time(11), "a", uint64(2), "one"},
{execute.Time(21), "a", uint64(2), "one"},
{execute.Time(21), "b", uint64(1), "seven"},
{execute.Time(31), "a", uint64(3), "nine"},
{execute.Time(41), "c", uint64(4), "elevendyone"},
},
}),
want: wanted{
Table: []*executetest.Table{{
ColMeta: []flux.ColMeta{
{Label: "_time", Type: flux.TTime},
{Label: "_measurement", Type: flux.TString},
{Label: "_value", Type: flux.TUInt},
{Label: "fred", Type: flux.TString},
},
Data: [][]interface{}{
{execute.Time(11), "a", uint64(2), "one"},
{execute.Time(21), "a", uint64(2), "one"},
{execute.Time(21), "b", uint64(1), "seven"},
{execute.Time(31), "a", uint64(3), "nine"},
{execute.Time(41), "c", uint64(4), "elevendyone"},
},
}},
ColumnNames: []string{"_time", "_measurement", "_value", "fred"},
ValueStrings: [][]string{{"(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)"}},
ValueArgs: [][]interface{}{{
values.Time(int64(execute.Time(11))).Time(), "a", uint64(2), "one",
values.Time(int64(execute.Time(21))).Time(), "a", uint64(2), "one",
values.Time(int64(execute.Time(21))).Time(), "b", uint64(1), "seven",
values.Time(int64(execute.Time(31))).Time(), "a", uint64(3), "nine",
values.Time(int64(execute.Time(41))).Time(), "c", uint64(4), "elevendyone"}},
},
},
{
name: "coltable with bool",
spec: &fsql.ToSQLProcedureSpec{
Spec: &fsql.ToSQLOpSpec{
DriverName: driverName,
DataSourceName: dsn,
Table: "TestTable2",
},
},
data: executetest.MustCopyTable(&executetest.Table{
ColMeta: []flux.ColMeta{
{Label: "_time", Type: flux.TTime},
{Label: "_measurement", Type: flux.TString},
{Label: "_value", Type: flux.TBool},
{Label: "fred", Type: flux.TString},
},
Data: [][]interface{}{
{execute.Time(11), "a", true, "one"},
{execute.Time(21), "a", true, "one"},
{execute.Time(21), "b", false, "seven"},
{execute.Time(31), "a", true, "nine"},
{execute.Time(41), "c", false, "elevendyone"},
},
}),
want: wanted{
Table: []*executetest.Table{{
ColMeta: []flux.ColMeta{
{Label: "_time", Type: flux.TTime},
{Label: "_measurement", Type: flux.TString},
{Label: "_value", Type: flux.TBool},
{Label: "fred", Type: flux.TString},
},
Data: [][]interface{}{
{execute.Time(11), "a", true, "one"},
{execute.Time(21), "a", true, "one"},
{execute.Time(21), "b", false, "seven"},
{execute.Time(31), "a", true, "nine"},
{execute.Time(41), "c", false, "elevendyone"},
},
}},
ColumnNames: []string{"_time", "_measurement", "_value", "fred"},
ValueStrings: [][]string{{"(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)"}},
ValueArgs: [][]interface{}{{
values.Time(int64(execute.Time(11))).Time(), "a", true, "one",
values.Time(int64(execute.Time(21))).Time(), "a", true, "one",
values.Time(int64(execute.Time(21))).Time(), "b", false, "seven",
values.Time(int64(execute.Time(31))).Time(), "a", true, "nine",
values.Time(int64(execute.Time(41))).Time(), "c", false, "elevendyone"}},
},
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
d := executetest.NewDataset(executetest.RandomDatasetID())
c := execute.NewTableBuilderCache(executetest.UnlimitedAllocator)
c.SetTriggerSpec(plan.DefaultTriggerSpec)
transformation, err := fsql.NewToSQLTransformation(d, c, tc.spec)
if err != nil {
t.Fatal(err)
}
a := tc.data
colNames, valStrings, valArgs, err := fsql.CreateInsertComponents(transformation, a)
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(tc.want.ColumnNames, colNames, cmpopts.EquateNaNs()) {
t.Log(cmp.Diff(tc.want.ColumnNames, colNames))
t.Fail()
}
if !cmp.Equal(tc.want.ValueStrings, valStrings, cmpopts.EquateNaNs()) {
t.Log(cmp.Diff(tc.want.ValueStrings, valStrings))
t.Fail()
}
if !cmp.Equal(tc.want.ValueArgs, valArgs, cmpopts.EquateNaNs()) {
t.Log(cmp.Diff(tc.want.ValueArgs, valArgs))
t.Fail()
}
})
}
}
| 1 | 12,214 | Extra newline here. Thanks for moving it under the `testing` import though. This just needs to be grouped with the other imports below it. | influxdata-flux | go |
@@ -71,7 +71,11 @@ public class MimeBodyPart extends BodyPart {
contentType += String.format(";\n name=\"%s\"", name);
}
setHeader(MimeHeader.HEADER_CONTENT_TYPE, contentType);
- setHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING, "quoted-printable");
+ if ("8bit".equals(body.getEncoding())) {
+ setHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING, "8bit");
+ } else {
+ setHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING, "quoted-printable");
+ }
}
}
| 1 |
package com.fsck.k9.mail.internet;
import com.fsck.k9.mail.Body;
import com.fsck.k9.mail.BodyPart;
import com.fsck.k9.mail.MessagingException;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
/**
* TODO this is a close approximation of Message, need to update along with
* Message.
*/
public class MimeBodyPart extends BodyPart {
protected MimeHeader mHeader = new MimeHeader();
protected Body mBody;
protected int mSize;
public MimeBodyPart() throws MessagingException {
this(null);
}
public MimeBodyPart(Body body) throws MessagingException {
this(body, null);
}
public MimeBodyPart(Body body, String mimeType) throws MessagingException {
if (mimeType != null) {
addHeader(MimeHeader.HEADER_CONTENT_TYPE, mimeType);
}
setBody(body);
}
protected String getFirstHeader(String name) {
return mHeader.getFirstHeader(name);
}
public void addHeader(String name, String value) throws MessagingException {
mHeader.addHeader(name, value);
}
public void setHeader(String name, String value) throws MessagingException {
mHeader.setHeader(name, value);
}
public String[] getHeader(String name) throws MessagingException {
return mHeader.getHeader(name);
}
public void removeHeader(String name) throws MessagingException {
mHeader.removeHeader(name);
}
public Body getBody() {
return mBody;
}
public void setBody(Body body) throws MessagingException {
this.mBody = body;
if (body instanceof com.fsck.k9.mail.Multipart) {
com.fsck.k9.mail.Multipart multipart = ((com.fsck.k9.mail.Multipart)body);
multipart.setParent(this);
setHeader(MimeHeader.HEADER_CONTENT_TYPE, multipart.getContentType());
} else if (body instanceof TextBody) {
String contentType = String.format("%s;\n charset=utf-8", getMimeType());
String name = MimeUtility.getHeaderParameter(getContentType(), "name");
if (name != null) {
contentType += String.format(";\n name=\"%s\"", name);
}
setHeader(MimeHeader.HEADER_CONTENT_TYPE, contentType);
setHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING, "quoted-printable");
}
}
public String getContentType() throws MessagingException {
String contentType = getFirstHeader(MimeHeader.HEADER_CONTENT_TYPE);
return (contentType == null) ? "text/plain" : contentType;
}
public String getDisposition() throws MessagingException {
return getFirstHeader(MimeHeader.HEADER_CONTENT_DISPOSITION);
}
public String getContentId() throws MessagingException {
String contentId = getFirstHeader(MimeHeader.HEADER_CONTENT_ID);
if (contentId == null) {
return null;
}
int first = contentId.indexOf('<');
int last = contentId.lastIndexOf('>');
return (first != -1 && last != -1) ?
contentId.substring(first + 1, last) :
contentId;
}
public String getMimeType() throws MessagingException {
return MimeUtility.getHeaderParameter(getContentType(), null);
}
public boolean isMimeType(String mimeType) throws MessagingException {
return getMimeType().equals(mimeType);
}
public int getSize() {
return mSize;
}
/**
* Write the MimeMessage out in MIME format.
*/
public void writeTo(OutputStream out) throws IOException, MessagingException {
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out), 1024);
mHeader.writeTo(out);
writer.write("\r\n");
writer.flush();
if (mBody != null) {
mBody.writeTo(out);
}
}
}
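
The review note that closes this row asks whether the "8bit".equals(body.getEncoding()) check introduced by the patch at the top of the row should ignore case. The self-contained sketch below shows the case-insensitive variant and why it can matter; whether getEncoding() ever returns an upper-case or null value here is an assumption made for illustration, not something the patch establishes.

public class EncodingCheckSketch {
    // Mirrors the branch added by the patch, but with equalsIgnoreCase():
    // "8BIT" is accepted as well, and a null encoding is handled safely
    // (equalsIgnoreCase(null) returns false), falling back to quoted-printable.
    static String transferEncodingFor(String bodyEncoding) {
        return "8bit".equalsIgnoreCase(bodyEncoding) ? "8bit" : "quoted-printable";
    }

    public static void main(String[] args) {
        System.out.println(transferEncodingFor("8bit"));   // 8bit
        System.out.println(transferEncodingFor("8BIT"));   // 8bit
        System.out.println(transferEncodingFor(null));     // quoted-printable
        System.out.println(transferEncodingFor("base64")); // quoted-printable
    }
}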
| 1 | 12,149 | Should we ignore case when doing the comparison? | k9mail-k-9 | java |
@@ -94,6 +94,7 @@ struct wlr_backend *wlr_backend_autocreate(struct wl_display *display) {
}
}
+#ifdef WLR_HAS_X11_BACKEND
const char *x11_display = getenv("DISPLAY");
if (x11_display) {
		struct wlr_backend *x11_backend =
 | 1 | 
#include <assert.h>
#include <errno.h>
#include <libinput.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <wayland-server.h>
#include <wlr/backend/drm.h>
#include <wlr/backend/interface.h>
#include <wlr/backend/libinput.h>
#include <wlr/backend/multi.h>
#include <wlr/backend/session.h>
#include <wlr/backend/wayland.h>
#include <wlr/backend/x11.h>
#include <wlr/util/log.h>
void wlr_backend_init(struct wlr_backend *backend,
const struct wlr_backend_impl *impl) {
assert(backend);
backend->impl = impl;
wl_signal_init(&backend->events.destroy);
wl_signal_init(&backend->events.new_input);
wl_signal_init(&backend->events.new_output);
}
bool wlr_backend_start(struct wlr_backend *backend) {
if (backend->impl->start) {
return backend->impl->start(backend);
}
return true;
}
void wlr_backend_destroy(struct wlr_backend *backend) {
if (!backend) {
return;
}
if (backend->impl && backend->impl->destroy) {
backend->impl->destroy(backend);
} else {
free(backend);
}
}
struct wlr_egl *wlr_backend_get_egl(struct wlr_backend *backend) {
if (backend->impl->get_egl) {
return backend->impl->get_egl(backend);
}
return NULL;
}
struct wlr_renderer *wlr_backend_get_renderer(struct wlr_backend *backend) {
if (backend->impl->get_renderer) {
return backend->impl->get_renderer(backend);
}
return NULL;
}
static struct wlr_backend *attempt_wl_backend(struct wl_display *display) {
struct wlr_backend *backend = wlr_wl_backend_create(display, NULL);
if (backend) {
int outputs = 1;
const char *_outputs = getenv("WLR_WL_OUTPUTS");
if (_outputs) {
char *end;
outputs = (int)strtol(_outputs, &end, 10);
if (*end) {
wlr_log(L_ERROR, "WLR_WL_OUTPUTS specified with invalid integer, ignoring");
outputs = 1;
} else if (outputs < 0) {
wlr_log(L_ERROR, "WLR_WL_OUTPUTS specified with negative outputs, ignoring");
outputs = 1;
}
}
while (outputs--) {
wlr_wl_output_create(backend);
}
}
return backend;
}
struct wlr_backend *wlr_backend_autocreate(struct wl_display *display) {
struct wlr_backend *backend = wlr_multi_backend_create(display);
if (!backend) {
wlr_log(L_ERROR, "could not allocate multibackend");
return NULL;
}
if (getenv("WAYLAND_DISPLAY") || getenv("_WAYLAND_DISPLAY")) {
struct wlr_backend *wl_backend = attempt_wl_backend(display);
if (wl_backend) {
wlr_multi_backend_add(backend, wl_backend);
return backend;
}
}
const char *x11_display = getenv("DISPLAY");
if (x11_display) {
struct wlr_backend *x11_backend =
wlr_x11_backend_create(display, x11_display);
wlr_multi_backend_add(backend, x11_backend);
return backend;
}
// Attempt DRM+libinput
struct wlr_session *session = wlr_session_create(display);
if (!session) {
wlr_log(L_ERROR, "Failed to start a DRM session");
wlr_backend_destroy(backend);
return NULL;
}
struct wlr_backend *libinput = wlr_libinput_backend_create(display, session);
if (libinput) {
wlr_multi_backend_add(backend, libinput);
} else {
wlr_log(L_ERROR, "Failed to start libinput backend");
wlr_backend_destroy(backend);
wlr_session_destroy(session);
return NULL;
}
int gpus[8];
size_t num_gpus = wlr_session_find_gpus(session, 8, gpus);
struct wlr_backend *primary_drm = NULL;
wlr_log(L_INFO, "Found %zu GPUs", num_gpus);
for (size_t i = 0; i < num_gpus; ++i) {
struct wlr_backend *drm = wlr_drm_backend_create(display, session,
gpus[i], primary_drm);
if (!drm) {
wlr_log(L_ERROR, "Failed to open DRM device");
continue;
}
if (!primary_drm) {
primary_drm = drm;
}
wlr_multi_backend_add(backend, drm);
}
if (!primary_drm) {
wlr_log(L_ERROR, "Failed to open any DRM device");
wlr_backend_destroy(libinput);
wlr_session_destroy(session);
wlr_backend_destroy(backend);
return NULL;
}
return backend;
}
uint32_t usec_to_msec(uint64_t usec) {
return (uint32_t)(usec / 1000);
}
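
The review note that closes this row points out that the new #ifdef WLR_HAS_X11_BACKEND guard in the patch should also cover the #include <wlr/backend/x11.h> near the top of this file; otherwise a build without the X11 backend still fails on the missing header. The sketch below only rearranges code already visible in the patch and in wlr_backend_autocreate() above, to show where the paired guards would sit; it is not the project's actual fix.

/* Guard the header together with the code that needs it: with the X11 backend
 * compiled out, neither the include nor its call site should be compiled. */
#ifdef WLR_HAS_X11_BACKEND
#include <wlr/backend/x11.h>
#endif

/* ... later, inside wlr_backend_autocreate() ... */
#ifdef WLR_HAS_X11_BACKEND
	const char *x11_display = getenv("DISPLAY");
	if (x11_display) {
		struct wlr_backend *x11_backend =
			wlr_x11_backend_create(display, x11_display);
		wlr_multi_backend_add(backend, x11_backend);
		return backend;
	}
#endif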
| 1 | 10,236 | The same `ifdef` should apply to `#include <wlr/backend/x11.h>` line 14 | swaywm-wlroots | c |
@@ -183,7 +183,14 @@ namespace Datadog.Trace.ClrProfiler.Integrations
catch (Exception ex)
{
// profiled app will continue working as expected without this method
- Log.ErrorException($"Error resolving {DiagnosticSourceTypeName}.{nameof(BeforeAction)}(...)", ex);
+ Log.ErrorRetrievingMethod(
+ exception: ex,
+ moduleVersionPointer: moduleVersionPtr,
+ mdToken: mdToken,
+ opCode: opCode,
+ instrumentedType: DiagnosticSourceTypeName,
+ methodName: nameof(BeforeAction),
+ instanceType: diagnosticSource?.GetType().AssemblyQualifiedName);
}
            try
 | 1 | 
using System;
using System.Collections;
using System.Collections.Generic;
using Datadog.Trace.ClrProfiler.Emit;
using Datadog.Trace.ClrProfiler.ExtensionMethods;
using Datadog.Trace.Headers;
using Datadog.Trace.Logging;
namespace Datadog.Trace.ClrProfiler.Integrations
{
/// <summary>
/// The ASP.NET Core MVC 2 integration.
/// </summary>
public sealed class AspNetCoreMvc2Integration : IDisposable
{
private const string HttpContextKey = "__Datadog.Trace.ClrProfiler.Integrations." + nameof(AspNetCoreMvc2Integration);
private const string IntegrationName = "AspNetCoreMvc2";
private const string OperationName = "aspnet-coremvc.request";
private const string AspnetMvcCore = "Microsoft.AspNetCore.Mvc.Core";
private const string Major2 = "2";
/// <summary>
/// Type for unobtrusive hooking into Microsoft.AspNetCore.Mvc.Core pipeline.
/// </summary>
private const string DiagnosticSourceTypeName = "Microsoft.AspNetCore.Mvc.Internal.MvcCoreDiagnosticSourceExtensions";
/// <summary>
/// Base type used for traversing the pipeline in Microsoft.AspNetCore.Mvc.Core.
/// </summary>
private const string ResourceInvokerTypeName = "Microsoft.AspNetCore.Mvc.Internal.ResourceInvoker";
private static readonly Type DiagnosticSourceType = Type.GetType($"{DiagnosticSourceTypeName}, {AspnetMvcCore}");
private static readonly Type ResourceInvokerType = Type.GetType($"{ResourceInvokerTypeName}, {AspnetMvcCore}");
private static readonly ILog Log = LogProvider.GetLogger(typeof(AspNetCoreMvc2Integration));
private readonly object _httpContext;
private readonly Scope _scope;
/// <summary>
/// Initializes a new instance of the <see cref="AspNetCoreMvc2Integration"/> class.
/// </summary>
/// <param name="actionDescriptor">An ActionDescriptor with information about the current action.</param>
/// <param name="httpContext">The HttpContext for the current request.</param>
public AspNetCoreMvc2Integration(object actionDescriptor, object httpContext)
{
try
{
_httpContext = httpContext;
var request = _httpContext.GetProperty("Request").GetValueOrDefault();
GetTagValues(
actionDescriptor,
request,
out string httpMethod,
out string host,
out string resourceName,
out string url,
out string controllerName,
out string actionName);
SpanContext propagatedContext = null;
var tracer = Tracer.Instance;
if (tracer.ActiveScope == null)
{
try
{
// extract propagated http headers
var requestHeaders = request.GetProperty<IEnumerable>("Headers").GetValueOrDefault();
if (requestHeaders != null)
{
var headersCollection = new DictionaryHeadersCollection();
foreach (object header in requestHeaders)
{
var key = header.GetProperty<string>("Key").GetValueOrDefault();
var values = header.GetProperty<IList<string>>("Value").GetValueOrDefault();
if (key != null && values != null)
{
headersCollection.Add(key, values);
}
}
propagatedContext = SpanContextPropagator.Instance.Extract(headersCollection);
}
}
catch (Exception ex)
{
Log.ErrorException("Error extracting propagated HTTP headers.", ex);
}
}
_scope = tracer.StartActive(OperationName, propagatedContext);
var span = _scope.Span;
span.DecorateWebServerSpan(
resourceName: resourceName,
method: httpMethod,
host: host,
httpUrl: url);
span.SetTag(Tags.AspNetController, controllerName);
span.SetTag(Tags.AspNetAction, actionName);
// set analytics sample rate if enabled
var analyticsSampleRate = tracer.Settings.GetIntegrationAnalyticsSampleRate(IntegrationName, enabledWithGlobalSetting: true);
span.SetMetric(Tags.Analytics, analyticsSampleRate);
}
catch (Exception) when (DisposeObject(_scope))
{
// unreachable code
throw;
}
}
/// <summary>
/// Wrapper method used to instrument Microsoft.AspNetCore.Mvc.Internal.MvcCoreDiagnosticSourceExtensions.BeforeAction()
/// </summary>
/// <param name="diagnosticSource">The DiagnosticSource that this extension method was called on.</param>
/// <param name="actionDescriptor">An ActionDescriptor with information about the current action.</param>
/// <param name="httpContext">The HttpContext for the current request.</param>
/// <param name="routeData">A RouteData with information about the current route.</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
[InterceptMethod(
CallerAssembly = AspnetMvcCore,
TargetAssembly = AspnetMvcCore,
TargetType = DiagnosticSourceTypeName,
TargetSignatureTypes = new[] { ClrNames.Void, ClrNames.Ignore, "Microsoft.AspNetCore.Mvc.Abstractions.ActionDescriptor", "Microsoft.AspNetCore.Http.HttpContext", "Microsoft.AspNetCore.Routing.RouteData" },
TargetMinimumVersion = Major2,
TargetMaximumVersion = Major2)]
public static void BeforeAction(
object diagnosticSource,
object actionDescriptor,
object httpContext,
object routeData,
int opCode,
int mdToken,
long moduleVersionPtr)
{
AspNetCoreMvc2Integration integration = null;
if (!Tracer.Instance.Settings.IsIntegrationEnabled(IntegrationName))
{
// integration disabled
return;
}
try
{
integration = new AspNetCoreMvc2Integration(actionDescriptor, httpContext);
if (httpContext.TryGetPropertyValue("Items", out IDictionary<object, object> contextItems))
{
contextItems[HttpContextKey] = integration;
}
}
catch (Exception ex)
{
Log.ErrorExceptionForFilter($"Error creating {nameof(AspNetCoreMvc2Integration)}.", ex);
}
Action<object, object, object, object> instrumentedMethod = null;
try
{
instrumentedMethod =
MethodBuilder<Action<object, object, object, object>>
.Start(moduleVersionPtr, mdToken, opCode, nameof(BeforeAction))
.WithConcreteType(DiagnosticSourceType)
.WithParameters(diagnosticSource, actionDescriptor, httpContext, routeData)
.WithNamespaceAndNameFilters(
ClrNames.Void,
ClrNames.Ignore,
"Microsoft.AspNetCore.Mvc.Abstractions.ActionDescriptor",
"Microsoft.AspNetCore.Http.HttpContext",
"Microsoft.AspNetCore.Routing.RouteData")
.Build();
}
catch (Exception ex)
{
// profiled app will continue working as expected without this method
Log.ErrorException($"Error resolving {DiagnosticSourceTypeName}.{nameof(BeforeAction)}(...)", ex);
}
try
{
// call the original method, catching and rethrowing any unhandled exceptions
instrumentedMethod?.Invoke(diagnosticSource, actionDescriptor, httpContext, routeData);
}
catch (Exception ex) when (integration?.SetException(ex) ?? false)
{
// unreachable code
throw;
}
}
/// <summary>
/// Wrapper method used to instrument Microsoft.AspNetCore.Mvc.Internal.MvcCoreDiagnosticSourceExtensions.AfterAction()
/// </summary>
/// <param name="diagnosticSource">The DiagnosticSource that this extension method was called on.</param>
/// <param name="actionDescriptor">An ActionDescriptor with information about the current action.</param>
/// <param name="httpContext">The HttpContext for the current request.</param>
/// <param name="routeData">A RouteData with information about the current route.</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
[InterceptMethod(
CallerAssembly = AspnetMvcCore,
TargetAssembly = AspnetMvcCore,
TargetType = DiagnosticSourceTypeName,
TargetSignatureTypes = new[] { ClrNames.Void, ClrNames.Ignore, "Microsoft.AspNetCore.Mvc.Abstractions.ActionDescriptor", "Microsoft.AspNetCore.Http.HttpContext", "Microsoft.AspNetCore.Routing.RouteData" },
TargetMinimumVersion = Major2,
TargetMaximumVersion = Major2)]
public static void AfterAction(
object diagnosticSource,
object actionDescriptor,
object httpContext,
object routeData,
int opCode,
int mdToken,
long moduleVersionPtr)
{
AspNetCoreMvc2Integration integration = null;
if (!Tracer.Instance.Settings.IsIntegrationEnabled(IntegrationName))
{
// integration disabled
return;
}
try
{
if (httpContext.TryGetPropertyValue("Items", out IDictionary<object, object> contextItems))
{
integration = contextItems?[HttpContextKey] as AspNetCoreMvc2Integration;
}
}
catch (Exception ex)
{
Log.ErrorExceptionForFilter($"Error accessing {nameof(AspNetCoreMvc2Integration)}.", ex);
}
Action<object, object, object, object> instrumentedMethod = null;
string methodDef = $"{DiagnosticSourceTypeName}.{nameof(AfterAction)}(...)";
try
{
instrumentedMethod =
MethodBuilder<Action<object, object, object, object>>
.Start(moduleVersionPtr, mdToken, opCode, nameof(AfterAction))
.WithConcreteType(DiagnosticSourceType)
.WithParameters(diagnosticSource, actionDescriptor, httpContext, routeData)
.WithNamespaceAndNameFilters(
ClrNames.Void,
ClrNames.Ignore,
"Microsoft.AspNetCore.Mvc.Abstractions.ActionDescriptor",
"Microsoft.AspNetCore.Http.HttpContext",
"Microsoft.AspNetCore.Routing.RouteData")
.Build();
}
catch (Exception ex)
{
// profiled app will continue working as expected without this method
Log.ErrorException($"Error resolving {methodDef}", ex);
}
try
{
// call the original method, catching and rethrowing any unhandled exceptions
instrumentedMethod?.Invoke(diagnosticSource, actionDescriptor, httpContext, routeData);
}
catch (Exception ex)
{
integration?.SetException(ex);
throw;
}
finally
{
integration?.Dispose();
}
}
/// <summary>
/// Wrapper method used to catch unhandled exceptions in the incoming request pipeline for Microsoft.AspNetCore.Mvc.Core
/// </summary>
/// <param name="context">The DiagnosticSource that this extension method was called on.</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
[InterceptMethod(
CallerAssembly = AspnetMvcCore,
TargetAssembly = AspnetMvcCore,
TargetType = ResourceInvokerTypeName,
TargetSignatureTypes = new[] { ClrNames.Void, ClrNames.Ignore },
TargetMinimumVersion = Major2,
TargetMaximumVersion = Major2)]
public static void Rethrow(object context, int opCode, int mdToken, long moduleVersionPtr)
{
if (context == null)
{
// Every rethrow method in every v2.x returns when the context is null
// We need the type of context to call the correct method as there are 3
// Remove this when we introduce the type arrays within the profiler
return;
}
var shouldTrace = Tracer.Instance.Settings.IsIntegrationEnabled(IntegrationName);
Action<object> instrumentedMethod;
try
{
instrumentedMethod =
MethodBuilder<Action<object>>
.Start(moduleVersionPtr, mdToken, opCode, nameof(Rethrow))
.WithConcreteType(ResourceInvokerType)
.WithParameters(context)
.WithNamespaceAndNameFilters(ClrNames.Void, ClrNames.Ignore)
.Build();
}
catch (Exception ex)
{
// profiled app will not continue working as expected without this method
var contextTypeName = context.GetType().FullName + " ";
var methodDef = $"{ResourceInvokerTypeName}.{nameof(Rethrow)}({contextTypeName} context)";
Log.ErrorException($"Error retrieving {methodDef}", ex);
throw;
}
AspNetCoreMvc2Integration integration = null;
if (shouldTrace)
{
try
{
if (context.TryGetPropertyValue("HttpContext", out object httpContext))
{
if (httpContext.TryGetPropertyValue("Items", out IDictionary<object, object> contextItems))
{
integration = contextItems?[HttpContextKey] as AspNetCoreMvc2Integration;
}
}
}
catch (Exception ex)
{
Log.ErrorExceptionForFilter($"Error accessing {nameof(AspNetCoreMvc2Integration)}.", ex);
}
}
try
{
// call the original method, catching and rethrowing any unhandled exceptions
instrumentedMethod.Invoke(context);
}
catch (Exception ex) when (integration?.SetException(ex) ?? false)
{
// unreachable code
throw;
}
}
/// <summary>
/// Tags the current span as an error. Called when an unhandled exception is thrown in the instrumented method.
/// </summary>
/// <param name="ex">The exception that was thrown and not handled in the instrumented method.</param>
/// <returns>Always <c>false</c>.</returns>
public bool SetException(Exception ex)
{
_scope?.Span?.SetException(ex);
return false;
}
/// <summary>
/// Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources.
/// </summary>
public void Dispose()
{
try
{
if (_httpContext != null &&
_httpContext.TryGetPropertyValue("Response", out object response) &&
response.TryGetPropertyValue("StatusCode", out object statusCode))
{
_scope?.Span?.SetTag("http.status_code", statusCode.ToString());
}
}
finally
{
_scope?.Dispose();
}
}
private static void GetTagValues(
object actionDescriptor,
object request,
out string httpMethod,
out string host,
out string resourceName,
out string url,
out string controllerName,
out string actionName)
{
controllerName = actionDescriptor.GetProperty<string>("ControllerName").GetValueOrDefault()?.ToLowerInvariant();
actionName = actionDescriptor.GetProperty<string>("ActionName").GetValueOrDefault()?.ToLowerInvariant();
host = request.GetProperty("Host").GetProperty<string>("Value").GetValueOrDefault();
httpMethod = request.GetProperty<string>("Method").GetValueOrDefault()?.ToUpperInvariant() ?? "UNKNOWN";
string pathBase = request.GetProperty("PathBase").GetProperty<string>("Value").GetValueOrDefault();
string path = request.GetProperty("Path").GetProperty<string>("Value").GetValueOrDefault();
string queryString = request.GetProperty("QueryString").GetProperty<string>("Value").GetValueOrDefault();
url = $"{pathBase}{path}{queryString}";
string resourceUrl = actionDescriptor.GetProperty("AttributeRouteInfo").GetProperty<string>("Template").GetValueOrDefault() ??
UriHelpers.GetRelativeUrl(new Uri($"https://{host}{url}"), tryRemoveIds: true).ToLowerInvariant();
resourceName = $"{httpMethod} {resourceUrl}";
}
private bool DisposeObject(IDisposable disposable)
{
disposable?.Dispose();
return false;
}
}
}
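
The review note that closes this row concerns the new instanceType: argument in the patch: BeforeAction() wraps a static extension method, so there is no instance, and diagnosticSource is simply the first argument. The sketch below illustrates logging that distinction; the helper name and its parameters are hypothetical and are not the actual Log.ErrorRetrievingMethod API.

using System;

internal static class MethodResolutionLoggingSketch
{
    // For a static extension method there is no `this` to report, so the most
    // useful context is the runtime type of the first argument rather than an
    // "instance" type. The message wording here is an assumption.
    public static void LogResolutionFailure(
        Exception exception,
        string instrumentedType,
        string methodName,
        object firstArgument)
    {
        string firstArgumentType =
            firstArgument?.GetType().AssemblyQualifiedName ?? "(null)";

        Console.Error.WriteLine(
            $"Error resolving {instrumentedType}.{methodName}(...): {exception.Message} " +
            $"(static extension method; first argument type: {firstArgumentType})");
    }
}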
| 1 | 15,592 | We're instrumenting a static method, so there's no instance in this case. `diagnosticSource` is the first argument. | DataDog-dd-trace-dotnet | .cs |
@@ -194,6 +194,7 @@ public class MessageCompose extends K9Activity implements OnClickListener,
private Action action;
private boolean requestReadReceipt = false;
+ private boolean isHighPriority = false;
private TextView chooseIdentityButton;
        private EditText subjectView;
 | 1 | 
package com.fsck.k9.activity;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.regex.Pattern;
import android.annotation.SuppressLint;
import android.app.ActionBar;
import android.app.AlertDialog;
import android.app.AlertDialog.Builder;
import android.app.Dialog;
import android.app.PendingIntent;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.content.IntentSender;
import android.content.IntentSender.SendIntentException;
import android.content.pm.ActivityInfo;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Bundle;
import android.os.Handler;
import android.os.Parcelable;
import android.support.annotation.Nullable;
import android.support.annotation.StringRes;
import android.text.TextUtils;
import android.text.TextWatcher;
import timber.log.Timber;
import android.util.TypedValue;
import android.view.ContextThemeWrapper;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.View.OnFocusChangeListener;
import android.view.Window;
import android.widget.EditText;
import android.widget.LinearLayout;
import android.widget.TextView;
import android.widget.Toast;
import com.fsck.k9.Account;
import com.fsck.k9.Account.MessageFormat;
import com.fsck.k9.Identity;
import com.fsck.k9.K9;
import com.fsck.k9.Preferences;
import com.fsck.k9.R;
import com.fsck.k9.activity.MessageLoaderHelper.MessageLoaderCallbacks;
import com.fsck.k9.activity.compose.AttachmentPresenter;
import com.fsck.k9.activity.compose.AttachmentPresenter.AttachmentMvpView;
import com.fsck.k9.activity.compose.AttachmentPresenter.WaitingAction;
import com.fsck.k9.activity.compose.ComposeCryptoStatus;
import com.fsck.k9.activity.compose.ComposeCryptoStatus.SendErrorState;
import com.fsck.k9.activity.compose.CryptoSettingsDialog.OnCryptoModeChangedListener;
import com.fsck.k9.activity.compose.IdentityAdapter;
import com.fsck.k9.activity.compose.IdentityAdapter.IdentityContainer;
import com.fsck.k9.activity.compose.PgpInlineDialog.OnOpenPgpInlineChangeListener;
import com.fsck.k9.activity.compose.PgpSignOnlyDialog.OnOpenPgpSignOnlyChangeListener;
import com.fsck.k9.activity.compose.RecipientMvpView;
import com.fsck.k9.activity.compose.RecipientPresenter;
import com.fsck.k9.activity.compose.RecipientPresenter.CryptoMode;
import com.fsck.k9.activity.compose.SaveMessageTask;
import com.fsck.k9.activity.misc.Attachment;
import com.fsck.k9.controller.MessagingController;
import com.fsck.k9.controller.MessagingListener;
import com.fsck.k9.controller.SimpleMessagingListener;
import com.fsck.k9.fragment.ProgressDialogFragment;
import com.fsck.k9.fragment.ProgressDialogFragment.CancelListener;
import com.fsck.k9.helper.Contacts;
import com.fsck.k9.helper.IdentityHelper;
import com.fsck.k9.helper.MailTo;
import com.fsck.k9.helper.ReplyToParser;
import com.fsck.k9.helper.SimpleTextWatcher;
import com.fsck.k9.helper.Utility;
import com.fsck.k9.mail.Address;
import com.fsck.k9.mail.Flag;
import com.fsck.k9.mail.Message;
import com.fsck.k9.mail.Message.RecipientType;
import com.fsck.k9.mail.MessagingException;
import com.fsck.k9.mail.internet.MimeMessage;
import com.fsck.k9.mailstore.LocalMessage;
import com.fsck.k9.mailstore.MessageViewInfo;
import com.fsck.k9.message.ComposePgpInlineDecider;
import com.fsck.k9.message.IdentityField;
import com.fsck.k9.message.IdentityHeaderParser;
import com.fsck.k9.message.MessageBuilder;
import com.fsck.k9.message.PgpMessageBuilder;
import com.fsck.k9.message.QuotedTextMode;
import com.fsck.k9.message.SimpleMessageBuilder;
import com.fsck.k9.message.SimpleMessageFormat;
import com.fsck.k9.search.LocalSearch;
import com.fsck.k9.ui.EolConvertingEditText;
import com.fsck.k9.ui.compose.QuotedMessageMvpView;
import com.fsck.k9.ui.compose.QuotedMessagePresenter;
@SuppressWarnings("deprecation") // TODO get rid of activity dialogs and indeterminate progress bars
public class MessageCompose extends K9Activity implements OnClickListener,
CancelListener, OnFocusChangeListener, OnCryptoModeChangedListener,
OnOpenPgpInlineChangeListener, OnOpenPgpSignOnlyChangeListener, MessageBuilder.Callback,
AttachmentPresenter.AttachmentsChangedListener, RecipientPresenter.RecipientsChangedListener {
private static final int DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE = 1;
private static final int DIALOG_CONFIRM_DISCARD_ON_BACK = 2;
private static final int DIALOG_CHOOSE_IDENTITY = 3;
private static final int DIALOG_CONFIRM_DISCARD = 4;
private static final long INVALID_DRAFT_ID = MessagingController.INVALID_MESSAGE_ID;
public static final String ACTION_COMPOSE = "com.fsck.k9.intent.action.COMPOSE";
public static final String ACTION_REPLY = "com.fsck.k9.intent.action.REPLY";
public static final String ACTION_REPLY_ALL = "com.fsck.k9.intent.action.REPLY_ALL";
public static final String ACTION_FORWARD = "com.fsck.k9.intent.action.FORWARD";
public static final String ACTION_EDIT_DRAFT = "com.fsck.k9.intent.action.EDIT_DRAFT";
public static final String EXTRA_ACCOUNT = "account";
public static final String EXTRA_MESSAGE_REFERENCE = "message_reference";
public static final String EXTRA_MESSAGE_DECRYPTION_RESULT = "message_decryption_result";
private static final String STATE_KEY_SOURCE_MESSAGE_PROCED =
"com.fsck.k9.activity.MessageCompose.stateKeySourceMessageProced";
private static final String STATE_KEY_DRAFT_ID = "com.fsck.k9.activity.MessageCompose.draftId";
private static final String STATE_IDENTITY_CHANGED =
"com.fsck.k9.activity.MessageCompose.identityChanged";
private static final String STATE_IDENTITY =
"com.fsck.k9.activity.MessageCompose.identity";
private static final String STATE_IN_REPLY_TO = "com.fsck.k9.activity.MessageCompose.inReplyTo";
private static final String STATE_REFERENCES = "com.fsck.k9.activity.MessageCompose.references";
private static final String STATE_KEY_READ_RECEIPT = "com.fsck.k9.activity.MessageCompose.messageReadReceipt";
private static final String STATE_KEY_CHANGES_MADE_SINCE_LAST_SAVE = "com.fsck.k9.activity.MessageCompose.changesMadeSinceLastSave";
private static final String STATE_ALREADY_NOTIFIED_USER_OF_EMPTY_SUBJECT = "alreadyNotifiedUserOfEmptySubject";
private static final String FRAGMENT_WAITING_FOR_ATTACHMENT = "waitingForAttachment";
private static final int MSG_PROGRESS_ON = 1;
private static final int MSG_PROGRESS_OFF = 2;
public static final int MSG_SAVED_DRAFT = 4;
private static final int MSG_DISCARDED_DRAFT = 5;
private static final int REQUEST_MASK_RECIPIENT_PRESENTER = (1 << 8);
private static final int REQUEST_MASK_LOADER_HELPER = (1 << 9);
private static final int REQUEST_MASK_ATTACHMENT_PRESENTER = (1 << 10);
private static final int REQUEST_MASK_MESSAGE_BUILDER = (1 << 11);
/**
* Regular expression to remove the first localized "Re:" prefix in subjects.
*
* Currently:
* - "Aw:" (german: abbreviation for "Antwort")
*/
private static final Pattern PREFIX = Pattern.compile("^AW[:\\s]\\s*", Pattern.CASE_INSENSITIVE);
private QuotedMessagePresenter quotedMessagePresenter;
private MessageLoaderHelper messageLoaderHelper;
private AttachmentPresenter attachmentPresenter;
private Contacts contacts;
/**
* The account used for message composition.
*/
private Account account;
private Identity identity;
private boolean identityChanged = false;
private boolean signatureChanged = false;
// relates to the message being replied to, forwarded, or edited TODO split up?
private MessageReference relatedMessageReference;
/**
* Indicates that the source message has been processed at least once and should not
* be processed on any subsequent loads. This protects us from adding attachments that
* have already been added from the restore of the view state.
*/
private boolean relatedMessageProcessed = false;
private RecipientPresenter recipientPresenter;
private MessageBuilder currentMessageBuilder;
private boolean finishAfterDraftSaved;
private boolean alreadyNotifiedUserOfEmptySubject = false;
private boolean changesMadeSinceLastSave = false;
/**
* The database ID of this message's draft. This is used when saving drafts so the message in
* the database is updated instead of being created anew. This property is INVALID_DRAFT_ID
* until the first save.
*/
private long draftId = INVALID_DRAFT_ID;
private Action action;
private boolean requestReadReceipt = false;
private TextView chooseIdentityButton;
private EditText subjectView;
private EolConvertingEditText signatureView;
private EolConvertingEditText messageContentView;
private LinearLayout attachmentsView;
private String referencedMessageIds;
private String repliedToMessageId;
// The currently used message format.
private SimpleMessageFormat currentMessageFormat;
private boolean isInSubActivity = false;
private boolean navigateUp;
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
if (UpgradeDatabases.actionUpgradeDatabases(this, getIntent())) {
finish();
return;
}
requestWindowFeature(Window.FEATURE_INDETERMINATE_PROGRESS);
if (K9.getK9ComposerThemeSetting() != K9.Theme.USE_GLOBAL) {
// theme the whole content according to the theme (except the action bar)
ContextThemeWrapper themeContext = new ContextThemeWrapper(this,
K9.getK9ThemeResourceId(K9.getK9ComposerTheme()));
@SuppressLint("InflateParams") // this is the top level activity element, it has no root
View v = LayoutInflater.from(themeContext).inflate(R.layout.message_compose, null);
TypedValue outValue = new TypedValue();
// background color needs to be forced
themeContext.getTheme().resolveAttribute(R.attr.messageViewBackgroundColor, outValue, true);
v.setBackgroundColor(outValue.data);
setContentView(v);
} else {
setContentView(R.layout.message_compose);
}
initializeActionBar();
// on api level 15, setContentView() shows the progress bar for some reason...
setProgressBarIndeterminateVisibility(false);
final Intent intent = getIntent();
String messageReferenceString = intent.getStringExtra(EXTRA_MESSAGE_REFERENCE);
relatedMessageReference = MessageReference.parse(messageReferenceString);
final String accountUuid = (relatedMessageReference != null) ?
relatedMessageReference.getAccountUuid() :
intent.getStringExtra(EXTRA_ACCOUNT);
account = Preferences.getPreferences(this).getAccount(accountUuid);
if (account == null) {
account = Preferences.getPreferences(this).getDefaultAccount();
}
if (account == null) {
/*
* There are no accounts set up. This should not have happened. Prompt the
* user to set up an account as an acceptable bailout.
*/
startActivity(new Intent(this, Accounts.class));
changesMadeSinceLastSave = false;
finish();
return;
}
contacts = Contacts.getInstance(MessageCompose.this);
chooseIdentityButton = (TextView) findViewById(R.id.identity);
chooseIdentityButton.setOnClickListener(this);
RecipientMvpView recipientMvpView = new RecipientMvpView(this);
ComposePgpInlineDecider composePgpInlineDecider = new ComposePgpInlineDecider();
recipientPresenter = new RecipientPresenter(getApplicationContext(), getLoaderManager(), recipientMvpView,
account, composePgpInlineDecider, new ReplyToParser(), this);
recipientPresenter.updateCryptoStatus();
subjectView = (EditText) findViewById(R.id.subject);
subjectView.getInputExtras(true).putBoolean("allowEmoji", true);
EolConvertingEditText upperSignature = (EolConvertingEditText) findViewById(R.id.upper_signature);
EolConvertingEditText lowerSignature = (EolConvertingEditText) findViewById(R.id.lower_signature);
QuotedMessageMvpView quotedMessageMvpView = new QuotedMessageMvpView(this);
quotedMessagePresenter = new QuotedMessagePresenter(this, quotedMessageMvpView, account);
attachmentPresenter = new AttachmentPresenter(getApplicationContext(), attachmentMvpView, getLoaderManager(), this);
messageContentView = (EolConvertingEditText) findViewById(R.id.message_content);
messageContentView.getInputExtras(true).putBoolean("allowEmoji", true);
attachmentsView = (LinearLayout) findViewById(R.id.attachments);
TextWatcher draftNeedsChangingTextWatcher = new SimpleTextWatcher() {
@Override
public void onTextChanged(CharSequence s, int start, int before, int count) {
changesMadeSinceLastSave = true;
}
};
TextWatcher signTextWatcher = new SimpleTextWatcher() {
@Override
public void onTextChanged(CharSequence s, int start, int before, int count) {
changesMadeSinceLastSave = true;
signatureChanged = true;
}
};
recipientMvpView.addTextChangedListener(draftNeedsChangingTextWatcher);
quotedMessageMvpView.addTextChangedListener(draftNeedsChangingTextWatcher);
subjectView.addTextChangedListener(draftNeedsChangingTextWatcher);
messageContentView.addTextChangedListener(draftNeedsChangingTextWatcher);
/*
* We set this to invisible by default. Other methods will turn it back on if it's
* needed.
*/
quotedMessagePresenter.showOrHideQuotedText(QuotedTextMode.NONE);
subjectView.setOnFocusChangeListener(this);
messageContentView.setOnFocusChangeListener(this);
if (savedInstanceState != null) {
/*
* This data gets used in onCreate, so grab it here instead of onRestoreInstanceState
*/
relatedMessageProcessed = savedInstanceState.getBoolean(STATE_KEY_SOURCE_MESSAGE_PROCED, false);
}
if (initFromIntent(intent)) {
action = Action.COMPOSE;
changesMadeSinceLastSave = true;
} else {
String action = intent.getAction();
if (ACTION_COMPOSE.equals(action)) {
this.action = Action.COMPOSE;
} else if (ACTION_REPLY.equals(action)) {
this.action = Action.REPLY;
} else if (ACTION_REPLY_ALL.equals(action)) {
this.action = Action.REPLY_ALL;
} else if (ACTION_FORWARD.equals(action)) {
this.action = Action.FORWARD;
} else if (ACTION_EDIT_DRAFT.equals(action)) {
this.action = Action.EDIT_DRAFT;
} else {
// This shouldn't happen
Timber.w("MessageCompose was started with an unsupported action");
this.action = Action.COMPOSE;
}
}
if (identity == null) {
identity = account.getIdentity(0);
}
if (account.isSignatureBeforeQuotedText()) {
signatureView = upperSignature;
lowerSignature.setVisibility(View.GONE);
} else {
signatureView = lowerSignature;
upperSignature.setVisibility(View.GONE);
}
updateSignature();
signatureView.addTextChangedListener(signTextWatcher);
if (!identity.getSignatureUse()) {
signatureView.setVisibility(View.GONE);
}
requestReadReceipt = account.isMessageReadReceiptAlways();
updateFrom();
if (!relatedMessageProcessed) {
if (action == Action.REPLY || action == Action.REPLY_ALL ||
action == Action.FORWARD || action == Action.EDIT_DRAFT) {
messageLoaderHelper = new MessageLoaderHelper(this, getLoaderManager(), getFragmentManager(),
messageLoaderCallbacks);
internalMessageHandler.sendEmptyMessage(MSG_PROGRESS_ON);
Parcelable cachedDecryptionResult = intent.getParcelableExtra(EXTRA_MESSAGE_DECRYPTION_RESULT);
messageLoaderHelper.asyncStartOrResumeLoadingMessage(relatedMessageReference, cachedDecryptionResult);
}
if (action != Action.EDIT_DRAFT) {
String alwaysBccString = account.getAlwaysBcc();
if (!TextUtils.isEmpty(alwaysBccString)) {
recipientPresenter.addBccAddresses(Address.parse(alwaysBccString));
}
}
}
if (action == Action.REPLY || action == Action.REPLY_ALL) {
relatedMessageReference = relatedMessageReference.withModifiedFlag(Flag.ANSWERED);
}
if (action == Action.REPLY || action == Action.REPLY_ALL ||
action == Action.EDIT_DRAFT) {
//change focus to message body.
messageContentView.requestFocus();
} else {
// Explicitly set focus to "To:" input field (see issue 2998)
recipientMvpView.requestFocusOnToField();
}
if (action == Action.FORWARD) {
relatedMessageReference = relatedMessageReference.withModifiedFlag(Flag.FORWARDED);
}
updateMessageFormat();
// Set font size of input controls
int fontSize = K9.getFontSizes().getMessageComposeInput();
recipientMvpView.setFontSizes(K9.getFontSizes(), fontSize);
quotedMessageMvpView.setFontSizes(K9.getFontSizes(), fontSize);
K9.getFontSizes().setViewTextSize(subjectView, fontSize);
K9.getFontSizes().setViewTextSize(messageContentView, fontSize);
K9.getFontSizes().setViewTextSize(signatureView, fontSize);
updateMessageFormat();
setTitle();
currentMessageBuilder = (MessageBuilder) getLastNonConfigurationInstance();
if (currentMessageBuilder != null) {
setProgressBarIndeterminateVisibility(true);
currentMessageBuilder.reattachCallback(this);
}
}
@Override
public void onDestroy() {
super.onDestroy();
if (recipientPresenter != null) {
recipientPresenter.onActivityDestroy();
}
}
/**
* Handle external intents that trigger the message compose activity.
*
* <p>
* Supported external intents:
* <ul>
* <li>{@link Intent#ACTION_VIEW}</li>
* <li>{@link Intent#ACTION_SENDTO}</li>
* <li>{@link Intent#ACTION_SEND}</li>
* <li>{@link Intent#ACTION_SEND_MULTIPLE}</li>
* </ul>
* </p>
*
* @param intent
* The (external) intent that started the activity.
*
* @return {@code true}, if this activity was started by an external intent. {@code false},
* otherwise.
*/
private boolean initFromIntent(final Intent intent) {
boolean startedByExternalIntent = false;
final String action = intent.getAction();
if (Intent.ACTION_VIEW.equals(action) || Intent.ACTION_SENDTO.equals(action)) {
/*
* Someone has clicked a mailto: link. The address is in the URI.
*/
if (intent.getData() != null) {
Uri uri = intent.getData();
if (MailTo.isMailTo(uri)) {
MailTo mailTo = MailTo.parse(uri);
initializeFromMailto(mailTo);
}
}
/*
* Note: According to the documentation ACTION_VIEW and ACTION_SENDTO don't accept
* EXTRA_* parameters.
* And previously we didn't process these EXTRAs. But it looks like nobody bothers to
* read the official documentation and just copies wrong sample code that happens to
* work with the AOSP Email application. And because even big players get this wrong,
* we're now finally giving in and read the EXTRAs for those actions (below).
*/
}
if (Intent.ACTION_SEND.equals(action) || Intent.ACTION_SEND_MULTIPLE.equals(action) ||
Intent.ACTION_SENDTO.equals(action) || Intent.ACTION_VIEW.equals(action)) {
startedByExternalIntent = true;
/*
* Note: Here we allow a slight deviation from the documented behavior.
* EXTRA_TEXT is used as message body (if available) regardless of the MIME
* type of the intent. In addition one or multiple attachments can be added
* using EXTRA_STREAM.
*/
CharSequence text = intent.getCharSequenceExtra(Intent.EXTRA_TEXT);
// Only use EXTRA_TEXT if the body hasn't already been set by the mailto URI
if (text != null && messageContentView.getText().length() == 0) {
messageContentView.setCharacters(text);
}
String type = intent.getType();
if (Intent.ACTION_SEND.equals(action)) {
Uri stream = intent.getParcelableExtra(Intent.EXTRA_STREAM);
if (stream != null) {
attachmentPresenter.addAttachment(stream, type);
}
} else {
List<Parcelable> list = intent.getParcelableArrayListExtra(Intent.EXTRA_STREAM);
if (list != null) {
for (Parcelable parcelable : list) {
Uri stream = (Uri) parcelable;
if (stream != null) {
attachmentPresenter.addAttachment(stream, type);
}
}
}
}
String subject = intent.getStringExtra(Intent.EXTRA_SUBJECT);
// Only use EXTRA_SUBJECT if the subject hasn't already been set by the mailto URI
if (subject != null && subjectView.getText().length() == 0) {
subjectView.setText(subject);
}
recipientPresenter.initFromSendOrViewIntent(intent);
}
return startedByExternalIntent;
}
@Override
protected void onResume() {
super.onResume();
MessagingController.getInstance(this).addListener(messagingListener);
}
@Override
public void onPause() {
super.onPause();
MessagingController.getInstance(this).removeListener(messagingListener);
boolean isPausingOnConfigurationChange = (getChangingConfigurations() & ActivityInfo.CONFIG_ORIENTATION)
== ActivityInfo.CONFIG_ORIENTATION;
boolean isCurrentlyBuildingMessage = currentMessageBuilder != null;
if (isPausingOnConfigurationChange || isCurrentlyBuildingMessage || isInSubActivity) {
return;
}
checkToSaveDraftImplicitly();
}
/**
* The framework handles most of the fields, but we need to handle stuff that we
* dynamically show and hide:
* Attachment list,
* Cc field,
* Bcc field,
* Quoted text,
*/
@Override
protected void onSaveInstanceState(Bundle outState) {
super.onSaveInstanceState(outState);
outState.putBoolean(STATE_KEY_SOURCE_MESSAGE_PROCED, relatedMessageProcessed);
outState.putLong(STATE_KEY_DRAFT_ID, draftId);
outState.putSerializable(STATE_IDENTITY, identity);
outState.putBoolean(STATE_IDENTITY_CHANGED, identityChanged);
outState.putString(STATE_IN_REPLY_TO, repliedToMessageId);
outState.putString(STATE_REFERENCES, referencedMessageIds);
outState.putBoolean(STATE_KEY_READ_RECEIPT, requestReadReceipt);
outState.putBoolean(STATE_KEY_CHANGES_MADE_SINCE_LAST_SAVE, changesMadeSinceLastSave);
outState.putBoolean(STATE_ALREADY_NOTIFIED_USER_OF_EMPTY_SUBJECT, alreadyNotifiedUserOfEmptySubject);
recipientPresenter.onSaveInstanceState(outState);
quotedMessagePresenter.onSaveInstanceState(outState);
attachmentPresenter.onSaveInstanceState(outState);
}
@Override
public Object onRetainNonConfigurationInstance() {
if (currentMessageBuilder != null) {
currentMessageBuilder.detachCallback();
}
return currentMessageBuilder;
}
@Override
protected void onRestoreInstanceState(Bundle savedInstanceState) {
super.onRestoreInstanceState(savedInstanceState);
attachmentsView.removeAllViews();
requestReadReceipt = savedInstanceState.getBoolean(STATE_KEY_READ_RECEIPT);
recipientPresenter.onRestoreInstanceState(savedInstanceState);
quotedMessagePresenter.onRestoreInstanceState(savedInstanceState);
attachmentPresenter.onRestoreInstanceState(savedInstanceState);
draftId = savedInstanceState.getLong(STATE_KEY_DRAFT_ID);
identity = (Identity) savedInstanceState.getSerializable(STATE_IDENTITY);
identityChanged = savedInstanceState.getBoolean(STATE_IDENTITY_CHANGED);
repliedToMessageId = savedInstanceState.getString(STATE_IN_REPLY_TO);
referencedMessageIds = savedInstanceState.getString(STATE_REFERENCES);
changesMadeSinceLastSave = savedInstanceState.getBoolean(STATE_KEY_CHANGES_MADE_SINCE_LAST_SAVE);
alreadyNotifiedUserOfEmptySubject = savedInstanceState.getBoolean(STATE_ALREADY_NOTIFIED_USER_OF_EMPTY_SUBJECT);
updateFrom();
updateMessageFormat();
}
private void setTitle() {
setTitle(action.getTitleResource());
}
@Nullable
private MessageBuilder createMessageBuilder(boolean isDraft) {
MessageBuilder builder;
recipientPresenter.updateCryptoStatus();
ComposeCryptoStatus cryptoStatus = recipientPresenter.getCurrentCryptoStatus();
// TODO encrypt drafts for storage
if (!isDraft && cryptoStatus.shouldUsePgpMessageBuilder()) {
SendErrorState maybeSendErrorState = cryptoStatus.getSendErrorStateOrNull();
if (maybeSendErrorState != null) {
recipientPresenter.showPgpSendError(maybeSendErrorState);
return null;
}
PgpMessageBuilder pgpBuilder = PgpMessageBuilder.newInstance();
recipientPresenter.builderSetProperties(pgpBuilder);
builder = pgpBuilder;
} else {
builder = SimpleMessageBuilder.newInstance();
}
builder.setSubject(Utility.stripNewLines(subjectView.getText().toString()))
.setSentDate(new Date())
.setHideTimeZone(K9.hideTimeZone())
.setTo(recipientPresenter.getToAddresses())
.setCc(recipientPresenter.getCcAddresses())
.setBcc(recipientPresenter.getBccAddresses())
.setInReplyTo(repliedToMessageId)
.setReferences(referencedMessageIds)
.setRequestReadReceipt(requestReadReceipt)
.setIdentity(identity)
.setMessageFormat(currentMessageFormat)
.setText(messageContentView.getCharacters())
.setAttachments(attachmentPresenter.createAttachmentList())
.setSignature(signatureView.getCharacters())
.setSignatureBeforeQuotedText(account.isSignatureBeforeQuotedText())
.setIdentityChanged(identityChanged)
.setSignatureChanged(signatureChanged)
.setCursorPosition(messageContentView.getSelectionStart())
.setMessageReference(relatedMessageReference)
.setDraft(isDraft)
.setIsPgpInlineEnabled(cryptoStatus.isPgpInlineModeEnabled());
quotedMessagePresenter.builderSetProperties(builder);
return builder;
}
private void checkToSendMessage() {
if (subjectView.getText().length() == 0 && !alreadyNotifiedUserOfEmptySubject) {
Toast.makeText(this, R.string.empty_subject, Toast.LENGTH_LONG).show();
alreadyNotifiedUserOfEmptySubject = true;
return;
}
if (recipientPresenter.checkRecipientsOkForSending()) {
return;
}
if (attachmentPresenter.checkOkForSendingOrDraftSaving()) {
return;
}
performSendAfterChecks();
}
private void checkToSaveDraftAndSave() {
if (!account.hasDraftsFolder()) {
Toast.makeText(this, R.string.compose_error_no_draft_folder, Toast.LENGTH_SHORT).show();
return;
}
if (attachmentPresenter.checkOkForSendingOrDraftSaving()) {
return;
}
finishAfterDraftSaved = true;
performSaveAfterChecks();
}
private void checkToSaveDraftImplicitly() {
if (!account.hasDraftsFolder()) {
return;
}
if (!changesMadeSinceLastSave) {
return;
}
finishAfterDraftSaved = false;
performSaveAfterChecks();
}
private void performSaveAfterChecks() {
currentMessageBuilder = createMessageBuilder(true);
if (currentMessageBuilder != null) {
setProgressBarIndeterminateVisibility(true);
currentMessageBuilder.buildAsync(this);
}
}
public void performSendAfterChecks() {
currentMessageBuilder = createMessageBuilder(false);
if (currentMessageBuilder != null) {
changesMadeSinceLastSave = false;
setProgressBarIndeterminateVisibility(true);
currentMessageBuilder.buildAsync(this);
}
}
private void onDiscard() {
if (draftId != INVALID_DRAFT_ID) {
MessagingController.getInstance(getApplication()).deleteDraft(account, draftId);
draftId = INVALID_DRAFT_ID;
}
internalMessageHandler.sendEmptyMessage(MSG_DISCARDED_DRAFT);
changesMadeSinceLastSave = false;
if (navigateUp) {
openAutoExpandFolder();
} else {
finish();
}
}
private void onReadReceipt() {
CharSequence txt;
if (!requestReadReceipt) {
txt = getString(R.string.read_receipt_enabled);
requestReadReceipt = true;
} else {
txt = getString(R.string.read_receipt_disabled);
requestReadReceipt = false;
}
Context context = getApplicationContext();
Toast toast = Toast.makeText(context, txt, Toast.LENGTH_SHORT);
toast.show();
}
public void showContactPicker(int requestCode) {
requestCode |= REQUEST_MASK_RECIPIENT_PRESENTER;
isInSubActivity = true;
startActivityForResult(contacts.contactPickerIntent(), requestCode);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
isInSubActivity = false;
if ((requestCode & REQUEST_MASK_MESSAGE_BUILDER) == REQUEST_MASK_MESSAGE_BUILDER) {
requestCode ^= REQUEST_MASK_MESSAGE_BUILDER;
if (currentMessageBuilder == null) {
Timber.e("Got a message builder activity result for no message builder, " +
"this is an illegal state!");
return;
}
currentMessageBuilder.onActivityResult(requestCode, resultCode, data, this);
return;
}
if ((requestCode & REQUEST_MASK_RECIPIENT_PRESENTER) == REQUEST_MASK_RECIPIENT_PRESENTER) {
requestCode ^= REQUEST_MASK_RECIPIENT_PRESENTER;
recipientPresenter.onActivityResult(requestCode, resultCode, data);
return;
}
if ((requestCode & REQUEST_MASK_LOADER_HELPER) == REQUEST_MASK_LOADER_HELPER) {
requestCode ^= REQUEST_MASK_LOADER_HELPER;
messageLoaderHelper.onActivityResult(requestCode, resultCode, data);
return;
}
if ((requestCode & REQUEST_MASK_ATTACHMENT_PRESENTER) == REQUEST_MASK_ATTACHMENT_PRESENTER) {
requestCode ^= REQUEST_MASK_ATTACHMENT_PRESENTER;
attachmentPresenter.onActivityResult(resultCode, requestCode, data);
}
}
private void onAccountChosen(Account account, Identity identity) {
if (!this.account.equals(account)) {
Timber.v("Switching account from %s to %s", this.account, account);
// on draft edit, make sure we don't keep previous message UID
if (action == Action.EDIT_DRAFT) {
relatedMessageReference = null;
}
// test whether there is something to save
if (changesMadeSinceLastSave || (draftId != INVALID_DRAFT_ID)) {
final long previousDraftId = draftId;
final Account previousAccount = this.account;
// make current message appear as new
draftId = INVALID_DRAFT_ID;
// actual account switch
this.account = account;
Timber.v("Account switch, saving new draft in new account");
checkToSaveDraftImplicitly();
if (previousDraftId != INVALID_DRAFT_ID) {
Timber.v("Account switch, deleting draft from previous account: %d", previousDraftId);
MessagingController.getInstance(getApplication()).deleteDraft(previousAccount,
previousDraftId);
}
} else {
this.account = account;
}
// Show CC/BCC text input field when switching to an account that always wants them
// displayed.
// Please note that we're not hiding the fields if the user switches back to an account
// that doesn't have this setting checked.
recipientPresenter.onSwitchAccount(this.account);
quotedMessagePresenter.onSwitchAccount(this.account);
// not sure how to handle mFolder, mSourceMessage?
}
switchToIdentity(identity);
}
private void switchToIdentity(Identity identity) {
this.identity = identity;
identityChanged = true;
changesMadeSinceLastSave = true;
updateFrom();
updateSignature();
updateMessageFormat();
recipientPresenter.onSwitchIdentity(identity);
}
private void updateFrom() {
chooseIdentityButton.setText(identity.getEmail());
}
private void updateSignature() {
if (identity.getSignatureUse()) {
signatureView.setCharacters(identity.getSignature());
signatureView.setVisibility(View.VISIBLE);
} else {
signatureView.setVisibility(View.GONE);
}
}
@Override
public void onFocusChange(View v, boolean hasFocus) {
switch (v.getId()) {
case R.id.message_content:
case R.id.subject:
if (hasFocus) {
recipientPresenter.onNonRecipientFieldFocused();
}
break;
}
}
@Override
public void onCryptoModeChanged(CryptoMode cryptoMode) {
recipientPresenter.onCryptoModeChanged(cryptoMode);
}
@Override
public void onOpenPgpInlineChange(boolean enabled) {
recipientPresenter.onCryptoPgpInlineChanged(enabled);
}
@Override
public void onOpenPgpSignOnlyChange(boolean enabled) {
recipientPresenter.onCryptoPgpSignOnlyDisabled();
}
@Override
public void onAttachmentAdded() {
changesMadeSinceLastSave = true;
}
@Override
public void onAttachmentRemoved() {
changesMadeSinceLastSave = true;
}
@Override
public void onRecipientsChanged() {
changesMadeSinceLastSave = true;
}
@Override
public void onClick(View view) {
switch (view.getId()) {
case R.id.identity:
showDialog(DIALOG_CHOOSE_IDENTITY);
break;
}
}
private void askBeforeDiscard() {
if (K9.confirmDiscardMessage()) {
showDialog(DIALOG_CONFIRM_DISCARD);
} else {
onDiscard();
}
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId()) {
case android.R.id.home:
prepareToFinish(true);
break;
case R.id.send:
checkToSendMessage();
break;
case R.id.save:
checkToSaveDraftAndSave();
break;
case R.id.discard:
askBeforeDiscard();
break;
case R.id.add_from_contacts:
recipientPresenter.onMenuAddFromContacts();
break;
case R.id.openpgp_inline_enable:
recipientPresenter.onMenuSetPgpInline(true);
updateMessageFormat();
break;
case R.id.openpgp_inline_disable:
recipientPresenter.onMenuSetPgpInline(false);
updateMessageFormat();
break;
case R.id.openpgp_sign_only:
recipientPresenter.onMenuSetSignOnly(true);
break;
case R.id.openpgp_sign_only_disable:
recipientPresenter.onMenuSetSignOnly(false);
break;
case R.id.add_attachment:
attachmentPresenter.onClickAddAttachment(recipientPresenter);
break;
case R.id.read_receipt:
onReadReceipt();
break;
default:
return super.onOptionsItemSelected(item);
}
return true;
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
super.onCreateOptionsMenu(menu);
if (isFinishing()) {
return false;
}
getMenuInflater().inflate(R.menu.message_compose_option, menu);
// Disable the 'Save' menu option if Drafts folder is set to -NONE-
if (!account.hasDraftsFolder()) {
menu.findItem(R.id.save).setEnabled(false);
}
return true;
}
@Override
public boolean onPrepareOptionsMenu(Menu menu) {
super.onPrepareOptionsMenu(menu);
recipientPresenter.onPrepareOptionsMenu(menu);
return true;
}
@Override
public void onBackPressed() {
prepareToFinish(false);
}
private void prepareToFinish(boolean shouldNavigateUp) {
navigateUp = shouldNavigateUp;
if (changesMadeSinceLastSave && draftIsNotEmpty()) {
if (!account.hasDraftsFolder()) {
showDialog(DIALOG_CONFIRM_DISCARD_ON_BACK);
} else {
showDialog(DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE);
}
} else {
// Check if editing an existing draft.
if (draftId == INVALID_DRAFT_ID) {
onDiscard();
} else {
if (navigateUp) {
openAutoExpandFolder();
} else {
super.onBackPressed();
}
}
}
}
private void openAutoExpandFolder() {
String folder = account.getAutoExpandFolderName();
LocalSearch search = new LocalSearch(folder);
search.addAccountUuid(account.getUuid());
search.addAllowedFolder(folder);
MessageList.actionDisplaySearch(this, search, false, true);
finish();
}
private boolean draftIsNotEmpty() {
if (messageContentView.getText().length() != 0) {
return true;
}
if (!attachmentPresenter.createAttachmentList().isEmpty()) {
return true;
}
if (subjectView.getText().length() != 0) {
return true;
}
if (!recipientPresenter.getToAddresses().isEmpty() ||
!recipientPresenter.getCcAddresses().isEmpty() ||
!recipientPresenter.getBccAddresses().isEmpty()) {
return true;
}
return false;
}
public void onProgressCancel(ProgressDialogFragment fragment) {
attachmentPresenter.attachmentProgressDialogCancelled();
}
@Override
public Dialog onCreateDialog(int id) {
switch (id) {
case DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE:
return new AlertDialog.Builder(this)
.setTitle(R.string.save_or_discard_draft_message_dlg_title)
.setMessage(R.string.save_or_discard_draft_message_instructions_fmt)
.setPositiveButton(R.string.save_draft_action, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int whichButton) {
dismissDialog(DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE);
checkToSaveDraftAndSave();
}
})
.setNegativeButton(R.string.discard_action, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int whichButton) {
dismissDialog(DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE);
onDiscard();
}
})
.create();
case DIALOG_CONFIRM_DISCARD_ON_BACK:
return new AlertDialog.Builder(this)
.setTitle(R.string.confirm_discard_draft_message_title)
.setMessage(R.string.confirm_discard_draft_message)
.setPositiveButton(R.string.cancel_action, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int whichButton) {
dismissDialog(DIALOG_CONFIRM_DISCARD_ON_BACK);
}
})
.setNegativeButton(R.string.discard_action, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int whichButton) {
dismissDialog(DIALOG_CONFIRM_DISCARD_ON_BACK);
Toast.makeText(MessageCompose.this,
getString(R.string.message_discarded_toast),
Toast.LENGTH_LONG).show();
onDiscard();
}
})
.create();
case DIALOG_CHOOSE_IDENTITY:
Context context = new ContextThemeWrapper(this,
(K9.getK9Theme() == K9.Theme.LIGHT) ?
R.style.Theme_K9_Dialog_Light :
R.style.Theme_K9_Dialog_Dark);
Builder builder = new AlertDialog.Builder(context);
builder.setTitle(R.string.send_as);
final IdentityAdapter adapter = new IdentityAdapter(context);
builder.setAdapter(adapter, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
IdentityContainer container = (IdentityContainer) adapter.getItem(which);
onAccountChosen(container.account, container.identity);
}
});
return builder.create();
case DIALOG_CONFIRM_DISCARD: {
return new AlertDialog.Builder(this)
.setTitle(R.string.dialog_confirm_delete_title)
.setMessage(R.string.dialog_confirm_delete_message)
.setPositiveButton(R.string.dialog_confirm_delete_confirm_button,
new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int which) {
onDiscard();
}
})
.setNegativeButton(R.string.dialog_confirm_delete_cancel_button,
new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int which) {
}
})
.create();
}
}
return super.onCreateDialog(id);
}
public void saveDraftEventually() {
changesMadeSinceLastSave = true;
}
public void loadQuotedTextForEdit() {
if (relatedMessageReference == null) { // shouldn't happen...
throw new IllegalStateException("tried to edit quoted message with no referenced message");
}
messageLoaderHelper.asyncStartOrResumeLoadingMessage(relatedMessageReference, null);
}
/**
* Pull out the parts of the now loaded source message and apply them to the new message
* depending on the type of message being composed.
*
* @param messageViewInfo
* The source message used to populate the various text fields.
*/
private void processSourceMessage(MessageViewInfo messageViewInfo) {
try {
switch (action) {
case REPLY:
case REPLY_ALL: {
processMessageToReplyTo(messageViewInfo);
break;
}
case FORWARD: {
processMessageToForward(messageViewInfo);
break;
}
case EDIT_DRAFT: {
processDraftMessage(messageViewInfo);
break;
}
default: {
Timber.w("processSourceMessage() called with unsupported action");
break;
}
}
} catch (MessagingException me) {
/*
* Let the user continue composing their message even if we have a problem processing
* the source message. Log it as an error, though.
*/
Timber.e(me, "Error while processing source message: ");
} finally {
relatedMessageProcessed = true;
changesMadeSinceLastSave = false;
}
updateMessageFormat();
}
private void processMessageToReplyTo(MessageViewInfo messageViewInfo) throws MessagingException {
Message message = messageViewInfo.message;
if (message.getSubject() != null) {
final String subject = PREFIX.matcher(message.getSubject()).replaceFirst("");
if (!subject.toLowerCase(Locale.US).startsWith("re:")) {
subjectView.setText("Re: " + subject);
} else {
subjectView.setText(subject);
}
} else {
subjectView.setText("");
}
/*
* If a reply-to was included with the message use that, otherwise use the from
* or sender address.
*/
boolean isReplyAll = action == Action.REPLY_ALL;
recipientPresenter.initFromReplyToMessage(message, isReplyAll);
if (message.getMessageId() != null && message.getMessageId().length() > 0) {
repliedToMessageId = message.getMessageId();
String[] refs = message.getReferences();
if (refs != null && refs.length > 0) {
referencedMessageIds = TextUtils.join("", refs) + " " + repliedToMessageId;
} else {
referencedMessageIds = repliedToMessageId;
}
} else {
Timber.d("could not get Message-ID.");
}
// Quote the message and setup the UI.
quotedMessagePresenter.initFromReplyToMessage(messageViewInfo, action);
if (action == Action.REPLY || action == Action.REPLY_ALL) {
Identity useIdentity = IdentityHelper.getRecipientIdentityFromMessage(account, message);
Identity defaultIdentity = account.getIdentity(0);
if (useIdentity != defaultIdentity) {
switchToIdentity(useIdentity);
}
}
}
private void processMessageToForward(MessageViewInfo messageViewInfo) throws MessagingException {
Message message = messageViewInfo.message;
String subject = message.getSubject();
if (subject != null && !subject.toLowerCase(Locale.US).startsWith("fwd:")) {
subjectView.setText("Fwd: " + subject);
} else {
subjectView.setText(subject);
}
// "Be Like Thunderbird" - on forwarded messages, set the message ID
// of the forwarded message in the references and the reply to. TB
// only includes ID of the message being forwarded in the reference,
// even if there are multiple references.
if (!TextUtils.isEmpty(message.getMessageId())) {
repliedToMessageId = message.getMessageId();
referencedMessageIds = repliedToMessageId;
} else {
Timber.d("could not get Message-ID.");
}
// Quote the message and setup the UI.
quotedMessagePresenter.processMessageToForward(messageViewInfo);
attachmentPresenter.processMessageToForward(messageViewInfo);
}
private void processDraftMessage(MessageViewInfo messageViewInfo) {
Message message = messageViewInfo.message;
draftId = MessagingController.getInstance(getApplication()).getId(message);
subjectView.setText(message.getSubject());
recipientPresenter.initFromDraftMessage(message);
// Read In-Reply-To header from draft
final String[] inReplyTo = message.getHeader("In-Reply-To");
if (inReplyTo.length >= 1) {
repliedToMessageId = inReplyTo[0];
}
// Read References header from draft
final String[] references = message.getHeader("References");
if (references.length >= 1) {
referencedMessageIds = references[0];
}
if (!relatedMessageProcessed) {
attachmentPresenter.loadNonInlineAttachments(messageViewInfo);
}
// Decode the identity header when loading a draft.
// See buildIdentityHeader(TextBody) for a detailed description of the composition of this blob.
Map<IdentityField, String> k9identity = new HashMap<>();
String[] identityHeaders = message.getHeader(K9.IDENTITY_HEADER);
if (identityHeaders.length > 0 && identityHeaders[0] != null) {
k9identity = IdentityHeaderParser.parse(identityHeaders[0]);
}
Identity newIdentity = new Identity();
if (k9identity.containsKey(IdentityField.SIGNATURE)) {
newIdentity.setSignatureUse(true);
newIdentity.setSignature(k9identity.get(IdentityField.SIGNATURE));
signatureChanged = true;
} else {
if (message instanceof LocalMessage) {
newIdentity.setSignatureUse(((LocalMessage) message).getFolder().getSignatureUse());
}
newIdentity.setSignature(identity.getSignature());
}
if (k9identity.containsKey(IdentityField.NAME)) {
newIdentity.setName(k9identity.get(IdentityField.NAME));
identityChanged = true;
} else {
newIdentity.setName(identity.getName());
}
if (k9identity.containsKey(IdentityField.EMAIL)) {
newIdentity.setEmail(k9identity.get(IdentityField.EMAIL));
identityChanged = true;
} else {
newIdentity.setEmail(identity.getEmail());
}
if (k9identity.containsKey(IdentityField.ORIGINAL_MESSAGE)) {
relatedMessageReference = null;
String originalMessage = k9identity.get(IdentityField.ORIGINAL_MESSAGE);
MessageReference messageReference = MessageReference.parse(originalMessage);
if (messageReference != null) {
// Check if this is a valid account in our database
Preferences prefs = Preferences.getPreferences(getApplicationContext());
Account account = prefs.getAccount(messageReference.getAccountUuid());
if (account != null) {
relatedMessageReference = messageReference;
}
}
}
identity = newIdentity;
updateSignature();
updateFrom();
quotedMessagePresenter.processDraftMessage(messageViewInfo, k9identity);
}
static class SendMessageTask extends AsyncTask<Void, Void, Void> {
final Context context;
final Account account;
final Contacts contacts;
final Message message;
final Long draftId;
final MessageReference messageReference;
SendMessageTask(Context context, Account account, Contacts contacts, Message message,
Long draftId, MessageReference messageReference) {
this.context = context;
this.account = account;
this.contacts = contacts;
this.message = message;
this.draftId = draftId;
this.messageReference = messageReference;
}
@Override
protected Void doInBackground(Void... params) {
try {
contacts.markAsContacted(message.getRecipients(RecipientType.TO));
contacts.markAsContacted(message.getRecipients(RecipientType.CC));
contacts.markAsContacted(message.getRecipients(RecipientType.BCC));
updateReferencedMessage();
} catch (Exception e) {
Timber.e(e, "Failed to mark contact as contacted.");
}
MessagingController.getInstance(context).sendMessage(account, message, null);
if (draftId != null) {
// TODO set draft id to invalid in MessageCompose!
MessagingController.getInstance(context).deleteDraft(account, draftId);
}
return null;
}
/**
         * Set the flag on the referenced message (indicates we replied to / forwarded the message)
**/
private void updateReferencedMessage() {
if (messageReference != null && messageReference.getFlag() != null) {
Timber.d("Setting referenced message (%s, %s) flag to %s",
messageReference.getFolderName(),
messageReference.getUid(),
messageReference.getFlag());
final Account account = Preferences.getPreferences(context)
.getAccount(messageReference.getAccountUuid());
final String folderName = messageReference.getFolderName();
final String sourceMessageUid = messageReference.getUid();
MessagingController.getInstance(context).setFlag(account, folderName,
sourceMessageUid, messageReference.getFlag(), true);
}
}
}
/**
* When we are launched with an intent that includes a mailto: URI, we can actually
* gather quite a few of our message fields from it.
*
* @param mailTo
     *         The MailTo object we use to initialize the message fields
*/
private void initializeFromMailto(MailTo mailTo) {
recipientPresenter.initFromMailto(mailTo);
String subject = mailTo.getSubject();
if (subject != null && !subject.isEmpty()) {
subjectView.setText(subject);
}
String body = mailTo.getBody();
if (body != null && !body.isEmpty()) {
messageContentView.setCharacters(body);
}
}
private void setCurrentMessageFormat(SimpleMessageFormat format) {
// This method will later be used to enable/disable the rich text editing mode.
currentMessageFormat = format;
}
public void updateMessageFormat() {
MessageFormat origMessageFormat = account.getMessageFormat();
SimpleMessageFormat messageFormat;
if (origMessageFormat == MessageFormat.TEXT) {
// The user wants to send text/plain messages. We don't override that choice under
// any circumstances.
messageFormat = SimpleMessageFormat.TEXT;
} else if (quotedMessagePresenter.isForcePlainText()
&& quotedMessagePresenter.includeQuotedText()) {
// Right now we send a text/plain-only message when the quoted text was edited, no
// matter what the user selected for the message format.
messageFormat = SimpleMessageFormat.TEXT;
} else if (recipientPresenter.isForceTextMessageFormat()) {
// Right now we only support PGP inline which doesn't play well with HTML. So force
// plain text in those cases.
messageFormat = SimpleMessageFormat.TEXT;
} else if (origMessageFormat == MessageFormat.AUTO) {
if (action == Action.COMPOSE || quotedMessagePresenter.isQuotedTextText() ||
!quotedMessagePresenter.includeQuotedText()) {
// If the message format is set to "AUTO" we use text/plain whenever possible. That
// is, when composing new messages and replying to or forwarding text/plain
// messages.
messageFormat = SimpleMessageFormat.TEXT;
} else {
messageFormat = SimpleMessageFormat.HTML;
}
} else {
// In all other cases use HTML
messageFormat = SimpleMessageFormat.HTML;
}
setCurrentMessageFormat(messageFormat);
}
@Override
public void onMessageBuildSuccess(MimeMessage message, boolean isDraft) {
if (isDraft) {
changesMadeSinceLastSave = false;
currentMessageBuilder = null;
if (action == Action.EDIT_DRAFT && relatedMessageReference != null) {
message.setUid(relatedMessageReference.getUid());
}
// TODO more appropriate logic here? not sure
boolean saveRemotely = !recipientPresenter.getCurrentCryptoStatus().shouldUsePgpMessageBuilder();
new SaveMessageTask(getApplicationContext(), account, contacts, internalMessageHandler,
message, draftId, saveRemotely).execute();
if (finishAfterDraftSaved) {
finish();
} else {
setProgressBarIndeterminateVisibility(false);
}
} else {
currentMessageBuilder = null;
new SendMessageTask(getApplicationContext(), account, contacts, message,
draftId != INVALID_DRAFT_ID ? draftId : null, relatedMessageReference).execute();
finish();
}
}
@Override
public void onMessageBuildCancel() {
currentMessageBuilder = null;
setProgressBarIndeterminateVisibility(false);
}
@Override
public void onMessageBuildException(MessagingException me) {
Timber.e(me, "Error sending message");
Toast.makeText(MessageCompose.this,
getString(R.string.send_failed_reason, me.getLocalizedMessage()), Toast.LENGTH_LONG).show();
currentMessageBuilder = null;
setProgressBarIndeterminateVisibility(false);
}
@Override
public void onMessageBuildReturnPendingIntent(PendingIntent pendingIntent, int requestCode) {
requestCode |= REQUEST_MASK_MESSAGE_BUILDER;
try {
startIntentSenderForResult(pendingIntent.getIntentSender(), requestCode, null, 0, 0, 0);
} catch (SendIntentException e) {
Timber.e(e, "Error starting pending intent from builder!");
}
}
public void launchUserInteractionPendingIntent(PendingIntent pendingIntent, int requestCode) {
requestCode |= REQUEST_MASK_RECIPIENT_PRESENTER;
try {
startIntentSenderForResult(pendingIntent.getIntentSender(), requestCode, null, 0, 0, 0);
} catch (SendIntentException e) {
e.printStackTrace();
}
}
public void loadLocalMessageForDisplay(MessageViewInfo messageViewInfo, Action action) {
// We check to see if we've previously processed the source message since this
// could be called when switching from HTML to text replies. If that happens, we
// only want to update the UI with quoted text (which picks the appropriate
// part).
if (relatedMessageProcessed) {
try {
quotedMessagePresenter.populateUIWithQuotedMessage(messageViewInfo, true, action);
} catch (MessagingException e) {
// Hm, if we couldn't populate the UI after source reprocessing, let's just delete it?
quotedMessagePresenter.showOrHideQuotedText(QuotedTextMode.HIDE);
Timber.e(e, "Could not re-process source message; deleting quoted text to be safe.");
}
updateMessageFormat();
} else {
processSourceMessage(messageViewInfo);
relatedMessageProcessed = true;
}
}
private MessageLoaderCallbacks messageLoaderCallbacks = new MessageLoaderCallbacks() {
@Override
public void onMessageDataLoadFinished(LocalMessage message) {
// nothing to do here, we don't care about message headers
}
@Override
public void onMessageDataLoadFailed() {
internalMessageHandler.sendEmptyMessage(MSG_PROGRESS_OFF);
Toast.makeText(MessageCompose.this, R.string.status_invalid_id_error, Toast.LENGTH_LONG).show();
}
@Override
public void onMessageViewInfoLoadFinished(MessageViewInfo messageViewInfo) {
internalMessageHandler.sendEmptyMessage(MSG_PROGRESS_OFF);
loadLocalMessageForDisplay(messageViewInfo, action);
}
@Override
public void onMessageViewInfoLoadFailed(MessageViewInfo messageViewInfo) {
internalMessageHandler.sendEmptyMessage(MSG_PROGRESS_OFF);
Toast.makeText(MessageCompose.this, R.string.status_invalid_id_error, Toast.LENGTH_LONG).show();
}
@Override
public void setLoadingProgress(int current, int max) {
// nvm - we don't have a progress bar
}
@Override
public void startIntentSenderForMessageLoaderHelper(IntentSender si, int requestCode, Intent fillIntent,
int flagsMask, int flagValues, int extraFlags) {
try {
requestCode |= REQUEST_MASK_LOADER_HELPER;
startIntentSenderForResult(si, requestCode, fillIntent, flagsMask, flagValues, extraFlags);
} catch (SendIntentException e) {
Timber.e(e, "Irrecoverable error calling PendingIntent!");
}
}
@Override
public void onDownloadErrorMessageNotFound() {
runOnUiThread(new Runnable() {
@Override
public void run() {
Toast.makeText(MessageCompose.this, R.string.status_invalid_id_error, Toast.LENGTH_LONG).show();
}
});
}
@Override
public void onDownloadErrorNetworkError() {
runOnUiThread(new Runnable() {
@Override
public void run() {
Toast.makeText(MessageCompose.this, R.string.status_network_error, Toast.LENGTH_LONG).show();
}
});
}
};
private void initializeActionBar() {
ActionBar actionBar = getActionBar();
actionBar.setDisplayHomeAsUpEnabled(true);
}
    // TODO We miss callbacks for this listener if they happen while we are paused!
public MessagingListener messagingListener = new SimpleMessagingListener() {
@Override
public void messageUidChanged(Account account, String folder, String oldUid, String newUid) {
if (relatedMessageReference == null) {
return;
}
Account sourceAccount = Preferences.getPreferences(MessageCompose.this)
.getAccount(relatedMessageReference.getAccountUuid());
String sourceFolder = relatedMessageReference.getFolderName();
String sourceMessageUid = relatedMessageReference.getUid();
boolean changedMessageIsCurrent =
account.equals(sourceAccount) && folder.equals(sourceFolder) && oldUid.equals(sourceMessageUid);
if (changedMessageIsCurrent) {
relatedMessageReference = relatedMessageReference.withModifiedUid(newUid);
}
}
};
AttachmentMvpView attachmentMvpView = new AttachmentMvpView() {
private HashMap<Uri, View> attachmentViews = new HashMap<>();
@Override
public void showWaitingForAttachmentDialog(WaitingAction waitingAction) {
String title;
switch (waitingAction) {
case SEND: {
title = getString(R.string.fetching_attachment_dialog_title_send);
break;
}
case SAVE: {
title = getString(R.string.fetching_attachment_dialog_title_save);
break;
}
default: {
return;
}
}
ProgressDialogFragment fragment = ProgressDialogFragment.newInstance(title,
getString(R.string.fetching_attachment_dialog_message));
fragment.show(getFragmentManager(), FRAGMENT_WAITING_FOR_ATTACHMENT);
}
@Override
public void dismissWaitingForAttachmentDialog() {
ProgressDialogFragment fragment = (ProgressDialogFragment)
getFragmentManager().findFragmentByTag(FRAGMENT_WAITING_FOR_ATTACHMENT);
if (fragment != null) {
fragment.dismiss();
}
}
@Override
@SuppressLint("InlinedApi")
public void showPickAttachmentDialog(int requestCode) {
requestCode |= REQUEST_MASK_ATTACHMENT_PRESENTER;
Intent i = new Intent(Intent.ACTION_GET_CONTENT);
i.putExtra(Intent.EXTRA_ALLOW_MULTIPLE, true);
i.addCategory(Intent.CATEGORY_OPENABLE);
i.setType("*/*");
isInSubActivity = true;
startActivityForResult(Intent.createChooser(i, null), requestCode);
}
@Override
public void addAttachmentView(final Attachment attachment) {
View view = getLayoutInflater().inflate(R.layout.message_compose_attachment, attachmentsView, false);
attachmentViews.put(attachment.uri, view);
View deleteButton = view.findViewById(R.id.attachment_delete);
deleteButton.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View view) {
attachmentPresenter.onClickRemoveAttachment(attachment.uri);
}
});
updateAttachmentView(attachment);
attachmentsView.addView(view);
}
@Override
public void updateAttachmentView(Attachment attachment) {
View view = attachmentViews.get(attachment.uri);
if (view == null) {
throw new IllegalArgumentException();
}
TextView nameView = (TextView) view.findViewById(R.id.attachment_name);
boolean hasMetadata = (attachment.state != Attachment.LoadingState.URI_ONLY);
if (hasMetadata) {
nameView.setText(attachment.name);
} else {
nameView.setText(R.string.loading_attachment);
}
View progressBar = view.findViewById(R.id.progressBar);
boolean isLoadingComplete = (attachment.state == Attachment.LoadingState.COMPLETE);
progressBar.setVisibility(isLoadingComplete ? View.GONE : View.VISIBLE);
}
@Override
public void removeAttachmentView(Attachment attachment) {
View view = attachmentViews.get(attachment.uri);
attachmentsView.removeView(view);
attachmentViews.remove(attachment.uri);
}
@Override
public void performSendAfterChecks() {
MessageCompose.this.performSendAfterChecks();
}
@Override
public void performSaveAfterChecks() {
MessageCompose.this.performSaveAfterChecks();
}
@Override
public void showMissingAttachmentsPartialMessageWarning() {
Toast.makeText(MessageCompose.this,
getString(R.string.message_compose_attachments_skipped_toast), Toast.LENGTH_LONG).show();
}
};
private Handler internalMessageHandler = new Handler() {
@Override
public void handleMessage(android.os.Message msg) {
switch (msg.what) {
case MSG_PROGRESS_ON:
setProgressBarIndeterminateVisibility(true);
break;
case MSG_PROGRESS_OFF:
setProgressBarIndeterminateVisibility(false);
break;
case MSG_SAVED_DRAFT:
draftId = (Long) msg.obj;
Toast.makeText(
MessageCompose.this,
getString(R.string.message_saved_toast),
Toast.LENGTH_LONG).show();
break;
case MSG_DISCARDED_DRAFT:
Toast.makeText(
MessageCompose.this,
getString(R.string.message_discarded_toast),
Toast.LENGTH_LONG).show();
break;
default:
super.handleMessage(msg);
break;
}
}
};
public enum Action {
COMPOSE(R.string.compose_title_compose),
REPLY(R.string.compose_title_reply),
REPLY_ALL(R.string.compose_title_reply_all),
FORWARD(R.string.compose_title_forward),
EDIT_DRAFT(R.string.compose_title_compose);
private final int titleResource;
Action(@StringRes int titleResource) {
this.titleResource = titleResource;
}
@StringRes
public int getTitleResource() {
return titleResource;
}
}
}
| 1 | 15,534 | I feel like this belongs in `RecipientPresenter` (which should really be `MetadataPresenter`), but I suppose having it next to requestReadReceipt makes sense as well. Feel free to leave it as is. | k9mail-k-9 | java |
@@ -1,6 +1,6 @@
Summary: Open Programmable Acceleration Engine (OPAE) SDK
Name: opae
-Version: 1.4.0
+Version: 1.4.1
Release: 1%{?dist}
License: BSD
ExclusiveArch: x86_64 | 1 | Summary: Open Programmable Acceleration Engine (OPAE) SDK
Name: opae
Version: 1.4.0
Release: 1%{?dist}
License: BSD
ExclusiveArch: x86_64
Group: Development/Libraries
Vendor: Intel Corporation
Requires: uuid, json-c, python
URL: https://github.com/OPAE/%{name}-sdk
Source0: https://github.com/OPAE/opae-sdk/releases/download/%{version}-1/%{name}.tar.gz
BuildRequires: gcc, gcc-c++
BuildRequires: cmake
BuildRequires: python3-devel
BuildRequires: json-c-devel
BuildRequires: libuuid-devel
BuildRequires: rpm-build
BuildRequires: hwloc-devel
BuildRequires: python-sphinx
BuildRequires: doxygen
BuildRequires: systemd-rpm-macros
BuildRequires: systemd
%description
Open Programmable Acceleration Engine (OPAE) is a software framework
for managing and accessing programmable accelerators (FPGAs).
Its main parts are:
* OPAE Software Development Kit (OPAE SDK) (this package)
* OPAE Linux driver for Intel(R) Xeon(R) CPU with
Integrated FPGAs and Intel(R) PAC with Arria(R) 10 GX FPGA
* Basic Building Block (BBB) library for accelerating AFU
OPAE SDK is a collection of libraries and tools to facilitate the
development of software applications and accelerators using OPAE.
It provides a library implementing the OPAE C API for presenting a
streamlined and easy-to-use interface for software applications to
discover, access, and manage FPGA devices and accelerators using
the OPAE software stack.
%package devel
Summary: OPAE headers, sample source, and documentation
Requires: libuuid-devel, %{name}%{?_isa} = %{version}-%{release}
%description devel
OPAE headers, tools, sample source, and documentation
%prep
%setup -q -n %{name}
%build
rm -rf _build
mkdir _build
cd _build
%cmake .. -DCMAKE_INSTALL_PREFIX=/usr
%make_build opae-c \
bitstream \
xfpga \
safestr \
modbmc \
opae-cxx-core \
hello_cxxcore \
board_rc \
board_vc \
fpgaconf \
fpgainfo \
userclk \
object_api \
hello_fpga \
hello_events \
mmlink
%install
mkdir -p %{buildroot}%{_datadir}/opae
cp RELEASE_NOTES.md %{buildroot}%{_datadir}/opae/RELEASE_NOTES.md
cp LICENSE %{buildroot}%{_datadir}/opae/LICENSE
cp COPYING %{buildroot}%{_datadir}/opae/COPYING
mkdir -p %{buildroot}%{_usr}/src/opae/cmake/modules
for s in FindSphinx.cmake
do
cp "cmake/${s}" %{buildroot}%{_usr}/src/opae/cmake/
done
mkdir -p %{buildroot}%{_usr}/src/opae/opae-libs/cmake/modules
for s in FindHwloc.cmake \
OPAE.cmake \
FindUUID.cmake \
Findjson-c.cmake \
OPAECompiler.cmake \
OPAEGit.cmake \
OPAEPackaging.cmake
do
cp "opae-libs/cmake/modules/${s}" %{buildroot}%{_usr}/src/opae/opae-libs/cmake/modules
done
mkdir -p %{buildroot}%{_usr}/src/opae/samples
mkdir -p %{buildroot}%{_usr}/src/opae/samples/hello_fpga/
mkdir -p %{buildroot}%{_usr}/src/opae/samples/hello_events/
mkdir -p %{buildroot}%{_usr}/src/opae/samples/object_api/
cp samples/hello_fpga/hello_fpga.c %{buildroot}%{_usr}/src/opae/samples/hello_fpga/
cp samples/hello_events/hello_events.c %{buildroot}%{_usr}/src/opae/samples/hello_events/
cp samples/object_api/object_api.c %{buildroot}%{_usr}/src/opae/samples/object_api/
cd _build
DESTDIR=%{buildroot} cmake -DCOMPONENT=safestrlib -P cmake_install.cmake
DESTDIR=%{buildroot} cmake -DCOMPONENT=opaeclib -P cmake_install.cmake
DESTDIR=%{buildroot} cmake -DCOMPONENT=opaecxxcorelib -P cmake_install.cmake
DESTDIR=%{buildroot} cmake -DCOMPONENT=samples -P cmake_install.cmake
DESTDIR=%{buildroot} cmake -DCOMPONENT=opaetoolslibs -P cmake_install.cmake
DESTDIR=%{buildroot} cmake -DCOMPONENT=toolfpgainfo -P cmake_install.cmake
DESTDIR=%{buildroot} cmake -DCOMPONENT=toolfpgaconf -P cmake_install.cmake
DESTDIR=%{buildroot} cmake -DCOMPONENT=tooluserclk -P cmake_install.cmake
DESTDIR=%{buildroot} cmake -DCOMPONENT=toolmmlink -P cmake_install.cmake
DESTDIR=%{buildroot} cmake -DCOMPONENT=samplebin -P cmake_install.cmake
DESTDIR=%{buildroot} cmake -DCOMPONENT=libopaeheaders -P cmake_install.cmake
DESTDIR=%{buildroot} cmake -DCOMPONENT=safestrheaders -P cmake_install.cmake
DESTDIR=%{buildroot} cmake -DCOMPONENT=toolpackager -P cmake_install.cmake
DESTDIR=%{buildroot} cmake -DCOMPONENT=jsonschema -P cmake_install.cmake
%files
%dir %{_datadir}/opae
%doc %{_datadir}/opae/RELEASE_NOTES.md
%license %{_datadir}/opae/LICENSE
%license %{_datadir}/opae/COPYING
%{_libdir}/libopae-c.so.%{version}
%{_libdir}/libopae-c.so.1
%{_libdir}/libopae-c.so
%{_libdir}/libbitstream.so.%{version}
%{_libdir}/libbitstream.so.1
%{_libdir}/libbitstream.so
%{_libdir}/libopae-cxx-core.so.%{version}
%{_libdir}/libopae-cxx-core.so.1
%{_libdir}/libopae-cxx-core.so
%{_libdir}/opae/libxfpga.so*
%{_libdir}/opae/libmodbmc.so*
%{_libdir}/libsafestr.a*
%files devel
%dir %{_includedir}/opae
%{_includedir}/opae/*
%dir %{_includedir}/safe_string
%{_includedir}/safe_string/safe_string.h
%{_libdir}/libsafestr.a
%dir %{_usr}/src/opae
%{_usr}/src/opae/samples/hello_fpga/hello_fpga.c
%{_usr}/src/opae/samples/hello_events/hello_events.c
%{_usr}/src/opae/samples/object_api/object_api.c
%{_usr}/src/opae/cmake/*
%{_usr}/src/opae/opae-libs/cmake/modules/*
%{_libdir}/opae/libboard_rc.so*
%{_libdir}/opae/libboard_vc.so*
%{_bindir}/fpgaconf
%{_bindir}/fpgainfo
%{_bindir}/mmlink
%{_bindir}/userclk
%{_bindir}/hello_fpga
%{_bindir}/object_api
%{_bindir}/hello_events
%{_bindir}/hello_cxxcore
%{_bindir}/afu_json_mgr
%{_bindir}/packager
%{_usr}/share/opae/*
%changelog
* Tue Dec 17 2019 Korde Nakul <nakul.korde@intel.com> 1.4.0-1
- Added support for the FPGA Linux kernel Device Feature List (DFL) driver patch set 2.
- Increased test cases and test coverage
- Various bug fixes
- Various compiler warning fixes
- Various memory leak fixes
- Various Static code scan bug fixes
- Added new FPGA MMIO API to write 512 bits
| 1 | 19,485 | Let's go back to 1.4.0 until the release is prepared. | OPAE-opae-sdk | c |
@@ -7337,6 +7337,7 @@ pre_system_call(dcontext_t *dcontext)
size_t sigsetsize)
*/
/* we also need access to the params in post_system_call */
+ uint errno_val = 0;
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2); | 1 | /* *******************************************************************************
* Copyright (c) 2010-2021 Google, Inc. All rights reserved.
* Copyright (c) 2011 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* *******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/*
* os.c - Linux specific routines
*/
/* Easiest to match kernel stat struct by using 64-bit.
* This limits us to 2.4+ kernel but that's ok.
* I don't really want to get into requiring kernel headers to build
* general release packages, though that would be fine for targeted builds.
* There are 3 different stat syscalls (SYS_oldstat, SYS_stat, and SYS_stat64)
* and using _LARGEFILE64_SOURCE with SYS_stat64 is the best match.
*/
#define _LARGEFILE64_SOURCE
/* for mmap-related #defines */
#include <sys/types.h>
#include <sys/mman.h>
/* in case MAP_32BIT is missing */
#ifndef MAP_32BIT
# define MAP_32BIT 0x40
#endif
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON /* MAP_ANON on Mac */
#endif
/* for open */
#include <sys/stat.h>
#include <fcntl.h>
#include "../globals.h"
#include "../hashtable.h"
#include "../native_exec.h"
#include <unistd.h> /* for write and usleep and _exit */
#include <limits.h>
#ifdef MACOS
# include <sys/sysctl.h> /* for sysctl */
# ifndef SYS___sysctl
/* The name was changed on Yosemite */
# define SYS___sysctl SYS_sysctl
# endif
# include <mach/mach_traps.h> /* for swtch_pri */
# include "include/syscall_mach.h"
#endif
#ifdef LINUX
# include <sys/vfs.h> /* for statfs */
#elif defined(MACOS)
# include <sys/mount.h> /* for statfs */
# include <mach/mach.h>
# include <mach/task.h>
# include <mach/semaphore.h>
# include <mach/sync_policy.h>
#endif
#include <dirent.h>
/* for getrlimit */
#include <sys/time.h>
#include <sys/resource.h>
#ifndef X64
struct compat_rlimit {
uint rlim_cur;
uint rlim_max;
};
#endif
#ifdef MACOS
typedef struct rlimit rlimit64_t;
#else
typedef struct rlimit64 rlimit64_t;
#endif
#ifdef LINUX
/* For clone and its flags, the manpage says to include sched.h with _GNU_SOURCE
* defined. _GNU_SOURCE brings in unwanted extensions and causes name
* conflicts. Instead, we include unix/sched.h which comes from the Linux
* kernel headers.
*/
# include <linux/sched.h>
#endif
#include "module.h" /* elf */
#include "tls.h"
#if defined(X86) && defined(DEBUG)
# include "os_asm_defines.asm" /* for TLS_SELF_OFFSET_ASM */
#endif
#ifndef F_DUPFD_CLOEXEC /* in linux 2.6.24+ */
# define F_DUPFD_CLOEXEC 1030
#endif
/* This is not always sufficient to identify a syscall return value.
* For example, MacOS has some 32-bit syscalls that return 64-bit
* values in xdx:xax.
*/
#define MCXT_SYSCALL_RES(mc) ((mc)->IF_X86_ELSE(xax, r0))
#if defined(DR_HOST_AARCH64)
# define READ_TP_TO_R3_DISP_IN_R2 \
"mrs " ASM_R3 ", tpidr_el0\n\t" \
"ldr " ASM_R3 ", [" ASM_R3 ", " ASM_R2 "] \n\t"
#elif defined(DR_HOST_ARM)
# define READ_TP_TO_R3_DISP_IN_R2 \
"mrc p15, 0, " ASM_R3 \
", c13, c0, " STRINGIFY(USR_TLS_REG_OPCODE) " \n\t" \
"ldr " ASM_R3 ", [" ASM_R3 \
", " ASM_R2 "] \n\t"
#endif /* ARM */
/* Prototype for all functions in .init_array. */
typedef int (*init_fn_t)(int argc, char **argv, char **envp);
/* For STATIC_LIBRARY we do not cache environ so the app can change it. */
#ifndef STATIC_LIBRARY
/* i#46: Private __environ pointer. Points at the environment variable array
* on the stack, which is different from what libc __environ may point at. We
 * use the environment for following children and setting options, so it's OK
* that we don't see what libc says.
*/
char **our_environ;
#endif
#include <errno.h>
/* avoid problems with use of errno as var name in rest of file */
#if !defined(STANDALONE_UNIT_TEST) && !defined(MACOS)
# undef errno
#endif
/* we define __set_errno below */
/* must be prior to <link.h> => <elf.h> => INT*_{MIN,MAX} */
#include "instr.h" /* for get_app_segment_base() */
#include "decode_fast.h" /* decode_cti: maybe os_handle_mov_seg should be ifdef X86? */
#include <dlfcn.h>
#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <syslog.h> /* vsyslog */
#include "../vmareas.h"
#ifdef RCT_IND_BRANCH
# include "../rct.h"
#endif
#ifdef LINUX
# include "include/syscall.h" /* our own local copy */
# include "include/clone3.h"
# include "include/close_range.h"
#else
# include <sys/syscall.h>
#endif
#include "../module_shared.h"
#include "os_private.h"
#include "../synch.h"
#include "memquery.h"
#include "ksynch.h"
#include "dr_tools.h" /* dr_syscall_result_info_t */
#ifndef HAVE_MEMINFO_QUERY
# include "memcache.h"
#endif
#include "instrument.h"
#ifdef LINUX
# include "rseq_linux.h"
#endif
#ifdef MACOS
# define SYSNUM_EXIT_PROCESS SYS_exit
# define SYSNUM_EXIT_THREAD SYS_bsdthread_terminate
#else
# define SYSNUM_EXIT_PROCESS SYS_exit_group
# define SYSNUM_EXIT_THREAD SYS_exit
#endif
#ifdef ANDROID
/* Custom prctl flags specific to Android (xref i#1861) */
# define PR_SET_VMA 0x53564d41
# define PR_SET_VMA_ANON_NAME 0
#endif
/* Guards data written by os_set_app_thread_area(). */
DECLARE_CXTSWPROT_VAR(static mutex_t set_thread_area_lock,
INIT_LOCK_FREE(set_thread_area_lock));
static bool first_thread_tls_initialized;
static bool last_thread_tls_exited;
tls_type_t tls_global_type;
#ifndef HAVE_TLS
/* We use a table lookup to find a thread's dcontext */
/* Our only current no-TLS target, VMKernel (VMX86_SERVER), doesn't have apps with
* tons of threads anyway
*/
# define MAX_THREADS 512
typedef struct _tls_slot_t {
thread_id_t tid;
dcontext_t *dcontext;
} tls_slot_t;
/* Stored in heap for self-prot */
static tls_slot_t *tls_table;
/* not static so deadlock_avoidance_unlock() can look for it */
DECLARE_CXTSWPROT_VAR(mutex_t tls_lock, INIT_LOCK_FREE(tls_lock));
#endif
/* Should we place this in a client header? Currently mentioned in
* dr_raw_tls_calloc() docs.
*/
static bool client_tls_allocated[MAX_NUM_CLIENT_TLS];
DECLARE_CXTSWPROT_VAR(static mutex_t client_tls_lock, INIT_LOCK_FREE(client_tls_lock));
#include <stddef.h> /* for offsetof */
#include <sys/utsname.h> /* for struct utsname */
/* forward decl */
static void
handle_execve_post(dcontext_t *dcontext);
static bool
os_switch_lib_tls(dcontext_t *dcontext, bool to_app);
static bool
os_switch_seg_to_context(dcontext_t *dcontext, reg_id_t seg, bool to_app);
#ifdef X86
static bool
os_set_dr_tls_base(dcontext_t *dcontext, os_local_state_t *tls, byte *base);
#endif
#ifdef LINUX
static bool
handle_app_mremap(dcontext_t *dcontext, byte *base, size_t size, byte *old_base,
size_t old_size, uint old_prot, uint old_type);
static void
handle_app_brk(dcontext_t *dcontext, byte *lowest_brk /*if known*/, byte *old_brk,
byte *new_brk);
#endif
/* full path to our own library, used for execve */
static char dynamorio_library_path[MAXIMUM_PATH]; /* just dir */
static char dynamorio_library_filepath[MAXIMUM_PATH];
/* Issue 20: path to other architecture */
static char dynamorio_alt_arch_path[MAXIMUM_PATH];
static char dynamorio_alt_arch_filepath[MAXIMUM_PATH]; /* just dir */
/* Makefile passes us LIBDIR_X{86,64} defines */
#define DR_LIBDIR_X86 STRINGIFY(LIBDIR_X86)
#define DR_LIBDIR_X64 STRINGIFY(LIBDIR_X64)
/* pc values delimiting dynamo dll image */
static app_pc dynamo_dll_start = NULL;
static app_pc dynamo_dll_end = NULL; /* open-ended */
/* pc values delimiting the app, equal to the "dll" bounds for static DR */
static app_pc executable_start = NULL;
static app_pc executable_end = NULL;
/* Used by get_application_name(). */
static char executable_path[MAXIMUM_PATH];
static char *executable_basename;
/* Pointers to arguments. Refers to the main stack set up by the kernel.
* These are only written once during process init and we can live with
* the non-guaranteed-delay until they are visible to other cores.
*/
static int *app_argc = NULL;
static char **app_argv = NULL;
/* does the kernel provide tids that must be used to distinguish threads in a group? */
static bool kernel_thread_groups;
static bool kernel_64bit;
pid_t pid_cached;
static bool fault_handling_initialized;
#ifdef PROFILE_RDTSC
uint kilo_hertz; /* cpu clock speed */
#endif
/* Xref PR 258731, dup of STDOUT/STDERR in case app wants to close them. */
DR_API file_t our_stdout = STDOUT_FILENO;
DR_API file_t our_stderr = STDERR_FILENO;
DR_API file_t our_stdin = STDIN_FILENO;
/* we steal fds from the app */
static rlimit64_t app_rlimit_nofile; /* cur rlimit set by app */
static int min_dr_fd;
/* we store all DR files so we can prevent the app from changing them,
* and so we can close them in a child of fork.
* the table key is the fd and the payload is the set of DR_FILE_* flags.
*/
static generic_table_t *fd_table;
#define INIT_HTABLE_SIZE_FD 6 /* should remain small */
#ifdef DEBUG
static int num_fd_add_pre_heap;
#endif
#ifdef LINUX
/* i#1004: brk emulation */
static byte *app_brk_map;
static byte *app_brk_cur;
static byte *app_brk_end;
#endif
#ifdef MACOS
static int macos_version;
#endif
static bool
is_readable_without_exception_internal(const byte *pc, size_t size, bool query_os);
static bool
mmap_check_for_module_overlap(app_pc base, size_t size, bool readable, uint64 inode,
bool at_map);
#ifdef LINUX
static char *
read_proc_self_exe(bool ignore_cache);
#endif
/* Libc independent directory iterator, similar to readdir. If we ever need
* this on Windows we should generalize it and export it to clients.
*/
typedef struct _dir_iterator_t {
file_t fd;
int off;
int end;
const char *name; /* Name of the current entry. */
char buf[4 * MAXIMUM_PATH]; /* Expect stack alloc, so not too big. */
} dir_iterator_t;
static void
os_dir_iterator_start(dir_iterator_t *iter, file_t fd);
static bool
os_dir_iterator_next(dir_iterator_t *iter);
/* XXX: If we generalize to Windows, will we need os_dir_iterator_stop()? */
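/* Illustrative usage sketch for the directory iterator declared above. This is
 * not part of DR itself: the helper name below is hypothetical, and it simply
 * assumes os_dir_iterator_next() returns false once the directory is exhausted
 * and that iter.name holds the current entry name, as documented in the struct.
 */
static inline void
example_log_directory_entries(file_t dir_fd)
{
    dir_iterator_t iter;
    os_dir_iterator_start(&iter, dir_fd);
    while (os_dir_iterator_next(&iter)) {
        /* iter.name points at the name of the current directory entry. */
        LOG(GLOBAL, LOG_TOP, 3, "dir entry: %s\n", iter.name);
    }
}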
/* vsyscall page. Hardcoded at 0xffffe000 in earlier kernels, but
 * randomly placed since Fedora 2.
 * It was marked rx back then: FIXME: we should disallow it when that's the case!
 * The randomized vsyscall page is identified in maps files as "[vdso]"
 * (a kernel-provided fake shared library, or Virtual Dynamic Shared Object).
*/
/* i#1583: vdso is now 2 pages, yet we assume vsyscall is on 1st page. */
/* i#2945: vdso is now 3 pages and vsyscall is not on the 1st page. */
app_pc vsyscall_page_start = NULL;
/* pc of the end of the syscall instr itself */
app_pc vsyscall_syscall_end_pc = NULL;
/* pc where kernel returns control after sysenter vsyscall */
app_pc vsyscall_sysenter_return_pc = NULL;
/* pc where our hook-displaced code was copied */
app_pc vsyscall_sysenter_displaced_pc = NULL;
#define VSYSCALL_PAGE_START_HARDCODED ((app_pc)(ptr_uint_t)0xffffe000)
#ifdef X64
/* i#430, in Red Hat Enterprise Server 5.6, vsyscall region is marked
* not executable
* ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vsyscall]
*/
# define VSYSCALL_REGION_MAPS_NAME "[vsyscall]"
#endif
/* i#1908: vdso and vsyscall are now split */
app_pc vdso_page_start = NULL;
size_t vdso_size = 0;
#if !defined(STANDALONE_UNIT_TEST) && !defined(STATIC_LIBRARY)
/* The pthreads library keeps errno in its pthread_descr data structure,
* which it looks up by dispatching on the stack pointer. This doesn't work
* when within dynamo. Thus, we define our own __errno_location() for use both
* by us and the app, to prevent pthreads looking at the stack pointer when
* out of the code cache.
*/
/* FIXME: maybe we should create 1st dcontext earlier so we don't need init_errno?
* any problems with init_errno being set and then dcontext->errno being read?
* FIXME: if a thread issues a dr_app_stop, then we don't want to use
* this errno slot? But it may later do a start...probably ok to keep using
* the slot. But, when threads die, they'll all use the same init_errno!
*/
static int init_errno; /* errno until 1st dcontext created */
int *
__errno_location(void)
{
/* Each dynamo thread should have a separate errno */
dcontext_t *dcontext = get_thread_private_dcontext();
if (dcontext == NULL)
return &init_errno;
else {
/* WARNING: init_errno is in data segment so can be RO! */
return &(dcontext->upcontext_ptr->dr_errno);
}
}
#endif /* !STANDALONE_UNIT_TEST && !STATIC_LIBRARY */
#ifdef HAVE_TLS
/* i#598
* (gdb) x/20i (*(errno_loc_t)0xf721e413)
* 0xf721e413 <__errno_location>: push %ebp
* 0xf721e414 <__errno_location+1>: mov %esp,%ebp
* 0xf721e416 <__errno_location+3>: call <__x86.get_pc_thunk.cx>
* 0xf721e41b <__errno_location+8>: add $0x166bd9,%ecx
* 0xf721e421 <__errno_location+14>: mov -0x1c(%ecx),%eax
* 0xf721e427 <__errno_location+20>: add %gs:0x0,%eax
* 0xf721e42e <__errno_location+27>: pop %ebp
* 0xf721e42f <__errno_location+28>: ret
*
 * __errno_location calculates the errno location by adding
 * the TLS base to errno's offset in TLS.
 * However, because the TLS has been switched in os_tls_init,
 * the calculated address is wrong.
 * We first get the errno offset in TLS at init time and
 * calculate the correct address by adding the app's TLS base.
*/
/* __errno_location on ARM:
* 0xb6f0b290 <__errno_location>: ldr r3, [pc, #12]
* 0xb6f0b292 <__errno_location+2>: mrc 15, 0, r0, cr13, cr0, {3}
* 0xb6f0b296 <__errno_location+6>: add r3, pc
* 0xb6f0b298 <__errno_location+8>: ldr r3, [r3, #0]
* 0xb6f0b29a <__errno_location+10>: adds r0, r0, r3
* 0xb6f0b29c <__errno_location+12>: bx lr
* It uses the predefined offset to get errno location in TLS,
* and we should be able to reuse the code here.
*/
static int libc_errno_tls_offs;
static int *
our_libc_errno_loc(void)
{
void *app_tls = os_get_app_tls_base(NULL, TLS_REG_LIB);
if (app_tls == NULL)
return NULL;
return (int *)(app_tls + libc_errno_tls_offs);
}
#endif
/* i#238/PR 499179: libc errno preservation
*
* Errno location is per-thread so we store the
* function globally and call it each time. Note that pthreads seems
* to be the one who provides per-thread errno: using raw syscalls to
* create threads, we end up with a global errno:
*
* > for i in linux.thread.*0/log.*; do grep 'libc errno' $i | head -1; done
* libc errno loc: 0x00007f153de26698
* libc errno loc: 0x00007f153de26698
* > for i in pthreads.pthreads.*0/log.*; do grep 'libc errno' $i | head -1; done
* libc errno loc: 0x00007fc24d1ce698
* libc errno loc: 0x00007fc24d1cd8b8
* libc errno loc: 0x00007fc24c7cc8b8
*/
typedef int *(*errno_loc_t)(void);
#ifdef LINUX
/* Stores whether clone3 is unsupported on the system we're running on. */
static bool is_clone3_enosys = false;
#endif
static errno_loc_t
get_libc_errno_location(bool do_init)
{
static errno_loc_t libc_errno_loc;
if (do_init) {
module_iterator_t *mi = module_iterator_start();
while (module_iterator_hasnext(mi)) {
module_area_t *area = module_iterator_next(mi);
const char *modname = GET_MODULE_NAME(&area->names);
            /* We ensure the match is at the start of the name to avoid matching "libgolibc.so".
* GET_MODULE_NAME never includes the path: i#138 will add path.
*/
if (modname != NULL && strstr(modname, "libc.so") == modname) {
bool found = true;
/* called during init when .data is writable */
libc_errno_loc =
(errno_loc_t)d_r_get_proc_address(area->start, "__errno_location");
ASSERT(libc_errno_loc != NULL);
LOG(GLOBAL, LOG_THREADS, 2, "libc errno loc func: " PFX "\n",
libc_errno_loc);
                /* Currently, DR is loaded by the system loader and hooked up
                 * to the app's libc, so right now we still need this routine.
                 * We can remove this after libc independence and/or
                 * early injection.
*/
if (INTERNAL_OPTION(private_loader)) {
acquire_recursive_lock(&privload_lock);
if (privload_lookup_by_base(area->start) != NULL)
found = false;
release_recursive_lock(&privload_lock);
}
if (found)
break;
}
}
module_iterator_stop(mi);
#ifdef HAVE_TLS
/* i#598: init the libc errno's offset. If we didn't find libc above,
* then we don't need to do this.
*/
if (INTERNAL_OPTION(private_loader) && libc_errno_loc != NULL) {
void *priv_lib_tls_base = os_get_priv_tls_base(NULL, TLS_REG_LIB);
ASSERT(priv_lib_tls_base != NULL);
libc_errno_tls_offs = (void *)libc_errno_loc() - priv_lib_tls_base;
libc_errno_loc = &our_libc_errno_loc;
}
#endif
}
return libc_errno_loc;
}
/* i#238/PR 499179: our __errno_location isn't affecting libc so until
* we have libc independence or our own private isolated libc we need
* to preserve the app's libc's errno
*/
int
get_libc_errno(void)
{
#if defined(STANDALONE_UNIT_TEST) && (defined(MACOS) || defined(ANDROID))
return errno;
#else
# ifdef STANDALONE_UNIT_TEST
errno_loc_t func = __errno_location;
# else
errno_loc_t func = get_libc_errno_location(false);
# endif
if (func == NULL) {
/* libc hasn't been loaded yet or we're doing early injection. */
return 0;
} else {
int *loc = (*func)();
ASSERT(loc != NULL);
LOG(THREAD_GET, LOG_THREADS, 5, "libc errno loc: " PFX "\n", loc);
if (loc != NULL)
return *loc;
}
return 0;
#endif
}
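/* Minimal illustrative sketch of the complementary write path: preserving the
 * app's libc errno means reading it (get_libc_errno() above) and writing the
 * saved value back before returning to the app. The helper name below is
 * hypothetical and this is not necessarily how DR itself restores errno.
 */
static inline void
example_restore_libc_errno(int saved_errno)
{
    int *loc;
    errno_loc_t func = get_libc_errno_location(false);
    if (func == NULL)
        return; /* libc not loaded yet: nothing to restore */
    loc = (*func)();
    if (loc != NULL)
        *loc = saved_errno;
}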
/* N.B.: pthreads has two other locations it keeps on a per-thread basis:
* h_errno and res_state. See glibc-2.2.4/linuxthreads/errno.c.
* If dynamo ever modifies those we'll need to do to them what we now do to
* errno.
*/
/* The environment vars exhibit totally messed up behavior when someone
* does an execve of /bin/sh -- not sure what's going on, but using our
* own implementation of unsetenv fixes all our problems. If we use
* libc's, unsetenv either does nothing or ends up having getenv return
* NULL for other vars that are obviously set (by iterating through environ).
* FIXME: find out the real story here.
*/
int
our_unsetenv(const char *name)
{
/* FIXME: really we should have some kind of synchronization */
size_t name_len;
char **env = our_environ;
if (name == NULL || *name == '\0' || strchr(name, '=') != NULL) {
return -1;
}
ASSERT(our_environ != NULL);
if (our_environ == NULL)
return -1;
name_len = strlen(name);
while (*env != NULL) {
if (strncmp(*env, name, name_len) == 0 && (*env)[name_len] == '=') {
/* We have a match. Shift the subsequent entries. Keep going to
* handle later matches.
*/
char **e;
for (e = env; *e != NULL; e++)
*e = *(e + 1);
} else {
env++;
}
}
return 0;
}
/* Clobbers the name rather than shifting, to preserve auxv (xref i#909). */
bool
disable_env(const char *name)
{
size_t name_len;
char **env = our_environ;
if (name == NULL || *name == '\0' || strchr(name, '=') != NULL) {
return false;
}
ASSERT(our_environ != NULL);
if (our_environ == NULL)
return false;
name_len = strlen(name);
while (*env != NULL) {
if (strncmp(*env, name, name_len) == 0 && (*env)[name_len] == '=') {
/* We have a match. If we shift subsequent entries we'll mess
* up access to auxv, which is after the env block, so we instead
* disable the env var by changing its name.
* We keep going to handle later matches.
*/
snprintf(*env, name_len, "__disabled__");
}
env++;
}
return true;
}
/* i#46: Private getenv.
*/
char *
our_getenv(const char *name)
{
char **env = our_environ;
size_t i;
size_t name_len;
if (name == NULL || name[0] == '\0' || strchr(name, '=') != NULL) {
return NULL;
}
ASSERT_MESSAGE(CHKLVL_ASSERTS,
"our_environ is missing. _init() or "
"dynamorio_set_envp() were not called",
our_environ != NULL);
if (our_environ == NULL)
return NULL;
name_len = strlen(name);
for (i = 0; env[i] != NULL; i++) {
if (strncmp(env[i], name, name_len) == 0 && env[i][name_len] == '=') {
return env[i] + name_len + 1;
}
}
return NULL;
}
bool
is_our_environ_followed_by_auxv(void)
{
#ifdef STATIC_LIBRARY
/* Since we initialize late, our_environ is likely no longer pointed at
* the stack (i#2122).
*/
return false;
#else
return true;
#endif
}
/* Work around drpreload's _init going first. We can get envp in our own _init
* routine down below, but drpreload.so comes first and calls
* dynamorio_app_init before our own _init routine gets called. Apps using the
* app API are unaffected because our _init routine will have run by then. For
* STATIC_LIBRARY, we used to set our_environ in our_init(), but to support
* the app setting DYNAMORIO_OPTIONS after our_init() runs, we now just use environ.
*/
DYNAMORIO_EXPORT
void
dynamorio_set_envp(char **envp)
{
our_environ = envp;
}
/* shared library init */
static int
our_init(int argc, char **argv, char **envp)
{
/* If we do not want to use drpreload.so, we can take over here: but when using
* drpreload, this is called *after* we have already taken over.
*/
extern void dynamorio_app_take_over(void);
bool takeover = false;
#ifdef INIT_TAKE_OVER
takeover = true;
#endif
#ifdef VMX86_SERVER
/* PR 391765: take over here instead of using preload */
takeover = os_in_vmkernel_classic();
#endif
#ifndef STATIC_LIBRARY
if (our_environ != NULL) {
/* Set by dynamorio_set_envp above. These should agree. */
ASSERT(our_environ == envp);
} else {
our_environ = envp;
}
#endif
/* if using preload, no -early_inject */
#ifdef STATIC_LIBRARY
if (!takeover) {
const char *takeover_env = getenv("DYNAMORIO_TAKEOVER_IN_INIT");
if (takeover_env != NULL && strcmp(takeover_env, "1") == 0) {
takeover = true;
}
}
#endif
if (takeover) {
if (dynamorio_app_init() == 0 /* success */) {
dynamorio_app_take_over();
}
}
return 0;
}
#if defined(STATIC_LIBRARY) || defined(STANDALONE_UNIT_TEST)
/* If we're getting linked into a binary that already has an _init definition
* like the app's exe or unit_tests, we add a pointer to our_init() to the
* .init_array section. We can't use the constructor attribute because not all
* toolchains pass the args and environment to the constructor.
*/
static init_fn_t
# ifdef MACOS
__attribute__((section("__DATA,__mod_init_func"), aligned(sizeof(void *)), used))
# else
__attribute__((section(".init_array"), aligned(sizeof(void *)), used))
# endif
init_array[] = { our_init };
#else
/* If we're a normal shared object, then we override _init.
*/
int
_init(int argc, char **argv, char **envp)
{
# ifdef ANDROID
/* i#1862: the Android loader passes *nothing* to lib init routines. We
* rely on DR being listed before libc so we can read the TLS slot the
* kernel set up.
*/
if (!get_kernel_args(&argc, &argv, &envp)) {
/* XXX: scan the stack and look for known auxv patterns or sthg. */
argc = 0;
argv = NULL;
envp = NULL;
}
ASSERT_MESSAGE(CHKLVL_ASSERTS, "failed to find envp", envp != NULL);
# endif
return our_init(argc, argv, envp);
}
#endif
bool
kernel_is_64bit(void)
{
return kernel_64bit;
}
#ifdef MACOS
/* XXX: if we get enough of these, move to os_macos.c or sthg */
static bool
sysctl_query(int level0, int level1, void *buf, size_t bufsz)
{
int res;
int name[2];
size_t len = bufsz;
name[0] = level0;
name[1] = level1;
res = dynamorio_syscall(SYS___sysctl, 6, &name, 2, buf, &len, NULL, 0);
return (res >= 0);
}
int
os_get_version(void)
{
return macos_version;
}
#endif
static void
get_uname(void)
{
/* assumption: only called at init, so we don't need any synch
* or .data unprot
*/
static struct utsname uinfo; /* can be large, avoid stack overflow */
#ifdef MACOS
if (!sysctl_query(CTL_KERN, KERN_OSTYPE, &uinfo.sysname, sizeof(uinfo.sysname)) ||
!sysctl_query(CTL_KERN, KERN_HOSTNAME, &uinfo.nodename, sizeof(uinfo.nodename)) ||
!sysctl_query(CTL_KERN, KERN_OSRELEASE, &uinfo.release, sizeof(uinfo.release)) ||
!sysctl_query(CTL_KERN, KERN_VERSION, &uinfo.version, sizeof(uinfo.version)) ||
!sysctl_query(CTL_HW, HW_MACHINE, &uinfo.machine, sizeof(uinfo.machine))) {
ASSERT(false && "sysctl queries failed");
return;
}
#else
DEBUG_DECLARE(int res =)
dynamorio_syscall(SYS_uname, 1, (ptr_uint_t)&uinfo);
ASSERT(res >= 0);
#endif
LOG(GLOBAL, LOG_TOP, 1, "uname:\n\tsysname: %s\n", uinfo.sysname);
LOG(GLOBAL, LOG_TOP, 1, "\tnodename: %s\n", uinfo.nodename);
LOG(GLOBAL, LOG_TOP, 1, "\trelease: %s\n", uinfo.release);
LOG(GLOBAL, LOG_TOP, 1, "\tversion: %s\n", uinfo.version);
LOG(GLOBAL, LOG_TOP, 1, "\tmachine: %s\n", uinfo.machine);
if (strncmp(uinfo.machine, "x86_64", sizeof("x86_64")) == 0)
kernel_64bit = true;
#ifdef MACOS
/* XXX: I would skip these checks for standalone so we don't have to set env
* vars for frontends to see the options but I'm still afraid of some syscall
* crash with no output: I'd rather have two messages than silent crashing.
*/
if (DYNAMO_OPTION(max_supported_os_version) != 0) { /* 0 disables */
/* We only support OSX 10.7.5+. That means kernels 11.x+. */
# define MIN_DARWIN_VERSION_SUPPORTED 11
int kernel_major;
if (sscanf(uinfo.release, "%d", &kernel_major) != 1 ||
kernel_major > DYNAMO_OPTION(max_supported_os_version) ||
kernel_major < MIN_DARWIN_VERSION_SUPPORTED) {
/* We make this non-fatal as it's likely DR will work */
SYSLOG(SYSLOG_WARNING, UNSUPPORTED_OS_VERSION, 3, get_application_name(),
get_application_pid(), uinfo.release);
}
macos_version = kernel_major;
}
#endif
}
#if defined(LINUX)
/* For some syscalls, detects whether they are unsupported by the system
 * we're running on. In particular, we are interested in detecting missing
* support early-on for syscalls that require complex pre-syscall handling
* by DR. We use this information to fail early for those syscalls.
*
* XXX: Move other logic for detecting unsupported syscalls from their
* respective locations to here at init time, like that for
* SYS_memfd_create in os_create_memory_file.
*
*/
static void
detect_unsupported_syscalls()
{
/* We know that when clone3 is available, it fails with EINVAL with
* these args.
*/
int clone3_errno =
dynamorio_syscall(SYS_clone3, 2, NULL /*clone_args*/, 0 /*clone_args_size*/);
ASSERT(clone3_errno == -ENOSYS || clone3_errno == -EINVAL);
is_clone3_enosys = clone3_errno == -ENOSYS;
}
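/* Illustrative sketch only (hypothetical helper; the real check belongs in the
 * syscall pre-handler): the flag computed above lets DR fail a clone3 request
 * early with ENOSYS instead of attempting its complex clone pre-handling on a
 * kernel that does not support the syscall.
 */
static inline bool
example_should_fail_clone3_early(int sysnum)
{
    return sysnum == SYS_clone3 && is_clone3_enosys;
}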
#endif
/* os-specific initializations */
void
d_r_os_init(void)
{
ksynch_init();
get_uname();
/* Populate global data caches. */
get_application_name();
get_application_base();
    /* Determine whether gettid is provided and needed for threads,
     * or whether getpid suffices. Even 2.4 kernels have gettid
     * (it maps to getpid); we don't have an old enough target to test this.
*/
#ifdef MACOS
kernel_thread_groups = (dynamorio_syscall(SYS_thread_selfid, 0) >= 0);
#else
kernel_thread_groups = (dynamorio_syscall(SYS_gettid, 0) >= 0);
#endif
LOG(GLOBAL, LOG_TOP | LOG_STATS, 1, "thread id is from %s\n",
kernel_thread_groups ? "gettid" : "getpid");
#ifdef MACOS
/* SYS_thread_selfid was added in 10.6. We have no simple way to get the
* thread id on 10.5, so we don't support it.
*/
if (!kernel_thread_groups) {
SYSLOG(SYSLOG_WARNING, UNSUPPORTED_OS_VERSION, 3, get_application_name(),
get_application_pid(), "Mac OSX 10.5 or earlier");
}
#else
ASSERT_CURIOSITY(kernel_thread_groups);
#endif
pid_cached = get_process_id();
#ifdef VMX86_SERVER
vmk_init();
#endif
d_r_signal_init();
/* We now set up an early fault handler for d_r_safe_read() (i#350) */
fault_handling_initialized = true;
memquery_init();
#ifdef PROFILE_RDTSC
if (dynamo_options.profile_times) {
ASSERT_NOT_TESTED();
kilo_hertz = get_timer_frequency();
LOG(GLOBAL, LOG_TOP | LOG_STATS, 1, "CPU MHz is %d\n", kilo_hertz / 1000);
}
#endif /* PROFILE_RDTSC */
/* Needs to be after heap_init */
IF_NO_MEMQUERY(memcache_init());
/* we didn't have heap in os_file_init() so create and add global logfile now */
fd_table = generic_hash_create(
GLOBAL_DCONTEXT, INIT_HTABLE_SIZE_FD, 80 /* load factor: not perf-critical */,
HASHTABLE_SHARED | HASHTABLE_PERSISTENT, NULL _IF_DEBUG("fd table"));
#ifdef DEBUG
if (GLOBAL != INVALID_FILE)
fd_table_add(GLOBAL, OS_OPEN_CLOSE_ON_FORK);
#endif
/* Ensure initialization */
get_dynamorio_dll_start();
#ifdef LINUX
if (DYNAMO_OPTION(emulate_brk))
init_emulated_brk(NULL);
#endif
#ifdef ANDROID
/* This must be set up earlier than privload_tls_init, and must be set up
* for non-client-interface as well, as this initializes DR_TLS_BASE_OFFSET
* (i#1931).
*/
init_android_version();
#endif
#ifdef LINUX
if (!standalone_library)
d_r_rseq_init();
#endif
#ifdef MACOS64
tls_process_init();
#endif
#if defined(LINUX)
detect_unsupported_syscalls();
#endif
}
/* called before any logfiles are opened */
void
os_file_init(void)
{
/* We steal fds from the app for better transparency. We lower the max file
* descriptor limit as viewed by the app, and block SYS_dup{2,3} and
* SYS_fcntl(F_DUPFD*) from creating a file explicitly in our space. We do
* not try to stop incremental file opening from extending into our space:
* if the app really is running out of fds, we'll give it some of ours:
* after all we probably don't need all -steal_fds, and if we really need fds
     * we typically open them at startup. We also don't bother watching all the
     * syscalls that take in fds in order to keep them from affecting our fds.
*/
if (DYNAMO_OPTION(steal_fds) > 0) {
struct rlimit rlimit_nofile;
/* SYS_getrlimit uses an old 32-bit-field struct so we want SYS_ugetrlimit */
if (dynamorio_syscall(
IF_MACOS_ELSE(SYS_getrlimit, IF_X64_ELSE(SYS_getrlimit, SYS_ugetrlimit)),
2, RLIMIT_NOFILE, &rlimit_nofile) != 0) {
/* linux default is 1024 */
SYSLOG_INTERNAL_WARNING("getrlimit RLIMIT_NOFILE failed"); /* can't LOG yet */
rlimit_nofile.rlim_cur = 1024;
rlimit_nofile.rlim_max = 1024;
}
/* pretend the limit is lower and reserve the top spots for us.
* for simplicity and to give as much room as possible to app,
* raise soft limit to equal hard limit.
* if an app really depends on a low soft limit, they can run
* with -steal_fds 0.
*/
if (rlimit_nofile.rlim_max > DYNAMO_OPTION(steal_fds)) {
int res;
min_dr_fd = rlimit_nofile.rlim_max - DYNAMO_OPTION(steal_fds);
app_rlimit_nofile.rlim_max = min_dr_fd;
app_rlimit_nofile.rlim_cur = app_rlimit_nofile.rlim_max;
rlimit_nofile.rlim_cur = rlimit_nofile.rlim_max;
res = dynamorio_syscall(SYS_setrlimit, 2, RLIMIT_NOFILE, &rlimit_nofile);
if (res != 0) {
SYSLOG_INTERNAL_WARNING("unable to raise RLIMIT_NOFILE soft limit: %d",
res);
}
} else /* not fatal: we'll just end up using fds in app space */
SYSLOG_INTERNAL_WARNING("unable to reserve fds");
}
/* we don't have heap set up yet so we init fd_table in os_init */
}
/* we need to re-cache after a fork */
static char *
get_application_pid_helper(bool ignore_cache)
{
static char pidstr[16];
if (!pidstr[0] || ignore_cache) {
int pid = get_process_id();
snprintf(pidstr, sizeof(pidstr) - 1, "%d", pid);
}
return pidstr;
}
/* get application pid, (cached), used for event logging */
char *
get_application_pid()
{
return get_application_pid_helper(false);
}
/* The OSX kernel used to place the bare executable path above envp.
* On recent XNU versions, the kernel now prefixes the executable path
* with the string executable_path= so it can be parsed getenv style.
*/
#ifdef MACOS
# define EXECUTABLE_KEY "executable_path="
#endif
/* i#189: we need to re-cache after a fork */
static char *
get_application_name_helper(bool ignore_cache, bool full_path)
{
if (!executable_path[0] || ignore_cache) {
#ifdef VMX86_SERVER
if (os_in_vmkernel_userworld()) {
vmk_getnamefrompid(pid, executable_path, sizeof(executable_path));
} else
#endif
if (DYNAMO_OPTION(early_inject)) {
ASSERT(executable_path[0] != '\0' &&
"i#907: Can't read /proc/self/exe for early injection");
} else {
#ifdef LINUX
/* Populate cache from /proc/self/exe link. */
strncpy(executable_path, read_proc_self_exe(ignore_cache),
BUFFER_SIZE_ELEMENTS(executable_path));
#else
/* OSX kernel puts full app exec path above envp */
char *c, **env = our_environ;
do {
env++;
} while (*env != NULL);
env++; /* Skip the NULL separating the envp array from exec_path */
c = *env;
if (strncmp(EXECUTABLE_KEY, c, strlen(EXECUTABLE_KEY)) == 0) {
c += strlen(EXECUTABLE_KEY);
}
/* If our frontends always absolute-ize paths prior to exec,
* this should usually be absolute -- but we go ahead and
* handle relative just in case (and to handle child processes).
* We add the cur dir, but note that the resulting path can
* still contain . or .. so it's not normalized (but it is a
* correct absolute path). Xref i#1402, i#1406, i#1407.
*/
if (*c != '/') {
int len;
if (!os_get_current_dir(executable_path,
BUFFER_SIZE_ELEMENTS(executable_path)))
len = 0;
else
len = strlen(executable_path);
snprintf(executable_path + len,
BUFFER_SIZE_ELEMENTS(executable_path) - len, "%s%s",
len > 0 ? "/" : "", c);
} else
strncpy(executable_path, c, BUFFER_SIZE_ELEMENTS(executable_path));
#endif
NULL_TERMINATE_BUFFER(executable_path);
/* FIXME: Fall back on /proc/self/cmdline and maybe argv[0] from
* _init().
*/
ASSERT(strlen(executable_path) > 0 && "readlink /proc/self/exe failed");
}
}
/* Get basename. */
if (executable_basename == NULL || ignore_cache) {
executable_basename = strrchr(executable_path, '/');
executable_basename =
(executable_basename == NULL ? executable_path : executable_basename + 1);
}
return (full_path ? executable_path : executable_basename);
}
/* get application name, (cached), used for event logging */
char *
get_application_name(void)
{
return get_application_name_helper(false, true /* full path */);
}
/* i#907: Called during early injection before data section protection to avoid
* issues with /proc/self/exe.
*/
void
set_executable_path(const char *exe_path)
{
strncpy(executable_path, exe_path, BUFFER_SIZE_ELEMENTS(executable_path));
NULL_TERMINATE_BUFFER(executable_path);
/* Re-compute the basename in case the full path changed. */
get_application_name_helper(true /* re-compute */, false /* basename */);
}
/* Note: this is exported so that libdrpreload.so (preload.c) can use it to
* get process names to do selective process following (PR 212034). The
* alternative is to duplicate or compile in this code into libdrpreload.so,
* which is messy. Besides, libdynamorio.so is already loaded into the process
 * and available, so it's cleaner to just use functions from it.
*/
DYNAMORIO_EXPORT const char *
get_application_short_name(void)
{
return get_application_name_helper(false, false /* short name */);
}
/* Sets pointers to the application's command-line arguments. These pointers are then used
* by get_app_args().
*/
void
set_app_args(IN int *app_argc_in, IN char **app_argv_in)
{
app_argc = app_argc_in;
app_argv = app_argv_in;
}
/* Returns the number of application's command-line arguments. */
int
num_app_args()
{
if (!DYNAMO_OPTION(early_inject)) {
set_client_error_code(NULL, DR_ERROR_NOT_IMPLEMENTED);
return -1;
}
return *app_argc;
}
/* Returns the application's command-line arguments. */
int
get_app_args(OUT dr_app_arg_t *args_array, int args_count)
{
if (args_array == NULL || args_count < 0) {
set_client_error_code(NULL, DR_ERROR_INVALID_PARAMETER);
return -1;
}
if (!DYNAMO_OPTION(early_inject)) {
set_client_error_code(NULL, DR_ERROR_NOT_IMPLEMENTED);
return -1;
}
int num_args = num_app_args();
int min = (args_count < num_args) ? args_count : num_args;
for (int i = 0; i < min; i++) {
args_array[i].start = (void *)app_argv[i];
        args_array[i].size = strlen(app_argv[i]) + 1 /* include terminating NULL */;
args_array[i].encoding = DR_APP_ARG_CSTR_COMPAT;
}
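    /* Return the number of entries actually filled in; a caller can compare this
     * against num_app_args() to detect truncation.
     */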
return min;
}
/* Processor information provided by kernel */
#define PROC_CPUINFO "/proc/cpuinfo"
#define CPUMHZ_LINE_LENGTH 64
#define CPUMHZ_LINE_FORMAT "cpu MHz\t\t: %lu.%03lu\n"
/* printed in /usr/src/linux-2.4/arch/i386/kernel/setup.c calibrated in time.c */
/* seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n", cpu_khz / 1000, (cpu_khz % 1000)) */
/* e.g. cpu MHz : 1594.851 */
static timestamp_t
get_timer_frequency_cpuinfo(void)
{
file_t cpuinfo;
ssize_t nread;
char *buf;
char *mhz_line;
ulong cpu_mhz = 1000;
ulong cpu_khz = 0;
cpuinfo = os_open(PROC_CPUINFO, OS_OPEN_READ);
/* This can happen in a chroot or if /proc is disabled. */
if (cpuinfo == INVALID_FILE)
return 1000 * 1000; /* 1 GHz */
/* cpu MHz is typically in the first 4096 bytes. If not, or we get a short
* or interrupted read, our timer frequency estimate will be off, but it's
* not the end of the world.
* FIXME: Factor a buffered file reader out of our maps iterator if we want
* to do this the right way.
*/
buf = global_heap_alloc(PAGE_SIZE HEAPACCT(ACCT_OTHER));
nread = os_read(cpuinfo, buf, PAGE_SIZE - 1);
if (nread > 0) {
buf[nread] = '\0';
mhz_line = strstr(buf, "cpu MHz\t\t:");
if (mhz_line != NULL &&
sscanf(mhz_line, CPUMHZ_LINE_FORMAT, &cpu_mhz, &cpu_khz) == 2) {
LOG(GLOBAL, LOG_ALL, 2, "Processor speed exactly %lu.%03luMHz\n", cpu_mhz,
cpu_khz);
}
}
global_heap_free(buf, PAGE_SIZE HEAPACCT(ACCT_OTHER));
os_close(cpuinfo);
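    /* The result is in KHz: e.g., "cpu MHz : 1594.851" yields 1594851. */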
return cpu_mhz * 1000 + cpu_khz;
}
timestamp_t
get_timer_frequency()
{
#ifdef VMX86_SERVER
if (os_in_vmkernel_userworld()) {
return vmk_get_timer_frequency();
}
#endif
return get_timer_frequency_cpuinfo();
}
/* DR has standardized on UTC time which counts from since Jan 1, 1601.
* That's the Windows standard. But Linux uses the Epoch of Jan 1, 1970.
*/
#define UTC_TO_EPOCH_SECONDS 11644473600
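/* 11644473600 == (369 years (1601-1969) * 365 days + 89 leap days) * 86400 seconds. */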
/* seconds since 1601 */
uint
query_time_seconds(void)
{
struct timeval current_time;
uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, ¤t_time, NULL);
#ifdef MACOS
/* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */
if (macos_version < MACOS_VERSION_SIERRA) {
if ((int)val < 0)
return 0;
return (uint)val + UTC_TO_EPOCH_SECONDS;
}
#endif
if ((int)val >= 0) {
return current_time.tv_sec + UTC_TO_EPOCH_SECONDS;
} else {
ASSERT_NOT_REACHED();
return 0;
}
}
/* milliseconds since 1601 */
uint64
query_time_millis()
{
struct timeval current_time;
uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, ¤t_time, NULL);
#ifdef MACOS
/* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */
if (macos_version < MACOS_VERSION_SIERRA) {
if ((int)val > 0) {
current_time.tv_sec = (uint)val;
current_time.tv_usec = (uint)(val >> 32);
}
}
#endif
if ((int)val >= 0) {
uint64 res =
(((uint64)current_time.tv_sec) * 1000) + (current_time.tv_usec / 1000);
res += UTC_TO_EPOCH_SECONDS * 1000;
return res;
} else {
ASSERT_NOT_REACHED();
return 0;
}
}
/* microseconds since 1601 */
uint64
query_time_micros()
{
struct timeval current_time;
uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, ¤t_time, NULL);
#ifdef MACOS
/* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */
if (macos_version < MACOS_VERSION_SIERRA) {
if ((int)val > 0) {
current_time.tv_sec = (uint)val;
current_time.tv_usec = (uint)(val >> 32);
}
}
#endif
if ((int)val >= 0) {
uint64 res = (((uint64)current_time.tv_sec) * 1000000) + current_time.tv_usec;
res += UTC_TO_EPOCH_SECONDS * 1000000;
return res;
} else {
ASSERT_NOT_REACHED();
return 0;
}
}
#ifdef RETURN_AFTER_CALL
/* Finds the bottom of the call stack, presumably at program startup. */
/* This routine is a copycat of internal_dump_callstack and makes
   assumptions about program state, i.e., that frame pointers are valid,
   and so should be used only at well-known points in release builds.
*/
static app_pc
find_stack_bottom()
{
app_pc retaddr = 0;
int depth = 0;
reg_t *fp;
/* from dump_dr_callstack() */
asm("mov %%" ASM_XBP ", %0" : "=m"(fp));
LOG(THREAD_GET, LOG_ALL, 3, "Find stack bottom:\n");
while (fp != NULL && is_readable_without_exception((byte *)fp, sizeof(reg_t) * 2)) {
retaddr = (app_pc) * (fp + 1); /* presumably also readable */
LOG(THREAD_GET, LOG_ALL, 3,
"\tframe ptr " PFX " => parent " PFX ", ret = " PFX "\n", fp, *fp, retaddr);
depth++;
/* yes I've seen weird recursive cases before */
if (fp == (reg_t *)*fp || depth > 100)
break;
fp = (reg_t *)*fp;
}
return retaddr;
}
#endif /* RETURN_AFTER_CALL */
/* os-specific atexit cleanup */
void
os_slow_exit(void)
{
#ifdef MACOS64
tls_process_exit();
#endif
#ifdef LINUX
if (!standalone_library)
d_r_rseq_exit();
#endif
d_r_signal_exit();
memquery_exit();
ksynch_exit();
generic_hash_destroy(GLOBAL_DCONTEXT, fd_table);
fd_table = NULL;
if (doing_detach) {
vsyscall_page_start = NULL;
IF_DEBUG(num_fd_add_pre_heap = 0;)
}
DELETE_LOCK(set_thread_area_lock);
DELETE_LOCK(client_tls_lock);
IF_NO_MEMQUERY(memcache_exit());
}
/* Helper function that calls cleanup_and_terminate after blocking most signals
 * (i#2921).
*/
void
block_cleanup_and_terminate(dcontext_t *dcontext, int sysnum, ptr_uint_t sys_arg1,
ptr_uint_t sys_arg2, bool exitproc,
/* these 2 args are only used for Mac thread exit */
ptr_uint_t sys_arg3, ptr_uint_t sys_arg4)
{
/* This thread is on its way to exit. We are blocking all signals since any
* signal that reaches us now can be delayed until after the exit is complete.
* We may still receive a suspend signal for synchronization that we may need
* to reply to (i#2921).
*/
if (sysnum == SYS_kill)
block_all_noncrash_signals_except(NULL, 2, dcontext->sys_param0, SUSPEND_SIGNAL);
else
block_all_noncrash_signals_except(NULL, 1, SUSPEND_SIGNAL);
cleanup_and_terminate(dcontext, sysnum, sys_arg1, sys_arg2, exitproc, sys_arg3,
sys_arg4);
}
/* os-specific atexit cleanup */
void
os_fast_exit(void)
{
/* nothing */
}
void
os_terminate_with_code(dcontext_t *dcontext, terminate_flags_t flags, int exit_code)
{
/* i#1319: we support a signal via 2nd byte */
bool use_signal = exit_code > 0x00ff;
/* XXX: TERMINATE_THREAD not supported */
ASSERT_NOT_IMPLEMENTED(TEST(TERMINATE_PROCESS, flags));
if (use_signal) {
int sig = (exit_code & 0xff00) >> 8;
os_terminate_via_signal(dcontext, flags, sig);
ASSERT_NOT_REACHED();
}
if (TEST(TERMINATE_CLEANUP, flags)) {
/* we enter from several different places, so rewind until top-level kstat */
KSTOP_REWIND_UNTIL(thread_measured);
block_cleanup_and_terminate(dcontext, SYSNUM_EXIT_PROCESS, exit_code, 0,
true /*whole process*/, 0, 0);
} else {
/* clean up may be impossible - just terminate */
d_r_config_exit(); /* delete .1config file */
exit_process_syscall(exit_code);
}
}
void
os_terminate(dcontext_t *dcontext, terminate_flags_t flags)
{
os_terminate_with_code(dcontext, flags, -1);
}
int
os_timeout(int time_in_milliseconds)
{
ASSERT_NOT_IMPLEMENTED(false);
return 0;
}
/************************************************************************
* SEGMENT STEALING
*
* Not easy to make truly transparent -- but the alternative of dispatch
* by thread id on global memory has performance implications.
* Pull the non-STEAL_SEGMENT code out of the cvs attic for a base if
* transparency becomes more of a problem.
*/
#define TLS_LOCAL_STATE_OFFSET (offsetof(os_local_state_t, state))
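/* os_tls_offset() below asserts that this is 0, i.e., that the local state is
 * laid out at the very start of os_local_state_t.
 */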
/* offset from top of page */
#define TLS_OS_LOCAL_STATE 0x00
#define TLS_SELF_OFFSET (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, self))
#define TLS_THREAD_ID_OFFSET (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, tid))
#define TLS_DCONTEXT_OFFSET (TLS_OS_LOCAL_STATE + TLS_DCONTEXT_SLOT)
#ifdef X86
# define TLS_MAGIC_OFFSET (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, magic))
#endif
/* they should be used with os_tls_offset, so do not need add TLS_OS_LOCAL_STATE here
*/
#define TLS_APP_LIB_TLS_BASE_OFFSET (offsetof(os_local_state_t, app_lib_tls_base))
#define TLS_APP_ALT_TLS_BASE_OFFSET (offsetof(os_local_state_t, app_alt_tls_base))
#define TLS_APP_LIB_TLS_REG_OFFSET (offsetof(os_local_state_t, app_lib_tls_reg))
#define TLS_APP_ALT_TLS_REG_OFFSET (offsetof(os_local_state_t, app_alt_tls_reg))
/* N.B.: imm and offs are ushorts!
* We use %c[0-9] to get gcc to emit an integer constant without a leading $ for
* the segment offset. See the documentation here:
* http://gcc.gnu.org/onlinedocs/gccint/Output-Template.html#Output-Template
* Also, var needs to match the pointer size, or else we'll get stack corruption.
 * XXX: This is marked volatile to prevent gcc from speculating this code before
* checks for is_thread_tls_initialized(), but if we could find a more
* precise constraint, then the compiler would be able to optimize better. See
* glibc comments on THREAD_SELF.
*/
#ifdef DR_HOST_NOT_TARGET
# define WRITE_TLS_SLOT_IMM(imm, var) var = 0, ASSERT_NOT_REACHED()
# define READ_TLS_SLOT_IMM(imm, var) var = 0, ASSERT_NOT_REACHED()
# define WRITE_TLS_INT_SLOT_IMM(imm, var) var = 0, ASSERT_NOT_REACHED()
# define READ_TLS_INT_SLOT_IMM(imm, var) var = 0, ASSERT_NOT_REACHED()
# define WRITE_TLS_SLOT(offs, var) offs = var ? 0 : 1, ASSERT_NOT_REACHED()
# define READ_TLS_SLOT(offs, var) var = (void *)(ptr_uint_t)offs, ASSERT_NOT_REACHED()
#elif defined(MACOS64)
/* For now we have both a directly-addressable os_local_state_t and a pointer to
* it in slot 6. If we settle on always doing the full os_local_state_t in slots,
* we would probably get rid of the indirection here and directly access slot fields.
*/
# define WRITE_TLS_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
__asm__ __volatile__( \
"mov %%gs:%1, %%" ASM_XAX " \n\t" \
"movq %0, %c2(%%" ASM_XAX ") \n\t" \
: \
: "r"(var), "m"(*(void **)(DR_TLS_BASE_SLOT * sizeof(void *))), "i"(imm) \
: "memory", ASM_XAX);
# define READ_TLS_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
__asm__ __volatile__("mov %%gs:%1, %%" ASM_XAX " \n\t" \
"movq %c2(%%" ASM_XAX "), %0 \n\t" \
: "=r"(var) \
: "m"(*(void **)(DR_TLS_BASE_SLOT * sizeof(void *))), \
"i"(imm) \
: ASM_XAX);
# define WRITE_TLS_SLOT(offs, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
__asm__ __volatile__("mov %%gs:%0, %%" ASM_XAX " \n\t" \
"movzwq %1, %%" ASM_XDX " \n\t" \
"movq %2, (%%" ASM_XAX ", %%" ASM_XDX ") \n\t" \
: \
: "m"(*(void **)(DR_TLS_BASE_SLOT * sizeof(void *))), \
"m"(offs), "r"(var) \
: "memory", ASM_XAX, ASM_XDX);
# define READ_TLS_SLOT(offs, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
__asm__ __volatile__("mov %%gs:%1, %%" ASM_XAX " \n\t" \
"movzwq %2, %%" ASM_XDX " \n\t" \
"movq (%%" ASM_XAX ", %%" ASM_XDX "), %0 \n\t" \
: "=r"(var) \
: "m"(*(void **)(DR_TLS_BASE_SLOT * sizeof(void *))), \
"m"(offs) \
: "memory", ASM_XAX, ASM_XDX);
#elif defined(X86)
# define WRITE_TLS_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
asm volatile("mov %0, %" ASM_SEG ":%c1" : : "r"(var), "i"(imm));
# define READ_TLS_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
asm volatile("mov %" ASM_SEG ":%c1, %0" : "=r"(var) : "i"(imm));
# define WRITE_TLS_INT_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(int)); \
asm volatile("movl %0, %" ASM_SEG ":%c1" : : "r"(var), "i"(imm));
# define READ_TLS_INT_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(int)); \
asm volatile("movl %" ASM_SEG ":%c1, %0" : "=r"(var) : "i"(imm));
/* FIXME: need dedicated-storage var for _TLS_SLOT macros, can't use expr */
# define WRITE_TLS_SLOT(offs, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
ASSERT(sizeof(offs) == 2); \
asm("mov %0, %%" ASM_XAX : : "m"((var)) : ASM_XAX); \
asm("movzw" IF_X64_ELSE("q", "l") " %0, %%" ASM_XDX : : "m"((offs)) : ASM_XDX); \
asm("mov %%" ASM_XAX ", %" ASM_SEG ":(%%" ASM_XDX ")" : : : ASM_XAX, ASM_XDX);
# define READ_TLS_SLOT(offs, var) \
ASSERT(sizeof(var) == sizeof(void *)); \
ASSERT(sizeof(offs) == 2); \
asm("movzw" IF_X64_ELSE("q", "l") " %0, %%" ASM_XAX : : "m"((offs)) : ASM_XAX); \
asm("mov %" ASM_SEG ":(%%" ASM_XAX "), %%" ASM_XAX : : : ASM_XAX); \
asm("mov %%" ASM_XAX ", %0" : "=m"((var)) : : ASM_XAX);
#elif defined(AARCHXX)
/* Android needs indirection through a global. The Android toolchain has
* trouble with relocations if we use a global directly in asm, so we convert to
* a local variable in these macros. We pay the cost of the extra instructions
* for Linux ARM to share the code.
*/
# define WRITE_TLS_SLOT_IMM(imm, var) \
do { \
uint _base_offs = DR_TLS_BASE_OFFSET; \
__asm__ __volatile__("mov " ASM_R2 ", %0 \n\t" READ_TP_TO_R3_DISP_IN_R2 \
"str %1, [" ASM_R3 ", %2] \n\t" \
: \
: "r"(_base_offs), "r"(var), "i"(imm) \
: "memory", ASM_R2, ASM_R3); \
} while (0)
# define READ_TLS_SLOT_IMM(imm, var) \
do { \
uint _base_offs = DR_TLS_BASE_OFFSET; \
__asm__ __volatile__("mov " ASM_R2 ", %1 \n\t" READ_TP_TO_R3_DISP_IN_R2 \
"ldr %0, [" ASM_R3 ", %2] \n\t" \
: "=r"(var) \
: "r"(_base_offs), "i"(imm) \
: ASM_R2, ASM_R3); \
} while (0)
# define WRITE_TLS_INT_SLOT_IMM WRITE_TLS_SLOT_IMM /* b/c 32-bit */
# define READ_TLS_INT_SLOT_IMM READ_TLS_SLOT_IMM /* b/c 32-bit */
# define WRITE_TLS_SLOT(offs, var) \
do { \
uint _base_offs = DR_TLS_BASE_OFFSET; \
__asm__ __volatile__("mov " ASM_R2 ", %0 \n\t" READ_TP_TO_R3_DISP_IN_R2 \
"add " ASM_R3 ", " ASM_R3 ", %2 \n\t" \
"str %1, [" ASM_R3 "] \n\t" \
: \
: "r"(_base_offs), "r"(var), "r"(offs) \
: "memory", ASM_R2, ASM_R3); \
} while (0)
# define READ_TLS_SLOT(offs, var) \
do { \
uint _base_offs = DR_TLS_BASE_OFFSET; \
__asm__ __volatile__("mov " ASM_R2 ", %1 \n\t" READ_TP_TO_R3_DISP_IN_R2 \
"add " ASM_R3 ", " ASM_R3 ", %2 \n\t" \
"ldr %0, [" ASM_R3 "] \n\t" \
: "=r"(var) \
: "r"(_base_offs), "r"(offs) \
: ASM_R2, ASM_R3); \
} while (0)
#endif /* X86/ARM */
#ifdef X86
/* We use this at thread init and exit to make it easy to identify
* whether TLS is initialized (i#2089).
* We assume alignment does not matter.
*/
static os_local_state_t uninit_tls; /* has .magic == 0 */
#endif
static bool
is_thread_tls_initialized(void)
{
#ifdef MACOS64
/* For now we have both a directly-addressable os_local_state_t and a pointer to
* it in slot 6. If we settle on always doing the full os_local_state_t in slots,
* we would probably get rid of the indirection here and directly read the magic
* field from its slot.
*/
byte **tls_swap_slot;
tls_swap_slot = (byte **)get_app_tls_swap_slot_addr();
if (tls_swap_slot == NULL || *tls_swap_slot == NULL ||
*tls_swap_slot == TLS_SLOT_VAL_EXITED)
return false;
return true;
#elif defined(X86)
if (INTERNAL_OPTION(safe_read_tls_init)) {
/* Avoid faults during early init or during exit when we have no handler.
* It's not worth extending the handler as the faults are a perf hit anyway.
* For standalone_library, first_thread_tls_initialized will always be false,
* so we'll return false here and use our check in get_thread_private_dcontext().
*/
if (!first_thread_tls_initialized || last_thread_tls_exited)
return false;
/* i#3535: Avoid races between removing DR's SIGSEGV signal handler and
* detached threads being passed native signals. The detaching thread is
* the one doing all the real cleanup, so we simply avoid any safe reads
* or TLS for detaching threads. This var is not cleared until re-init,
* so we have no race with the end of detach.
*/
if (detacher_tid != INVALID_THREAD_ID && detacher_tid != get_sys_thread_id())
return false;
/* To handle WSL (i#1986) where fs and gs start out equal to ss (0x2b),
* and when the MSR is used having a zero selector, and other complexities,
* we just do a blind safe read as the simplest solution once we're past
* initial init and have a fault handler.
*
* i#2089: to avoid the perf cost of syscalls to verify the tid, and to
* distinguish a fork child from a separate-group thread, we no longer read
* the tid field and check that the TLS belongs to this particular thread:
* instead we rely on clearing the .magic field for child threads and at
* thread exit (to avoid a fault) and we simply check the field here.
* A native app thread is very unlikely to match this.
*/
return safe_read_tls_magic() == TLS_MAGIC_VALID;
} else {
/* XXX i#2089: we're keeping this legacy code around until
* we're confident that the safe read code above is safer, more
* performant, and more robust.
*/
os_local_state_t *os_tls = NULL;
ptr_uint_t cur_seg = read_thread_register(SEG_TLS);
/* Handle WSL (i#1986) where fs and gs start out equal to ss (0x2b) */
if (cur_seg != 0 && cur_seg != read_thread_register(SEG_SS)) {
/* XXX: make this a safe read: but w/o dcontext we need special asm support */
READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls);
}
# ifdef X64
if (os_tls == NULL && tls_dr_using_msr()) {
/* When the MSR is used, the selector in the register remains 0.
* We can't clear the MSR early in a new thread and then look for
* a zero base here b/c if kernel decides to use GDT that zeroing
* will set the selector, unless we want to assume we know when
* the kernel uses the GDT.
* Instead we make a syscall to get the tid. This should be ok
* perf-wise b/c the common case is the non-zero above.
*/
byte *base = tls_get_fs_gs_segment_base(SEG_TLS);
ASSERT(tls_global_type == TLS_TYPE_ARCH_PRCTL);
if (base != (byte *)POINTER_MAX && base != NULL) {
os_tls = (os_local_state_t *)base;
}
}
# endif
if (os_tls != NULL) {
return (os_tls->tid == get_sys_thread_id() ||
/* The child of a fork will initially come here */
os_tls->state.spill_space.dcontext->owning_process ==
get_parent_id());
} else
return false;
}
#elif defined(AARCHXX)
byte **dr_tls_base_addr;
if (tls_global_type == TLS_TYPE_NONE)
return false;
dr_tls_base_addr = (byte **)get_dr_tls_base_addr();
if (dr_tls_base_addr == NULL || *dr_tls_base_addr == NULL ||
/* We use the TLS slot's value to identify a now-exited thread (i#1578) */
*dr_tls_base_addr == TLS_SLOT_VAL_EXITED)
return false;
    /* We would like to ASSERT is_dynamo_address(*dr_tls_base_addr) but that leads
* to infinite recursion for an address not in the vm_reserve area, as
* dynamo_vm_areas_start_reading() ending up calling
* deadlock_avoidance_unlock() which calls get_thread_private_dcontext()
* which comes here.
*/
return true;
#endif
}
bool
is_DR_segment_reader_entry(app_pc pc)
{
/* This routine is used to avoid problems with dr_prepopulate_cache() building
* bbs for DR code that reads DR segments when DR is a static library.
* It's a little ugly but it's not clear there's a better solution.
* See the discussion in i#2463 c#2.
*/
#ifdef X86
if (INTERNAL_OPTION(safe_read_tls_init)) {
return pc == (app_pc)safe_read_tls_magic || pc == (app_pc)safe_read_tls_self;
}
#endif
/* XXX i#2463: for ARM and for -no_safe_read_tls_init it may be
* more complicated as the PC may not be a function entry but the
* start of a bb after a branch in our C code that uses inline asm
* to read the TLS.
*/
return false;
}
#if defined(X86) || defined(DEBUG)
static bool
is_thread_tls_allocated(void)
{
# if defined(X86) && !defined(MACOS64)
if (INTERNAL_OPTION(safe_read_tls_init)) {
/* We use this routine to allow currently-native threads, for which
* is_thread_tls_initialized() (and thus is_thread_initialized()) will
* return false.
* Caution: this will also return true on a fresh clone child.
*/
uint magic;
if (!first_thread_tls_initialized || last_thread_tls_exited)
return false;
magic = safe_read_tls_magic();
return magic == TLS_MAGIC_VALID || magic == TLS_MAGIC_INVALID;
}
# endif
return is_thread_tls_initialized();
}
#endif
/* converts a local_state_t offset to a segment offset */
ushort
os_tls_offset(ushort tls_offs)
{
/* no ushort truncation issues b/c TLS_LOCAL_STATE_OFFSET is 0 */
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
return (TLS_LOCAL_STATE_OFFSET + tls_offs IF_MACOS64(+tls_get_dr_offs()));
}
/* converts a segment offset to a local_state_t offset */
ushort
os_local_state_offset(ushort seg_offs)
{
/* no ushort truncation issues b/c TLS_LOCAL_STATE_OFFSET is 0 */
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
return (seg_offs - TLS_LOCAL_STATE_OFFSET IF_MACOS64(-tls_get_dr_offs()));
}
/* XXX: Will return NULL if called before os_thread_init(), which sets
* ostd->dr_fs/gs_base.
*/
void *
os_get_priv_tls_base(dcontext_t *dcontext, reg_id_t reg)
{
os_thread_data_t *ostd;
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(reg == TLS_REG_ALT || reg == TLS_REG_LIB);
if (dcontext == NULL)
dcontext = get_thread_private_dcontext();
if (dcontext == NULL)
return NULL;
ostd = (os_thread_data_t *)dcontext->os_field;
if (reg == TLS_REG_LIB)
return ostd->priv_lib_tls_base;
else if (reg == TLS_REG_ALT)
return ostd->priv_alt_tls_base;
ASSERT_NOT_REACHED();
return NULL;
}
os_local_state_t *
get_os_tls(void)
{
os_local_state_t *os_tls;
ASSERT(is_thread_tls_initialized());
READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls);
return os_tls;
}
/* Obtain TLS from dcontext directly, which succeeds in pre-thread-init
* situations where get_os_tls() fails.
*/
static os_local_state_t *
get_os_tls_from_dc(dcontext_t *dcontext)
{
byte *local_state;
ASSERT(dcontext != NULL);
local_state = (byte *)dcontext->local_state;
if (local_state == NULL)
return NULL;
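    /* dcontext->local_state points at the state field embedded inside
     * os_local_state_t; back up by its offset to recover the enclosing struct.
     */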
return (os_local_state_t *)(local_state - offsetof(os_local_state_t, state));
}
#ifdef AARCHXX
bool
os_set_app_tls_base(dcontext_t *dcontext, reg_id_t reg, void *base)
{
os_local_state_t *os_tls;
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(reg == TLS_REG_LIB || reg == TLS_REG_ALT);
if (dcontext == NULL)
dcontext = get_thread_private_dcontext();
/* we will be called only if TLS is initialized */
ASSERT(dcontext != NULL);
os_tls = get_os_tls_from_dc(dcontext);
if (reg == TLS_REG_LIB) {
os_tls->app_lib_tls_base = base;
LOG(THREAD, LOG_THREADS, 1, "TLS app lib base =" PFX "\n", base);
return true;
} else if (reg == TLS_REG_ALT) {
os_tls->app_alt_tls_base = base;
LOG(THREAD, LOG_THREADS, 1, "TLS app alt base =" PFX "\n", base);
return true;
}
ASSERT_NOT_REACHED();
return false;
}
#endif
void *
os_get_app_tls_base(dcontext_t *dcontext, reg_id_t reg)
{
os_local_state_t *os_tls;
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(reg == TLS_REG_LIB || reg == TLS_REG_ALT);
if (dcontext == NULL)
dcontext = get_thread_private_dcontext();
if (dcontext == NULL) {
/* No dcontext means we haven't initialized TLS, so we haven't replaced
* the app's segments. get_segment_base is expensive, but this should
* be rare. Re-examine if it pops up in a profile.
*/
return get_segment_base(reg);
}
os_tls = get_os_tls_from_dc(dcontext);
if (reg == TLS_REG_LIB)
return os_tls->app_lib_tls_base;
else if (reg == TLS_REG_ALT)
return os_tls->app_alt_tls_base;
ASSERT_NOT_REACHED();
return NULL;
}
ushort
os_get_app_tls_base_offset(reg_id_t reg)
{
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
if (reg == TLS_REG_LIB)
return TLS_APP_LIB_TLS_BASE_OFFSET;
else if (reg == TLS_REG_ALT)
return TLS_APP_ALT_TLS_BASE_OFFSET;
ASSERT_NOT_REACHED();
return 0;
}
#ifdef X86
ushort
os_get_app_tls_reg_offset(reg_id_t reg)
{
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
if (reg == TLS_REG_LIB)
return TLS_APP_LIB_TLS_REG_OFFSET;
else if (reg == TLS_REG_ALT)
return TLS_APP_ALT_TLS_REG_OFFSET;
ASSERT_NOT_REACHED();
return 0;
}
#endif
void *
d_r_get_tls(ushort tls_offs)
{
void *val;
READ_TLS_SLOT(tls_offs, val);
return val;
}
void
d_r_set_tls(ushort tls_offs, void *value)
{
WRITE_TLS_SLOT(tls_offs, value);
}
/* Returns POINTER_MAX on failure.
* Assumes that cs, ss, ds, and es are flat.
* Should we export this to clients? For now they can get
* this information via opnd_compute_address().
*/
byte *
get_segment_base(uint seg)
{
#ifdef MACOS64
ptr_uint_t *pthread_self = (ptr_uint_t *)read_thread_register(seg);
return (byte *)&pthread_self[SEG_TLS_BASE_OFFSET];
#elif defined(X86)
if (seg == SEG_CS || seg == SEG_SS || seg == SEG_DS || seg == SEG_ES)
return NULL;
# ifdef HAVE_TLS
return tls_get_fs_gs_segment_base(seg);
# else
return (byte *)POINTER_MAX;
# endif /* HAVE_TLS */
#elif defined(AARCHXX)
/* XXX i#1551: should we rename/refactor to avoid "segment"? */
return (byte *)read_thread_register(seg);
#endif
}
/* i#572: handle opnd_compute_address to return the application
* segment base value.
*/
byte *
get_app_segment_base(uint seg)
{
#ifdef X86
if (seg == SEG_CS || seg == SEG_SS || seg == SEG_DS || seg == SEG_ES)
return NULL;
#endif /* X86 */
if (INTERNAL_OPTION(private_loader) && first_thread_tls_initialized &&
!last_thread_tls_exited) {
return d_r_get_tls(os_get_app_tls_base_offset(seg));
}
return get_segment_base(seg);
}
local_state_extended_t *
get_local_state_extended()
{
os_local_state_t *os_tls;
ASSERT(is_thread_tls_initialized());
READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls);
return &(os_tls->state);
}
local_state_t *
get_local_state()
{
#ifdef HAVE_TLS
return (local_state_t *)get_local_state_extended();
#else
return NULL;
#endif
}
#ifdef DEBUG
void
os_enter_dynamorio(void)
{
# ifdef ARM
/* i#1578: check that app's tls value doesn't match our sentinel */
ASSERT(*(byte **)get_dr_tls_base_addr() != TLS_SLOT_VAL_EXITED);
# endif
}
#endif
/* i#107: handle segment register usage conflicts between app and dr:
* os_handle_mov_seg updates the app's tls selector maintained by DR.
* It is called before entering code cache in dispatch_enter_fcache.
*/
void
os_handle_mov_seg(dcontext_t *dcontext, byte *pc)
{
#ifdef X86
instr_t instr;
opnd_t opnd;
reg_id_t seg;
ushort sel = 0;
our_modify_ldt_t *desc;
int desc_idx;
os_local_state_t *os_tls;
os_thread_data_t *ostd;
instr_init(dcontext, &instr);
decode_cti(dcontext, pc, &instr);
/* the first instr must be mov seg */
ASSERT(instr_get_opcode(&instr) == OP_mov_seg);
opnd = instr_get_dst(&instr, 0);
ASSERT(opnd_is_reg(opnd));
seg = opnd_get_reg(opnd);
ASSERT(reg_is_segment(seg));
ostd = (os_thread_data_t *)dcontext->os_field;
desc = (our_modify_ldt_t *)ostd->app_thread_areas;
os_tls = get_os_tls();
/* get the selector value */
opnd = instr_get_src(&instr, 0);
if (opnd_is_reg(opnd)) {
sel = (ushort)reg_get_value_priv(opnd_get_reg(opnd), get_mcontext(dcontext));
} else {
void *ptr;
ptr = (ushort *)opnd_compute_address_priv(opnd, get_mcontext(dcontext));
ASSERT(ptr != NULL);
if (!d_r_safe_read(ptr, sizeof(sel), &sel)) {
/* FIXME: if invalid address, should deliver a signal to user. */
ASSERT_NOT_IMPLEMENTED(false);
}
}
/* calculate the entry_number */
desc_idx = SELECTOR_INDEX(sel) - tls_min_index();
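    /* SELECTOR_INDEX extracts the descriptor-table index from the selector;
     * subtracting tls_min_index() maps it to a slot in the app_thread_areas
     * copy captured at thread init.
     */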
if (seg == TLS_REG_LIB) {
os_tls->app_lib_tls_reg = sel;
os_tls->app_lib_tls_base = (void *)(ptr_uint_t)desc[desc_idx].base_addr;
} else {
os_tls->app_alt_tls_reg = sel;
os_tls->app_alt_tls_base = (void *)(ptr_uint_t)desc[desc_idx].base_addr;
}
instr_free(dcontext, &instr);
LOG(THREAD_GET, LOG_THREADS, 2,
"thread " TIDFMT " segment change %s to selector 0x%x => "
"app lib tls base: " PFX ", alt tls base: " PFX "\n",
d_r_get_thread_id(), reg_names[seg], sel, os_tls->app_lib_tls_base,
os_tls->app_alt_tls_base);
#elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_REACHED();
#endif /* X86/ARM */
}
/* Initialization for TLS mangling (-mangle_app_seg on x86).
* Must be called before DR setup its own segment.
*/
static void
os_tls_app_seg_init(os_local_state_t *os_tls, void *segment)
{
app_pc app_lib_tls_base, app_alt_tls_base;
#if defined(X86) && !defined(MACOS64)
int i, index;
our_modify_ldt_t *desc;
os_tls->app_lib_tls_reg = read_thread_register(TLS_REG_LIB);
os_tls->app_alt_tls_reg = read_thread_register(TLS_REG_ALT);
#endif
app_lib_tls_base = get_segment_base(TLS_REG_LIB);
app_alt_tls_base = get_segment_base(TLS_REG_ALT);
/* If we're a non-initial thread, tls will be set to the parent's value,
* or to &uninit_tls (i#2089), both of which will be is_dynamo_address().
*/
os_tls->app_lib_tls_base =
is_dynamo_address(app_lib_tls_base) ? NULL : app_lib_tls_base;
os_tls->app_alt_tls_base =
is_dynamo_address(app_alt_tls_base) ? NULL : app_alt_tls_base;
#if defined(X86) && !defined(MACOS64)
/* get all TLS thread area value */
/* XXX: is get_thread_area supported in 64-bit kernel?
* It has syscall number 211.
* It works for a 32-bit application running in a 64-bit kernel.
* It returns error value -38 for a 64-bit app in a 64-bit kernel.
*/
desc = &os_tls->os_seg_info.app_thread_areas[0];
tls_initialize_indices(os_tls);
index = tls_min_index();
for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
tls_get_descriptor(i + index, &desc[i]);
}
#endif /* X86 */
os_tls->os_seg_info.dr_tls_base = segment;
os_tls->os_seg_info.priv_alt_tls_base = IF_X86_ELSE(segment, NULL);
/* now allocate the tls segment for client libraries */
if (INTERNAL_OPTION(private_loader)) {
os_tls->os_seg_info.priv_lib_tls_base = IF_UNIT_TEST_ELSE(
os_tls->app_lib_tls_base, privload_tls_init(os_tls->app_lib_tls_base));
}
#if defined(X86) && !defined(MACOS64)
LOG(THREAD_GET, LOG_THREADS, 1,
"thread " TIDFMT " app lib tls reg: 0x%x, alt tls reg: 0x%x\n",
d_r_get_thread_id(), os_tls->app_lib_tls_reg, os_tls->app_alt_tls_reg);
#endif
LOG(THREAD_GET, LOG_THREADS, 1,
"thread " TIDFMT " app lib tls base: " PFX ", alt tls base: " PFX "\n",
d_r_get_thread_id(), os_tls->app_lib_tls_base, os_tls->app_alt_tls_base);
LOG(THREAD_GET, LOG_THREADS, 1,
"thread " TIDFMT " priv lib tls base: " PFX ", alt tls base: " PFX ", "
"DR's tls base: " PFX "\n",
d_r_get_thread_id(), os_tls->os_seg_info.priv_lib_tls_base,
os_tls->os_seg_info.priv_alt_tls_base, os_tls->os_seg_info.dr_tls_base);
}
void
os_tls_init(void)
{
#ifdef X86
ASSERT(TLS_MAGIC_OFFSET_ASM == TLS_MAGIC_OFFSET);
ASSERT(TLS_SELF_OFFSET_ASM == TLS_SELF_OFFSET);
#endif
#ifdef HAVE_TLS
/* We create a 1-page segment with an LDT entry for each thread and load its
* selector into fs/gs.
     * FIXME PR 205276: this whole scheme currently does not check if the app is
     * using segments itself; we need to watch the modify_ldt syscall.
*/
# ifdef MACOS64
/* Today we're allocating enough contiguous TLS slots to hold os_local_state_t.
* We also store a pointer to it in TLS slot 6.
*/
byte *segment = tls_get_dr_addr();
# else
byte *segment = heap_mmap(PAGE_SIZE, MEMPROT_READ | MEMPROT_WRITE,
VMM_SPECIAL_MMAP | VMM_PER_THREAD);
# endif
os_local_state_t *os_tls = (os_local_state_t *)segment;
LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init for thread " TIDFMT "\n",
d_r_get_thread_id());
ASSERT(!is_thread_tls_initialized());
/* MUST zero out dcontext slot so uninit access gets NULL */
memset(segment, 0, PAGE_SIZE);
/* store key data in the tls itself */
os_tls->self = os_tls;
os_tls->tid = get_sys_thread_id();
os_tls->tls_type = TLS_TYPE_NONE;
# ifdef X86
os_tls->magic = TLS_MAGIC_VALID;
# endif
/* We save DR's TLS segment base here so that os_get_dr_tls_base() will work
* even when -no_mangle_app_seg is set. If -mangle_app_seg is set, this
* will be overwritten in os_tls_app_seg_init().
*/
os_tls->os_seg_info.dr_tls_base = segment;
ASSERT(proc_is_cache_aligned(os_tls->self + TLS_LOCAL_STATE_OFFSET));
/* Verify that local_state_extended_t should indeed be used. */
ASSERT(DYNAMO_OPTION(ibl_table_in_tls));
/* initialize DR TLS seg base before replacing app's TLS in tls_thread_init */
if (MACHINE_TLS_IS_DR_TLS)
os_tls_app_seg_init(os_tls, segment);
tls_thread_init(os_tls, segment);
ASSERT(os_tls->tls_type != TLS_TYPE_NONE);
/* store type in global var for convenience: should be same for all threads */
tls_global_type = os_tls->tls_type;
/* FIXME: this should be a SYSLOG fatal error? Should fall back on !HAVE_TLS?
* Should have create_ldt_entry() return failure instead of asserting, then.
*/
#else
tls_table = (tls_slot_t *)global_heap_alloc(MAX_THREADS *
sizeof(tls_slot_t) HEAPACCT(ACCT_OTHER));
memset(tls_table, 0, MAX_THREADS * sizeof(tls_slot_t));
#endif
if (!first_thread_tls_initialized) {
first_thread_tls_initialized = true;
if (last_thread_tls_exited) /* re-attach */
last_thread_tls_exited = false;
}
ASSERT(is_thread_tls_initialized());
}
static bool
should_zero_tls_at_thread_exit()
{
#ifdef X86
/* i#2089: For a thread w/o CLONE_SIGHAND we cannot handle a fault, so we want to
* leave &uninit_tls (which was put in place in os_thread_exit()) as long as
* possible. For non-detach, that means until the exit.
*/
return !INTERNAL_OPTION(safe_read_tls_init) || doing_detach;
#else
return true;
#endif
}
/* TLS exit for the current thread who must own local_state. */
void
os_tls_thread_exit(local_state_t *local_state)
{
#ifdef HAVE_TLS
/* We assume (assert below) that local_state_t's start == local_state_extended_t */
os_local_state_t *os_tls =
(os_local_state_t *)(((byte *)local_state) - offsetof(os_local_state_t, state));
tls_type_t tls_type = os_tls->tls_type;
int index = os_tls->ldt_index;
ASSERT(offsetof(local_state_t, spill_space) ==
offsetof(local_state_extended_t, spill_space));
if (should_zero_tls_at_thread_exit()) {
tls_thread_free(tls_type, index);
# if defined(X86) && defined(X64) && !defined(MACOS)
if (tls_type == TLS_TYPE_ARCH_PRCTL) {
/* syscall re-sets gs register so re-clear it */
if (read_thread_register(SEG_TLS) != 0) {
static const ptr_uint_t zero = 0;
WRITE_DR_SEG(zero); /* macro needs lvalue! */
}
}
# endif
}
/* We already set TLS to &uninit_tls in os_thread_exit() */
/* Do not set last_thread_tls_exited if a client_thread is exiting.
* If set, get_thread_private_dcontext() returns NULL, which may cause
     * another thread to fault when using the dcontext.
*/
if (dynamo_exited_all_other_threads && !last_thread_tls_exited) {
last_thread_tls_exited = true;
first_thread_tls_initialized = false; /* for possible re-attach */
}
#endif
}
/* Frees local_state. If the calling thread is exiting (i.e.,
* !other_thread) then also frees kernel resources for the calling
* thread; if other_thread then that may not be possible.
*/
void
os_tls_exit(local_state_t *local_state, bool other_thread)
{
#ifdef HAVE_TLS
# if defined(X86) && !defined(MACOS64)
static const ptr_uint_t zero = 0;
# endif /* X86 */
/* We can't read from fs: as we can be called from other threads */
# if defined(X86) && !defined(MACOS64)
/* If the MSR is in use, writing to the reg faults. We rely on it being 0
* to indicate that.
*/
if (!other_thread && read_thread_register(SEG_TLS) != 0 &&
should_zero_tls_at_thread_exit()) {
WRITE_DR_SEG(zero); /* macro needs lvalue! */
}
# endif /* X86 */
/* For another thread we can't really make these syscalls so we have to
* leave it un-cleaned-up. That's fine if the other thread is exiting:
* but for detach (i#95) we get the other thread to run this code.
*/
if (!other_thread)
os_tls_thread_exit(local_state);
# ifndef MACOS64
/* We can't free prior to tls_thread_free() in case that routine refs os_tls */
/* ASSUMPTION: local_state_t is laid out at same start as local_state_extended_t */
os_local_state_t *os_tls =
(os_local_state_t *)(((byte *)local_state) - offsetof(os_local_state_t, state));
heap_munmap(os_tls->self, PAGE_SIZE, VMM_SPECIAL_MMAP | VMM_PER_THREAD);
# endif
#else
global_heap_free(tls_table, MAX_THREADS * sizeof(tls_slot_t) HEAPACCT(ACCT_OTHER));
DELETE_LOCK(tls_lock);
#endif
}
static int
os_tls_get_gdt_index(dcontext_t *dcontext)
{
os_local_state_t *os_tls = (os_local_state_t *)(((byte *)dcontext->local_state) -
offsetof(os_local_state_t, state));
if (os_tls->tls_type == TLS_TYPE_GDT)
return os_tls->ldt_index;
else
return -1;
}
void
os_tls_pre_init(int gdt_index)
{
#if defined(X86) && !defined(MACOS64)
/* Only set to above 0 for tls_type == TLS_TYPE_GDT */
if (gdt_index > 0) {
/* PR 458917: clear gdt slot to avoid leak across exec */
DEBUG_DECLARE(bool ok;)
static const ptr_uint_t zero = 0;
/* Be sure to clear the selector before anything that might
* call get_thread_private_dcontext()
*/
WRITE_DR_SEG(zero); /* macro needs lvalue! */
DEBUG_DECLARE(ok =)
tls_clear_descriptor(gdt_index);
ASSERT(ok);
}
#elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
}
/* Allocates num_slots tls slots aligned with alignment align */
bool
os_tls_calloc(OUT uint *offset, uint num_slots, uint alignment)
{
bool res = false;
uint i, count = 0;
int start = -1;
uint offs = offsetof(os_local_state_t, client_tls);
if (num_slots == 0 || num_slots > MAX_NUM_CLIENT_TLS)
return false;
d_r_mutex_lock(&client_tls_lock);
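    /* Linear scan for num_slots contiguous free slots; the candidate run restarts
     * whenever an already-allocated or misaligned slot is encountered.
     */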
for (i = 0; i < MAX_NUM_CLIENT_TLS; i++) {
if (!client_tls_allocated[i] &&
/* ALIGNED doesn't work for 0 */
(alignment == 0 || ALIGNED(offs + i * sizeof(void *), alignment))) {
if (start == -1)
start = i;
count++;
if (count >= num_slots)
break;
} else {
start = -1;
count = 0;
}
}
if (count >= num_slots) {
for (i = 0; i < num_slots; i++)
client_tls_allocated[i + start] = true;
*offset = offs + start * sizeof(void *);
res = true;
}
d_r_mutex_unlock(&client_tls_lock);
return res;
}
bool
os_tls_cfree(uint offset, uint num_slots)
{
uint i;
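    /* Convert the byte offset handed out by os_tls_calloc() back into a slot index. */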
uint offs = (offset - offsetof(os_local_state_t, client_tls)) / sizeof(void *);
bool ok = true;
d_r_mutex_lock(&client_tls_lock);
for (i = 0; i < num_slots; i++) {
if (!client_tls_allocated[i + offs])
ok = false;
client_tls_allocated[i + offs] = false;
}
d_r_mutex_unlock(&client_tls_lock);
return ok;
}
/* os_data is a clone_record_t for signal_thread_inherit */
void
os_thread_init(dcontext_t *dcontext, void *os_data)
{
os_local_state_t *os_tls = get_os_tls();
os_thread_data_t *ostd = (os_thread_data_t *)heap_alloc(
dcontext, sizeof(os_thread_data_t) HEAPACCT(ACCT_OTHER));
dcontext->os_field = (void *)ostd;
/* make sure stack fields, etc. are 0 now so they can be initialized on demand
* (don't have app esp register handy here to init now)
*/
memset(ostd, 0, sizeof(*ostd));
ksynch_init_var(&ostd->suspended);
ksynch_init_var(&ostd->wakeup);
ksynch_init_var(&ostd->resumed);
ksynch_init_var(&ostd->terminated);
ksynch_init_var(&ostd->detached);
#ifdef RETURN_AFTER_CALL
/* We only need the stack bottom for the initial thread, and due to thread
* init now preceding vm_areas_init(), we initialize in find_executable_vm_areas()
*/
ostd->stack_bottom_pc = NULL;
#endif
ASSIGN_INIT_LOCK_FREE(ostd->suspend_lock, suspend_lock);
signal_thread_init(dcontext, os_data);
    /* i#107: initialize thread area information;
     * the values were first obtained in os_tls_init and stored in os_tls.
*/
ostd->priv_lib_tls_base = os_tls->os_seg_info.priv_lib_tls_base;
ostd->priv_alt_tls_base = os_tls->os_seg_info.priv_alt_tls_base;
ostd->dr_tls_base = os_tls->os_seg_info.dr_tls_base;
LOG(THREAD, LOG_THREADS, 1, "TLS app lib base =" PFX "\n", os_tls->app_lib_tls_base);
LOG(THREAD, LOG_THREADS, 1, "TLS app alt base =" PFX "\n", os_tls->app_alt_tls_base);
LOG(THREAD, LOG_THREADS, 1, "TLS priv lib base =" PFX "\n", ostd->priv_lib_tls_base);
LOG(THREAD, LOG_THREADS, 1, "TLS priv alt base =" PFX "\n", ostd->priv_alt_tls_base);
LOG(THREAD, LOG_THREADS, 1, "TLS DynamoRIO base=" PFX "\n", ostd->dr_tls_base);
#ifdef X86
if (INTERNAL_OPTION(mangle_app_seg)) {
ostd->app_thread_areas = heap_alloc(
dcontext, sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS HEAPACCT(ACCT_OTHER));
memcpy(ostd->app_thread_areas, os_tls->os_seg_info.app_thread_areas,
sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS);
}
#endif
LOG(THREAD, LOG_THREADS, 1, "post-TLS-setup, cur %s base is " PFX "\n",
IF_X86_ELSE("gs", "tpidruro"),
get_segment_base(IF_X86_ELSE(SEG_GS, DR_REG_TPIDRURO)));
LOG(THREAD, LOG_THREADS, 1, "post-TLS-setup, cur %s base is " PFX "\n",
IF_X86_ELSE("fs", "tpidrurw"),
get_segment_base(IF_X86_ELSE(SEG_FS, DR_REG_TPIDRURW)));
#ifdef MACOS
/* XXX: do we need to free/close dcontext->thread_port? I don't think so. */
dcontext->thread_port = dynamorio_mach_syscall(MACH_thread_self_trap, 0);
LOG(THREAD, LOG_ALL, 1, "Mach thread port: %d\n", dcontext->thread_port);
#endif
}
/* os_data is a clone_record_t for signal_thread_inherit */
void
os_thread_init_finalize(dcontext_t *dcontext, void *os_data)
{
/* We do not want to record pending signals until at least synch_thread_init()
* is finished so we delay until here: but we need this inside the
* thread_initexit_lock (i#2779).
*/
signal_thread_inherit(dcontext, os_data);
}
void
os_thread_exit(dcontext_t *dcontext, bool other_thread)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
/* i#237/PR 498284: if we had a vfork child call execve we need to clean up
* the env vars.
*/
if (dcontext->thread_record->execve)
handle_execve_post(dcontext);
DELETE_LOCK(ostd->suspend_lock);
signal_thread_exit(dcontext, other_thread);
ksynch_free_var(&ostd->suspended);
ksynch_free_var(&ostd->wakeup);
ksynch_free_var(&ostd->resumed);
ksynch_free_var(&ostd->terminated);
ksynch_free_var(&ostd->detached);
#ifdef X86
if (ostd->clone_tls != NULL) {
if (!other_thread) {
/* Avoid faults in is_thread_tls_initialized() */
/* FIXME i#2088: we need to restore the app's aux seg, if any, instead. */
os_set_dr_tls_base(dcontext, NULL, (byte *)&uninit_tls);
}
/* We have to free in release build too b/c "local unprotected" is global. */
HEAP_TYPE_FREE(dcontext, ostd->clone_tls, os_local_state_t, ACCT_THREAD_MGT,
UNPROTECTED);
}
#endif
if (INTERNAL_OPTION(private_loader))
privload_tls_exit(IF_UNIT_TEST_ELSE(NULL, ostd->priv_lib_tls_base));
/* for non-debug we do fast exit path and don't free local heap */
DODEBUG({
if (MACHINE_TLS_IS_DR_TLS) {
#ifdef X86
heap_free(dcontext, ostd->app_thread_areas,
sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS HEAPACCT(ACCT_OTHER));
#endif
}
heap_free(dcontext, ostd, sizeof(os_thread_data_t) HEAPACCT(ACCT_OTHER));
});
}
/* Happens in the parent prior to fork. */
static void
os_fork_pre(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
/* Otherwise a thread might wait for us. */
ASSERT_OWN_NO_LOCKS();
ASSERT(ostd->fork_threads == NULL && ostd->fork_num_threads == 0);
/* i#239: Synch with all other threads to ensure that they are holding no
* locks across the fork.
* FIXME i#26: Suspend signals received before initializing siginfo are
* squelched, so we won't be able to suspend threads that are initializing.
*/
LOG(GLOBAL, 2, LOG_SYSCALLS | LOG_THREADS,
"fork: synching with other threads to prevent deadlock in child\n");
if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT_OR_NO_XFER,
&ostd->fork_threads, &ostd->fork_num_threads,
THREAD_SYNCH_VALID_MCONTEXT,
/* If we fail to suspend a thread, there is a
* risk of deadlock in the child, so it's worth
* retrying on failure.
*/
THREAD_SYNCH_SUSPEND_FAILURE_RETRY)) {
        /* If we failed to synch with all threads, we live with the possibility
* of deadlock and continue as normal.
*/
LOG(GLOBAL, 1, LOG_SYSCALLS | LOG_THREADS,
"fork: synch failed, possible deadlock in child\n");
ASSERT_CURIOSITY(false);
}
vmm_heap_fork_pre(dcontext);
/* We go back to the code cache to execute the syscall, so we can't hold
* locks. If the synch succeeded, no one else is running, so it should be
* safe to release these locks. However, if there are any rogue threads,
* then releasing these locks will allow them to synch and create threads.
* Such threads could be running due to synch failure or presence of
* non-suspendable client threads. We keep our data in ostd to prevent some
* conflicts, but there are some unhandled corner cases.
*/
d_r_mutex_unlock(&thread_initexit_lock);
d_r_mutex_unlock(&all_threads_synch_lock);
}
/* Happens after the fork in both the parent and child. */
static void
os_fork_post(dcontext_t *dcontext, bool parent)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
/* Re-acquire the locks we released before the fork. */
d_r_mutex_lock(&all_threads_synch_lock);
d_r_mutex_lock(&thread_initexit_lock);
/* Resume the other threads that we suspended. */
if (parent) {
LOG(GLOBAL, 2, LOG_SYSCALLS | LOG_THREADS,
"fork: resuming other threads after fork\n");
}
end_synch_with_all_threads(ostd->fork_threads, ostd->fork_num_threads,
parent /*resume in parent, not in child*/);
ostd->fork_threads = NULL; /* Freed by end_synch_with_all_threads. */
ostd->fork_num_threads = 0;
vmm_heap_fork_post(dcontext, parent);
}
/* this one is called before child's new logfiles are set up */
void
os_fork_init(dcontext_t *dcontext)
{
int iter;
/* We use a larger data size than file_t to avoid clobbering our stack (i#991) */
ptr_uint_t fd;
ptr_uint_t flags;
/* Static assert would save debug build overhead: could use array bound trick */
ASSERT(sizeof(file_t) <= sizeof(ptr_uint_t));
/* i#239: If there were unsuspended threads across the fork, we could have
* forked while another thread held locks. We reset the locks and try to
* cope with any intermediate state left behind from the parent. If we
* encounter more deadlocks after fork, we can add more lock and data resets
* on a case by case basis.
*/
d_r_mutex_fork_reset(&all_threads_synch_lock);
d_r_mutex_fork_reset(&thread_initexit_lock);
os_fork_post(dcontext, false /*!parent*/);
/* re-populate cached data that contains pid */
pid_cached = get_process_id();
get_application_pid_helper(true);
get_application_name_helper(true, true /* not important */);
/* close all copies of parent files */
TABLE_RWLOCK(fd_table, write, lock);
iter = 0;
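    /* Walk the table with the generic iterator; generic_hash_iterate_remove()
     * returns an updated iteration index so the walk can continue safely after
     * removal.
     */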
do {
iter = generic_hash_iterate_next(GLOBAL_DCONTEXT, fd_table, iter, &fd,
(void **)&flags);
if (iter < 0)
break;
if (TEST(OS_OPEN_CLOSE_ON_FORK, flags)) {
close_syscall((file_t)fd);
iter = generic_hash_iterate_remove(GLOBAL_DCONTEXT, fd_table, iter, fd);
}
} while (true);
TABLE_RWLOCK(fd_table, write, unlock);
}
static void
os_swap_dr_tls(dcontext_t *dcontext, bool to_app)
{
#ifdef X86
/* If the option is off, we really should swap it (xref i#107/i#2088 comments
* in os_swap_context()) but there are few consequences of not doing it, and we
* have no code set up separate from the i#2089 scheme here.
*/
if (!INTERNAL_OPTION(safe_read_tls_init))
return;
if (to_app) {
/* i#2089: we want the child to inherit a TLS with invalid .magic, but we
* need our own syscall execution and post-syscall code to have valid scratch
* and dcontext values. We can't clear our own magic b/c we don't know when
* the child will be scheduled, so we use a copy of our TLS. We carefully
* never have a valid magic there in case a prior child is still unscheduled.
*
* We assume the child will not modify this TLS copy in any way.
         * CLONE_SETTLS touches the other segment (we'll have to watch for
* addition of CLONE_SETTLS_AUX). The parent will use the scratch space
* returning from the syscall to d_r_dispatch, but we restore via os_clone_post()
* immediately before anybody calls get_thread_private_dcontext() or
* anything.
*/
/* FIXME i#2088: to preserve the app's aux seg, if any, we should pass it
* and the seg reg value via the clone record (like we do for ARM today).
*/
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
os_local_state_t *cur_tls = get_os_tls_from_dc(dcontext);
if (ostd->clone_tls == NULL) {
ostd->clone_tls = (os_local_state_t *)HEAP_TYPE_ALLOC(
dcontext, os_local_state_t, ACCT_THREAD_MGT, UNPROTECTED);
LOG(THREAD, LOG_THREADS, 2, "TLS copy is " PFX "\n", ostd->clone_tls);
}
/* Leave no window where a prior uninit child could read valid magic by
* invalidating prior to copying.
*/
cur_tls->magic = TLS_MAGIC_INVALID;
memcpy(ostd->clone_tls, cur_tls, sizeof(*ostd->clone_tls));
cur_tls->magic = TLS_MAGIC_VALID;
ostd->clone_tls->self = ostd->clone_tls;
os_set_dr_tls_base(dcontext, NULL, (byte *)ostd->clone_tls);
} else {
/* i#2089: restore the parent's DR TLS */
os_local_state_t *real_tls = get_os_tls_from_dc(dcontext);
/* For dr_app_start we can end up here with nothing to do, so we check. */
if (get_segment_base(SEG_TLS) != (byte *)real_tls) {
DEBUG_DECLARE(os_thread_data_t *ostd =
(os_thread_data_t *)dcontext->os_field);
ASSERT(get_segment_base(SEG_TLS) == (byte *)ostd->clone_tls);
/* We assume there's no need to copy the scratch slots back */
os_set_dr_tls_base(dcontext, real_tls, (byte *)real_tls);
}
}
#elif defined(AARCHXX)
/* For aarchxx we don't have a separate thread register for DR, and we
* always leave the DR pointer in the slot inside the app's or privlib's TLS.
* That means we have nothing to do here.
* For SYS_clone, we are ok with the parent's TLS being inherited until
* new_thread_setup() calls set_thread_register_from_clone_record().
*/
#endif
}
static void
os_new_thread_pre(void)
{
/* We use a barrier on new threads to ensure we make progress when
* attaching to an app that is continually making threads.
* XXX i#1305: if we fully suspend all threads during attach we can
* get rid of this barrier.
*/
wait_for_event(dr_attach_finished, 0);
ATOMIC_INC(int, uninit_thread_count);
}
/* This is called from pre_system_call() and before cloning a client thread in
* dr_create_client_thread. Hence os_clone_pre is used for app threads as well
* as client threads. Do not add anything that we do not want to happen while
* in DR mode.
*/
static void
os_clone_pre(dcontext_t *dcontext)
{
/* We switch the lib tls segment back to app's segment.
* Please refer to comment on os_switch_lib_tls.
*/
if (INTERNAL_OPTION(private_loader)) {
os_switch_lib_tls(dcontext, true /*to app*/);
}
os_swap_dr_tls(dcontext, true /*to app*/);
}
/* This is called from d_r_dispatch prior to post_system_call() and after
* cloning a client thread in dr_create_client_thread. Hence os_clone_post is
* used for app threads as well as client threads. Do not add anything that
* we do not want to happen while in DR mode.
*/
void
os_clone_post(dcontext_t *dcontext)
{
os_swap_dr_tls(dcontext, false /*to DR*/);
}
byte *
os_get_dr_tls_base(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
return ostd->dr_tls_base;
}
/* We only bother swapping the library segment if we're using the private
* loader.
*/
bool
os_should_swap_state(void)
{
#ifdef X86
/* -private_loader currently implies -mangle_app_seg, but let's be safe. */
return (INTERNAL_OPTION(mangle_app_seg) && INTERNAL_OPTION(private_loader));
#elif defined(AARCHXX)
return INTERNAL_OPTION(private_loader);
#endif
}
bool
os_using_app_state(dcontext_t *dcontext)
{
#ifdef X86
/* FIXME: This could be optimized to avoid the syscall by keeping state in
* the dcontext.
*/
if (INTERNAL_OPTION(mangle_app_seg)) {
return (get_segment_base(TLS_REG_LIB) ==
os_get_app_tls_base(dcontext, TLS_REG_LIB));
}
#endif
/* We're always in the app state if we're not mangling. */
return true;
}
/* Similar to PEB swapping on Windows, this call will switch between DR's
* private lib segment base and the app's segment base.
* i#107/i#2088: If the app wants to use SEG_TLS, we should also switch that back at
* this boundary, but there are many places where we simply assume it is always
* installed.
*/
void
os_swap_context(dcontext_t *dcontext, bool to_app, dr_state_flags_t flags)
{
if (os_should_swap_state())
os_switch_seg_to_context(dcontext, LIB_SEG_TLS, to_app);
if (TEST(DR_STATE_DR_TLS, flags))
os_swap_dr_tls(dcontext, to_app);
}
void
os_thread_under_dynamo(dcontext_t *dcontext)
{
os_swap_context(dcontext, false /*to dr*/, DR_STATE_GO_NATIVE);
signal_swap_mask(dcontext, false /*to dr*/);
start_itimer(dcontext);
}
void
os_thread_not_under_dynamo(dcontext_t *dcontext)
{
stop_itimer(dcontext);
signal_swap_mask(dcontext, true /*to app*/);
os_swap_context(dcontext, true /*to app*/, DR_STATE_GO_NATIVE);
}
void
os_process_under_dynamorio_initiate(dcontext_t *dcontext)
{
LOG(GLOBAL, LOG_THREADS, 1, "process now under DR\n");
/* We only support regular process-wide signal handlers for delayed takeover. */
/* i#2161: we ignore alarm signals during the attach process to avoid races. */
signal_reinstate_handlers(dcontext, true /*ignore alarm*/);
/* XXX: there's a tradeoff here: we have a race when we remove the hook
* because dr_app_stop() has no barrier and a thread sent native might
* resume from vsyscall after we remove the hook. However, if we leave the
* hook, then the next takeover signal might hit a native thread that's
* inside DR just to go back native after having hit the hook. For now we
* remove the hook and rely on translate_from_synchall_to_dispatch() moving
* threads from vsyscall to our gencode and not relying on the hook being
* present to finish up their go-native code.
*/
hook_vsyscall(dcontext, false);
}
void
os_process_under_dynamorio_complete(dcontext_t *dcontext)
{
/* i#2161: only now do we un-ignore alarm signals. */
signal_reinstate_alarm_handlers(dcontext);
IF_NO_MEMQUERY({
/* Update the memory cache (i#2037) now that we've taken over all the
* threads, if there may have been a gap between setup and start.
*/
if (dr_api_entry)
memcache_update_all_from_os();
});
}
void
os_process_not_under_dynamorio(dcontext_t *dcontext)
{
/* We only support regular process-wide signal handlers for mixed-mode control. */
signal_remove_handlers(dcontext);
unhook_vsyscall();
LOG(GLOBAL, LOG_THREADS, 1, "process no longer under DR\n");
}
bool
detach_do_not_translate(thread_record_t *tr)
{
return false;
}
void
detach_finalize_translation(thread_record_t *tr, priv_mcontext_t *mc)
{
/* Nothing to do. */
}
void
detach_finalize_cleanup(void)
{
/* Nothing to do. */
}
static pid_t
get_process_group_id()
{
return dynamorio_syscall(SYS_getpgid, 0);
}
process_id_t
get_parent_id(void)
{
return dynamorio_syscall(SYS_getppid, 0);
}
thread_id_t
get_sys_thread_id(void)
{
#ifdef MACOS
if (kernel_thread_groups)
return dynamorio_syscall(SYS_thread_selfid, 0);
#else
if (kernel_thread_groups)
return dynamorio_syscall(SYS_gettid, 0);
#endif
return dynamorio_syscall(SYS_getpid, 0);
}
thread_id_t
d_r_get_thread_id(void)
{
/* i#228/PR 494330: making a syscall here is a perf bottleneck since we call
* this routine in read and recursive locks so use the TLS value instead
*/
thread_id_t id = get_tls_thread_id();
if (id != INVALID_THREAD_ID)
return id;
else
return get_sys_thread_id();
}
thread_id_t
get_tls_thread_id(void)
{
ptr_int_t tid; /* can't use thread_id_t since it's 32-bits */
if (!is_thread_tls_initialized())
return INVALID_THREAD_ID;
READ_TLS_SLOT_IMM(TLS_THREAD_ID_OFFSET, tid);
    /* The load reads 8 bytes from memory, which includes app_gs and app_fs:
     *   0x000000007127357b <get_tls_thread_id+37>: mov %gs:(%rax),%rax
     *   0x000000007127357f <get_tls_thread_id+41>: mov %rax,-0x8(%rbp)
     * so we remove the TRUNCATE check and truncate the value on return.
     */
return (thread_id_t)tid;
}
/* returns the thread-private dcontext pointer for the calling thread */
dcontext_t *
get_thread_private_dcontext(void)
{
#ifdef HAVE_TLS
dcontext_t *dcontext;
/* We have to check this b/c this is called from __errno_location prior
* to os_tls_init, as well as after os_tls_exit, and early in a new
* thread's initialization (see comments below on that).
*/
if (!is_thread_tls_initialized())
return standalone_library ? GLOBAL_DCONTEXT : NULL;
/* We used to check tid and return NULL to distinguish parent from child, but
* that was affecting performance (xref PR 207366: but I'm leaving the assert in
* for now so debug build will still incur it). So we fixed the cases that
* needed that:
*
* - dynamo_thread_init() calling is_thread_initialized() for a new thread
* created via clone or the start/stop interface: so we have
* is_thread_initialized() pay the d_r_get_thread_id() cost.
* - new_thread_setup()'s ENTER_DR_HOOK kstats, or a crash and the signal
* handler asking about dcontext: we have new_thread_dynamo_start()
* clear the segment register for us early on.
* - child of fork (ASSERT_OWN_NO_LOCKS, etc. on re-entering DR):
* here we just suppress the assert: we'll use this same dcontext.
* xref PR 209518 where w/o this fix we used to need an extra KSTOP.
*
* An alternative would be to have the parent thread clear the segment
* register, or even set up the child's TLS ahead of time ourselves
* (and special-case so that we know if at clone syscall the app state is not
* quite correct: but we're already stealing a register there: PR 286194).
* We could also have the kernel set up TLS for us (PR 285898).
*
* For hotp_only or non-full-control (native_exec, e.g.) (PR 212012), this
* routine is not the only issue: we have to catch all new threads since
* hotp_only gateways assume tls is set up.
* Xref PR 192231.
*/
/* PR 307698: this assert causes large slowdowns (also xref PR 207366) */
DOCHECK(CHKLVL_DEFAULT + 1, {
ASSERT(get_tls_thread_id() == get_sys_thread_id() ||
/* ok for fork as mentioned above */
pid_cached != get_process_id());
});
READ_TLS_SLOT_IMM(TLS_DCONTEXT_OFFSET, dcontext);
return dcontext;
#else
/* Assumption: no lock needed on a read => no race conditions between
* reading and writing same tid! Since both get and set are only for
* the current thread, they cannot both execute simultaneously for the
* same tid, right?
*/
thread_id_t tid = d_r_get_thread_id();
int i;
if (tls_table != NULL) {
for (i = 0; i < MAX_THREADS; i++) {
if (tls_table[i].tid == tid) {
return tls_table[i].dcontext;
}
}
}
return NULL;
#endif
}
/* sets the thread-private dcontext pointer for the calling thread */
void
set_thread_private_dcontext(dcontext_t *dcontext)
{
#ifdef HAVE_TLS
ASSERT(is_thread_tls_allocated());
WRITE_TLS_SLOT_IMM(TLS_DCONTEXT_OFFSET, dcontext);
#else
thread_id_t tid = d_r_get_thread_id();
int i;
bool found = false;
ASSERT(tls_table != NULL);
d_r_mutex_lock(&tls_lock);
for (i = 0; i < MAX_THREADS; i++) {
if (tls_table[i].tid == tid) {
if (dcontext == NULL) {
/* if setting to NULL, clear the entire slot for reuse */
tls_table[i].tid = 0;
}
tls_table[i].dcontext = dcontext;
found = true;
break;
}
}
if (!found) {
if (dcontext == NULL) {
/* don't do anything...but why would this happen? */
} else {
/* look for an empty slot */
for (i = 0; i < MAX_THREADS; i++) {
if (tls_table[i].tid == 0) {
tls_table[i].tid = tid;
tls_table[i].dcontext = dcontext;
found = true;
break;
}
}
}
}
d_r_mutex_unlock(&tls_lock);
ASSERT(found);
#endif
}
/* replaces old with new
* use for forking: child should replace parent's id with its own
*/
static void
replace_thread_id(thread_id_t old, thread_id_t new)
{
#ifdef HAVE_TLS
thread_id_t new_tid = new;
ASSERT(is_thread_tls_initialized());
DOCHECK(1, {
thread_id_t old_tid;
IF_LINUX_ELSE(READ_TLS_INT_SLOT_IMM(TLS_THREAD_ID_OFFSET, old_tid),
READ_TLS_SLOT_IMM(TLS_THREAD_ID_OFFSET, old_tid));
ASSERT(old_tid == old);
});
IF_LINUX_ELSE(WRITE_TLS_INT_SLOT_IMM(TLS_THREAD_ID_OFFSET, new_tid),
WRITE_TLS_SLOT_IMM(TLS_THREAD_ID_OFFSET, new_tid));
#else
int i;
d_r_mutex_lock(&tls_lock);
for (i = 0; i < MAX_THREADS; i++) {
if (tls_table[i].tid == old) {
tls_table[i].tid = new;
break;
}
}
d_r_mutex_unlock(&tls_lock);
#endif
}
/* translate native flags to platform independent protection bits */
static inline uint
osprot_to_memprot(uint prot)
{
uint mem_prot = 0;
if (TEST(PROT_EXEC, prot))
mem_prot |= MEMPROT_EXEC;
if (TEST(PROT_READ, prot))
mem_prot |= MEMPROT_READ;
if (TEST(PROT_WRITE, prot))
mem_prot |= MEMPROT_WRITE;
return mem_prot;
}
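/* For example, osprot_to_memprot(PROT_READ | PROT_WRITE) yields
 * MEMPROT_READ | MEMPROT_WRITE; memprot_to_osprot() converts in the opposite
 * direction when protections are handed back to mmap()/mprotect().
 */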
/* returns osprot flags preserving all native protection flags except
* for RWX, which are replaced according to memprot */
uint
osprot_replace_memprot(uint old_osprot, uint memprot)
{
/* Note only protection flags PROT_ are relevant to mprotect()
* and they are separate from any other MAP_ flags passed to mmap()
*/
uint new_osprot = memprot_to_osprot(memprot);
return new_osprot;
}
/* libc independence */
static inline long
mprotect_syscall(byte *p, size_t size, uint prot)
{
return dynamorio_syscall(SYS_mprotect, 3, p, size, prot);
}
/* free memory allocated from os_raw_mem_alloc */
bool
os_raw_mem_free(void *p, size_t size, uint flags, heap_error_code_t *error_code)
{
long rc;
ASSERT(error_code != NULL);
ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
rc = munmap_syscall(p, size);
if (rc != 0) {
*error_code = -rc;
} else {
*error_code = HEAP_ERROR_SUCCESS;
}
return (rc == 0);
}
/* try to alloc memory at preferred from os directly,
* caller is required to handle thread synchronization and to update
*/
void *
os_raw_mem_alloc(void *preferred, size_t size, uint prot, uint flags,
heap_error_code_t *error_code)
{
byte *p;
uint os_prot = memprot_to_osprot(prot);
uint os_flags =
MAP_PRIVATE | MAP_ANONYMOUS | (TEST(RAW_ALLOC_32BIT, flags) ? MAP_32BIT : 0);
ASSERT(error_code != NULL);
/* should only be used on aligned pieces */
ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
p = mmap_syscall(preferred, size, os_prot, os_flags, -1, 0);
if (!mmap_syscall_succeeded(p)) {
*error_code = -(heap_error_code_t)(ptr_int_t)p;
LOG(GLOBAL, LOG_HEAP, 3, "os_raw_mem_alloc %d bytes failed" PFX "\n", size, p);
return NULL;
}
if (preferred != NULL && p != preferred) {
*error_code = HEAP_ERROR_NOT_AT_PREFERRED;
os_raw_mem_free(p, size, flags, error_code);
LOG(GLOBAL, LOG_HEAP, 3, "os_raw_mem_alloc %d bytes failed" PFX "\n", size, p);
return NULL;
}
LOG(GLOBAL, LOG_HEAP, 2, "os_raw_mem_alloc: " SZFMT " bytes @ " PFX "\n", size, p);
return p;
}
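/* Illustrative usage sketch (not part of the build), assuming the caller handles
 * the synchronization noted above:
 *
 *     heap_error_code_t err;
 *     byte *p = os_raw_mem_alloc(NULL, PAGE_SIZE, MEMPROT_READ | MEMPROT_WRITE,
 *                                0, &err);
 *     if (p != NULL) {
 *         ... use the page ...
 *         os_raw_mem_free(p, PAGE_SIZE, 0, &err);
 *     }
 */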
#ifdef LINUX
void
init_emulated_brk(app_pc exe_end)
{
ASSERT(DYNAMO_OPTION(emulate_brk));
if (app_brk_map != NULL) {
return;
}
/* i#1004: emulate brk via a separate mmap. The real brk starts out empty, but
* we need at least a page to have an mmap placeholder. We also want to reserve
* enough memory to avoid a client lib or other mmap truncating the brk at a
* too-small size, which can crash the app (i#3982).
*/
# define BRK_INITIAL_SIZE (4 * 1024 * 1024)
app_brk_map = mmap_syscall(exe_end, BRK_INITIAL_SIZE, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
ASSERT(mmap_syscall_succeeded(app_brk_map));
app_brk_cur = app_brk_map;
app_brk_end = app_brk_map + BRK_INITIAL_SIZE;
LOG(GLOBAL, LOG_HEAP, 1, "%s: initial brk is " PFX "-" PFX "\n", __FUNCTION__,
app_brk_cur, app_brk_end);
}
static byte *
emulate_app_brk(dcontext_t *dcontext, byte *new_val)
{
byte *old_brk = app_brk_cur;
ASSERT(DYNAMO_OPTION(emulate_brk));
LOG(THREAD, LOG_HEAP, 2, "%s: cur=" PFX ", requested=" PFX "\n", __FUNCTION__,
app_brk_cur, new_val);
new_val = (byte *)ALIGN_FORWARD(new_val, PAGE_SIZE);
if (new_val == NULL || new_val == app_brk_cur ||
/* Not allowed to shrink below original base */
new_val < app_brk_map) {
/* Just return cur val */
} else if (new_val < app_brk_cur) {
/* Shrink */
if (munmap_syscall(new_val, app_brk_cur - new_val) == 0) {
app_brk_cur = new_val;
app_brk_end = new_val;
}
} else if (new_val < app_brk_end) {
/* We've already allocated the space */
app_brk_cur = new_val;
} else {
/* Expand */
byte *remap = (byte *)dynamorio_syscall(SYS_mremap, 4, app_brk_map,
app_brk_end - app_brk_map,
new_val - app_brk_map, 0 /*do not move*/);
if (mmap_syscall_succeeded(remap)) {
ASSERT(remap == app_brk_map);
app_brk_cur = new_val;
app_brk_end = new_val;
} else {
LOG(THREAD, LOG_HEAP, 1, "%s: mremap to " PFX " failed\n", __FUNCTION__,
new_val);
}
}
if (app_brk_cur != old_brk)
handle_app_brk(dcontext, app_brk_map, old_brk, app_brk_cur);
return app_brk_cur;
}
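/* Worked example of the emulation above (illustrative addresses): starting from
 * app_brk_map == app_brk_cur == 0x1000000 and app_brk_end == 0x1400000 (the
 * initial 4MB reservation), a request of 0x1002000 stays within the existing
 * mapping and only advances app_brk_cur; a request of 0x1800000 takes the
 * mremap path and moves both app_brk_cur and app_brk_end to 0x1800000; a later
 * request of 0x1002000 takes the shrink path and munmaps [0x1002000, 0x1800000).
 * Requests below app_brk_map simply return the current break unchanged.
 */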
#endif /* LINUX */
#ifdef LINUX
DR_API
/* XXX: could add dr_raw_mem_realloc() instead of dr_raw_mremap() -- though there
* is no realloc for Windows: supposed to reserve yourself and then commit in
* pieces.
*/
void *
dr_raw_mremap(void *old_address, size_t old_size, size_t new_size, int flags,
void *new_address)
{
byte *res;
dr_mem_info_t info;
dcontext_t *dcontext = get_thread_private_dcontext();
/* i#173: we need prot + type from prior to mremap */
DEBUG_DECLARE(bool ok =)
query_memory_ex(old_address, &info);
/* XXX: this could be a large region w/ multiple protection regions
* inside. For now we assume our handling of it doesn't care.
*/
ASSERT(ok);
if (is_pretend_or_executable_writable(old_address))
info.prot |= DR_MEMPROT_WRITE;
/* we just unconditionally send the 5th param */
res = (byte *)dynamorio_syscall(SYS_mremap, 5, old_address, old_size, new_size, flags,
new_address);
handle_app_mremap(dcontext, res, new_size, old_address, old_size, info.prot,
info.size);
return res;
}
DR_API
void *
dr_raw_brk(void *new_address)
{
dcontext_t *dcontext = get_thread_private_dcontext();
if (DYNAMO_OPTION(emulate_brk)) {
/* i#1004: emulate brk via a separate mmap */
return (void *)emulate_app_brk(dcontext, (byte *)new_address);
} else {
/* We pay the cost of 2 syscalls. This should be infrequent enough that
         * it doesn't matter.
*/
if (new_address == NULL) {
/* Just a query */
return (void *)dynamorio_syscall(SYS_brk, 1, new_address);
} else {
byte *old_brk = (byte *)dynamorio_syscall(SYS_brk, 1, 0);
byte *res = (byte *)dynamorio_syscall(SYS_brk, 1, new_address);
handle_app_brk(dcontext, NULL, old_brk, res);
return res;
}
}
}
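/* Illustrative usage sketch (not part of the build): a client querying and then
 * extending the (possibly emulated) program break.
 *
 *     byte *cur = (byte *)dr_raw_brk(NULL);            // query only
 *     byte *res = (byte *)dr_raw_brk(cur + PAGE_SIZE); // request one more page
 *     // res is the new (page-aligned) break, or the old value on failure.
 */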
#endif /* LINUX */
/* caller is required to handle thread synchronization and to update dynamo vm areas */
void
os_heap_free(void *p, size_t size, heap_error_code_t *error_code)
{
long rc;
ASSERT(error_code != NULL);
if (!dynamo_exited)
LOG(GLOBAL, LOG_HEAP, 4, "os_heap_free: %d bytes @ " PFX "\n", size, p);
rc = munmap_syscall(p, size);
if (rc != 0) {
*error_code = -rc;
} else {
*error_code = HEAP_ERROR_SUCCESS;
}
ASSERT(rc == 0);
}
/* Reserve virtual address space without committing swap space for it,
 * and of course no physical pages since it will never be touched.
 * To be transparent, we do not use sbrk; we instead use mmap and assert
 * that all os_heap requests are for reasonably large pieces of memory.
 */
void *
os_heap_reserve(void *preferred, size_t size, heap_error_code_t *error_code,
bool executable)
{
void *p;
uint prot = PROT_NONE;
#ifdef VMX86_SERVER
/* PR 365331: we need to be in the mmap_text region for code cache and
* gencode (PROT_EXEC).
*/
ASSERT(!os_in_vmkernel_userworld() || !executable || preferred == NULL ||
((byte *)preferred >= os_vmk_mmap_text_start() &&
((byte *)preferred) + size <= os_vmk_mmap_text_end()));
/* Note that a preferred address overrides PROT_EXEC and a mmap_data
* address will be honored, even though any execution there will fault.
*/
/* FIXME: note that PROT_EXEC => read access, so our guard pages and other
* non-committed memory, while not writable, is readable.
* Plus, we can't later clear all prot bits for userworld mmap due to PR 107872
* (PR 365748 covers fixing this for us).
* But in most uses we should get our preferred vmheap and shouldn't run
* out of vmheap, so this should be a corner-case issue.
*/
if (executable)
prot = PROT_EXEC;
#endif
/* should only be used on aligned pieces */
ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
ASSERT(error_code != NULL);
/* FIXME: note that this memory is in fact still committed - see man mmap */
/* FIXME: case 2347 on Linux or -vm_reserve should be set to false */
    /* FIXME: Need to actually do the mmap with MAP_NORESERVE */
p = mmap_syscall(
preferred, size, prot,
MAP_PRIVATE |
MAP_ANONYMOUS IF_X64(| (DYNAMO_OPTION(heap_in_lower_4GB) ? MAP_32BIT : 0)),
-1, 0);
if (!mmap_syscall_succeeded(p)) {
*error_code = -(heap_error_code_t)(ptr_int_t)p;
LOG(GLOBAL, LOG_HEAP, 4, "os_heap_reserve %d bytes failed " PFX "\n", size, p);
return NULL;
} else if (preferred != NULL && p != preferred) {
/* We didn't get the preferred address. To harmonize with windows behavior and
* give greater control we fail the reservation. */
heap_error_code_t dummy;
*error_code = HEAP_ERROR_NOT_AT_PREFERRED;
os_heap_free(p, size, &dummy);
ASSERT(dummy == HEAP_ERROR_SUCCESS);
LOG(GLOBAL, LOG_HEAP, 4,
"os_heap_reserve %d bytes at " PFX " not preferred " PFX "\n", size,
preferred, p);
return NULL;
} else {
*error_code = HEAP_ERROR_SUCCESS;
}
LOG(GLOBAL, LOG_HEAP, 2, "os_heap_reserve: %d bytes @ " PFX "\n", size, p);
#ifdef VMX86_SERVER
/* PR 365331: ensure our memory is all in the mmap_text region */
ASSERT(!os_in_vmkernel_userworld() || !executable ||
((byte *)p >= os_vmk_mmap_text_start() &&
((byte *)p) + size <= os_vmk_mmap_text_end()));
#endif
#if defined(ANDROID) && defined(DEBUG)
/* We don't label in release to be more transparent */
dynamorio_syscall(SYS_prctl, 5, PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size,
"DynamoRIO-internal");
#endif
return p;
}
static bool
find_free_memory_in_region(byte *start, byte *end, size_t size, byte **found_start OUT,
byte **found_end OUT)
{
memquery_iter_t iter;
/* XXX: despite /proc/sys/vm/mmap_min_addr == PAGE_SIZE, mmap won't
* give me that address if I use it as a hint.
*/
app_pc last_end = (app_pc)(PAGE_SIZE * 16);
bool found = false;
memquery_iterator_start(&iter, NULL, false /*won't alloc*/);
while (memquery_iterator_next(&iter)) {
if (iter.vm_start >= start &&
MIN(iter.vm_start, end) - MAX(last_end, start) >= size) {
if (found_start != NULL)
*found_start = MAX(last_end, start);
if (found_end != NULL)
*found_end = MIN(iter.vm_start, end);
found = true;
break;
}
if (iter.vm_end >= end)
break;
last_end = iter.vm_end;
}
memquery_iterator_stop(&iter);
return found;
}
void *
os_heap_reserve_in_region(void *start, void *end, size_t size,
heap_error_code_t *error_code, bool executable)
{
byte *p = NULL;
byte *try_start = NULL, *try_end = NULL;
uint iters = 0;
ASSERT(ALIGNED(start, PAGE_SIZE) && ALIGNED(end, PAGE_SIZE));
ASSERT(ALIGNED(size, PAGE_SIZE));
LOG(GLOBAL, LOG_HEAP, 3,
"os_heap_reserve_in_region: " SZFMT " bytes in " PFX "-" PFX "\n", size, start,
end);
/* if no restriction on location use regular os_heap_reserve() */
if (start == (void *)PTR_UINT_0 && end == (void *)POINTER_MAX)
return os_heap_reserve(NULL, size, error_code, executable);
/* loop to handle races */
#define RESERVE_IN_REGION_MAX_ITERS 128
while (find_free_memory_in_region(start, end, size, &try_start, &try_end)) {
/* If there's space we'd prefer the end, to avoid the common case of
* a large binary + heap at attach where we're likely to reserve
* right at the start of the brk: we'd prefer to leave more brk space.
*/
p = os_heap_reserve(try_end - size, size, error_code, executable);
if (p != NULL) {
ASSERT(*error_code == HEAP_ERROR_SUCCESS);
ASSERT(p >= (byte *)start && p + size <= (byte *)end);
break;
}
if (++iters > RESERVE_IN_REGION_MAX_ITERS) {
ASSERT_NOT_REACHED();
break;
}
}
if (p == NULL)
*error_code = HEAP_ERROR_CANT_RESERVE_IN_REGION;
else
*error_code = HEAP_ERROR_SUCCESS;
LOG(GLOBAL, LOG_HEAP, 2,
"os_heap_reserve_in_region: reserved " SZFMT " bytes @ " PFX " in " PFX "-" PFX
"\n",
size, p, start, end);
return p;
}
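/* Illustrative sketch (not part of the build): reserving memory near an existing
 * allocation, e.g. to keep it rel32-reachable. Both region bounds must be
 * page-aligned, and "anchor" is a hypothetical page-aligned address.
 *
 *     heap_error_code_t err;
 *     void *p = os_heap_reserve_in_region(anchor - 0x40000000, anchor + 0x40000000,
 *                                         64 * PAGE_SIZE, &err, true);
 *     // On success p lies inside the requested region; otherwise p is NULL and
 *     // err == HEAP_ERROR_CANT_RESERVE_IN_REGION.
 */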
/* commit previously reserved with os_heap_reserve pages */
/* returns false when out of memory */
/* A replacement of os_heap_alloc can be constructed by using os_heap_reserve
and os_heap_commit on a subset of the reserved pages. */
/* caller is required to handle thread synchronization */
bool
os_heap_commit(void *p, size_t size, uint prot, heap_error_code_t *error_code)
{
uint os_prot = memprot_to_osprot(prot);
long res;
/* should only be used on aligned pieces */
ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
ASSERT(p);
ASSERT(error_code != NULL);
    /* FIXME: note that the memory would not be truly committed here if
     * os_heap_reserve() had actually done its mmap with MAP_NORESERVE.
     */
res = mprotect_syscall(p, size, os_prot);
if (res != 0) {
*error_code = -res;
return false;
} else {
*error_code = HEAP_ERROR_SUCCESS;
}
LOG(GLOBAL, LOG_HEAP, 2, "os_heap_commit: %d bytes @ " PFX "\n", size, p);
return true;
}
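/* Illustrative sketch (not part of the build) of the reserve-then-commit pattern
 * described above, standing in for a hypothetical os_heap_alloc():
 *
 *     heap_error_code_t err;
 *     size_t reserve_sz = 16 * PAGE_SIZE;
 *     void *base = os_heap_reserve(NULL, reserve_sz, &err, false);
 *     if (base != NULL &&
 *         os_heap_commit(base, PAGE_SIZE, MEMPROT_READ | MEMPROT_WRITE, &err)) {
 *         ... use the first committed page; commit more later as needed ...
 *     }
 */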
/* caller is required to handle thread synchronization and to update dynamo vm areas */
void
os_heap_decommit(void *p, size_t size, heap_error_code_t *error_code)
{
int rc;
ASSERT(error_code != NULL);
if (!dynamo_exited)
LOG(GLOBAL, LOG_HEAP, 4, "os_heap_decommit: %d bytes @ " PFX "\n", size, p);
*error_code = HEAP_ERROR_SUCCESS;
/* FIXME: for now do nothing since os_heap_reserve has in fact committed the memory */
rc = 0;
    /* TODO: we should either do an mremap(), or do a munmap() followed 'quickly'
     * by a fresh mapping, e.g.:
     *     p = mmap_syscall(p, size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
     * Also see the comment above: os_heap_reserve() is in fact not so lightweight.
     */
ASSERT(rc == 0);
}
bool
os_heap_systemwide_overcommit(heap_error_code_t last_error_code)
{
/* FIXME: conservative answer yes */
return true;
}
bool
os_heap_get_commit_limit(size_t *commit_used, size_t *commit_limit)
{
/* FIXME - NYI */
return false;
}
/* yield the current thread */
void
os_thread_yield()
{
#ifdef MACOS
/* XXX i#1291: use raw syscall instead */
swtch_pri(0);
#else
dynamorio_syscall(SYS_sched_yield, 0);
#endif
}
bool
thread_signal(process_id_t pid, thread_id_t tid, int signum)
{
#ifdef MACOS
/* FIXME i#58: this takes in a thread port. Need to map thread id to port.
* Need to figure out whether we support raw Mach threads w/o pthread on top.
*/
ASSERT_NOT_IMPLEMENTED(false);
return false;
#else
/* FIXME: for non-NPTL use SYS_kill */
/* Note that the pid is equivalent to the thread group id.
* However, we can have threads sharing address space but not pid
* (if created via CLONE_VM but not CLONE_THREAD), so make sure to
* use the pid of the target thread, not our pid.
*/
return (dynamorio_syscall(SYS_tgkill, 3, pid, tid, signum) == 0);
#endif
}
static bool
known_thread_signal(thread_record_t *tr, int signum)
{
#ifdef MACOS
ptr_int_t res;
if (tr->dcontext == NULL)
        return false;
res = dynamorio_syscall(SYS___pthread_kill, 2, tr->dcontext->thread_port, signum);
LOG(THREAD_GET, LOG_ALL, 3, "%s: signal %d to port %d => %ld\n", __FUNCTION__, signum,
tr->dcontext->thread_port, res);
return res == 0;
#else
return thread_signal(tr->pid, tr->id, signum);
#endif
}
void
os_thread_sleep(uint64 milliseconds)
{
#ifdef MACOS
semaphore_t sem = MACH_PORT_NULL;
int res;
#else
struct timespec remain;
int count = 0;
#endif
struct timespec req;
req.tv_sec = (milliseconds / 1000);
/* docs say can go up to 1000000000, but doesn't work on FC9 */
req.tv_nsec = (milliseconds % 1000) * 1000000;
#ifdef MACOS
if (sem == MACH_PORT_NULL) {
DEBUG_DECLARE(kern_return_t res =)
semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
ASSERT(res == KERN_SUCCESS);
}
res =
dynamorio_syscall(SYSNUM_NO_CANCEL(SYS___semwait_signal), 6, sem, MACH_PORT_NULL,
1, 1, (int64_t)req.tv_sec, (int32_t)req.tv_nsec);
if (res == -EINTR) {
/* FIXME i#58: figure out how much time elapsed and re-wait */
}
#else
/* FIXME: if we need accurate sleeps in presence of itimers we should
* be using SYS_clock_nanosleep w/ an absolute time instead of relative
*/
while (dynamorio_syscall(SYS_nanosleep, 2, &req, &remain) == -EINTR) {
/* interrupted by signal or something: finish the interval */
ASSERT_CURIOSITY_ONCE(remain.tv_sec <= req.tv_sec &&
(remain.tv_sec < req.tv_sec ||
/* there seems to be some rounding, and sometimes
* remain nsec > req nsec (I've seen 40K diff)
*/
req.tv_nsec - remain.tv_nsec < 100000 ||
req.tv_nsec - remain.tv_nsec > -100000));
/* not unusual for client threads to use itimers and have their run
* routine sleep forever
*/
if (count++ > 3 && !IS_CLIENT_THREAD(get_thread_private_dcontext())) {
ASSERT_NOT_REACHED();
break; /* paranoid */
}
req = remain;
}
#endif
}
bool
os_thread_suspend(thread_record_t *tr)
{
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
/* See synch comments in os_thread_resume: the mutex held there
* prevents prematurely sending a re-suspend signal.
*/
d_r_mutex_lock(&ostd->suspend_lock);
ostd->suspend_count++;
ASSERT(ostd->suspend_count > 0);
/* If already suspended, do not send another signal. However, we do
* need to ensure the target is suspended in case of a race, so we can't
* just return.
*/
if (ostd->suspend_count == 1) {
/* PR 212090: we use a custom signal handler to suspend. We wait
* here until the target reaches the suspend point, and leave it
* up to the caller to check whether it is a safe suspend point,
* to match Windows behavior.
*/
ASSERT(ksynch_get_value(&ostd->suspended) == 0);
if (!known_thread_signal(tr, SUSPEND_SIGNAL)) {
ostd->suspend_count--;
d_r_mutex_unlock(&ostd->suspend_lock);
return false;
}
}
/* we can unlock before the wait loop b/c we're using a separate "resumed"
* int and os_thread_resume holds the lock across its wait. this way a resume
* can proceed as soon as the suspended thread is suspended, before the
* suspending thread gets scheduled again.
*/
d_r_mutex_unlock(&ostd->suspend_lock);
while (ksynch_get_value(&ostd->suspended) == 0) {
        /* On Linux, this waits only if the suspended flag is not already set to 1.
         * The return value doesn't matter because the flag will be re-checked.
         */
/* We time out and assert in debug build to provide better diagnostics than a
* silent hang. We can't safely return false b/c the synch model here
* assumes there will not be a retry until the target reaches the suspend
* point. Xref i#2779.
*/
#define SUSPEND_DEBUG_TIMEOUT_MS 5000
if (ksynch_wait(&ostd->suspended, 0, SUSPEND_DEBUG_TIMEOUT_MS) == -ETIMEDOUT) {
ASSERT_CURIOSITY(false && "failed to suspend thread in 5s");
}
if (ksynch_get_value(&ostd->suspended) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
}
return true;
}
bool
os_thread_resume(thread_record_t *tr)
{
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
/* This mutex prevents sending a re-suspend signal before the target
* reaches a safe post-resume point from a first suspend signal.
* Given that race, we can't just use atomic_add_exchange_int +
* atomic_dec_becomes_zero on suspend_count.
*/
d_r_mutex_lock(&ostd->suspend_lock);
ASSERT(ostd->suspend_count > 0);
/* PR 479750: if do get here and target is not suspended then abort
* to avoid possible deadlocks
*/
if (ostd->suspend_count == 0) {
d_r_mutex_unlock(&ostd->suspend_lock);
return true; /* the thread is "resumed", so success status */
}
ostd->suspend_count--;
if (ostd->suspend_count > 0) {
d_r_mutex_unlock(&ostd->suspend_lock);
return true; /* still suspended */
}
ksynch_set_value(&ostd->wakeup, 1);
ksynch_wake(&ostd->wakeup);
while (ksynch_get_value(&ostd->resumed) == 0) {
        /* On Linux, this waits only if the resumed flag is not already set to 1.
         * The return value doesn't matter because the flag will be re-checked.
         */
ksynch_wait(&ostd->resumed, 0, 0);
if (ksynch_get_value(&ostd->resumed) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
}
ksynch_set_value(&ostd->wakeup, 0);
ksynch_set_value(&ostd->resumed, 0);
d_r_mutex_unlock(&ostd->suspend_lock);
return true;
}
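/* Descriptive summary of the suspend/resume handshake built on the synch
 * variables used above (the target-side half lives in the suspend signal
 * handler): the suspender sends SUSPEND_SIGNAL and waits on ostd->suspended;
 * the target's handler sets "suspended" and then blocks on ostd->wakeup;
 * os_thread_resume() sets and wakes "wakeup", then waits for the target to set
 * ostd->resumed before clearing both flags for the next suspend cycle.
 */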
bool
os_thread_terminate(thread_record_t *tr)
{
/* PR 297902: for NPTL sending SIGKILL will take down the whole group:
* so instead we send SIGUSR2 and have a flag set telling
* target thread to execute SYS_exit
*/
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
ostd->terminate = true;
/* Even if the thread is currently suspended, it's simpler to send it
* another signal than to resume it.
*/
return known_thread_signal(tr, SUSPEND_SIGNAL);
}
bool
is_thread_terminated(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(ostd != NULL);
return (ksynch_get_value(&ostd->terminated) == 1);
}
static void
os_wait_thread_futex(KSYNCH_TYPE *var)
{
while (ksynch_get_value(var) == 0) {
        /* On Linux, this waits only if var is not already set to 1. The return
         * value doesn't matter because var will be re-checked.
         */
ksynch_wait(var, 0, 0);
if (ksynch_get_value(var) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
}
}
void
os_wait_thread_terminated(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(ostd != NULL);
os_wait_thread_futex(&ostd->terminated);
}
void
os_wait_thread_detached(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(ostd != NULL);
os_wait_thread_futex(&ostd->detached);
}
void
os_signal_thread_detach(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(ostd != NULL);
ostd->do_detach = true;
}
bool
thread_get_mcontext(thread_record_t *tr, priv_mcontext_t *mc)
{
/* PR 212090: only works when target is suspended by us, and
* we then take the signal context
*/
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
ASSERT(ostd->suspend_count > 0);
if (ostd->suspend_count == 0)
return false;
ASSERT(ostd->suspended_sigcxt != NULL);
sigcontext_to_mcontext(mc, ostd->suspended_sigcxt, DR_MC_ALL);
IF_ARM(dr_set_isa_mode(tr->dcontext, get_sigcontext_isa_mode(ostd->suspended_sigcxt),
NULL));
return true;
}
bool
thread_set_mcontext(thread_record_t *tr, priv_mcontext_t *mc)
{
/* PR 212090: only works when target is suspended by us, and
* we then replace the signal context
*/
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
ASSERT(ostd->suspend_count > 0);
if (ostd->suspend_count == 0)
return false;
ASSERT(ostd->suspended_sigcxt != NULL);
mcontext_to_sigcontext(ostd->suspended_sigcxt, mc, DR_MC_ALL);
IF_ARM(
set_sigcontext_isa_mode(ostd->suspended_sigcxt, dr_get_isa_mode(tr->dcontext)));
return true;
}
/* Only one of mc and dmc can be non-NULL. */
bool
os_context_to_mcontext(dr_mcontext_t *dmc, priv_mcontext_t *mc, os_cxt_ptr_t osc)
{
if (dmc != NULL)
sigcontext_to_mcontext(dr_mcontext_as_priv_mcontext(dmc), &osc, dmc->flags);
else if (mc != NULL)
sigcontext_to_mcontext(mc, &osc, DR_MC_ALL);
else
return false;
return true;
}
/* Only one of mc and dmc can be non-NULL. */
bool
mcontext_to_os_context(os_cxt_ptr_t osc, dr_mcontext_t *dmc, priv_mcontext_t *mc)
{
if (dmc != NULL)
mcontext_to_sigcontext(&osc, dr_mcontext_as_priv_mcontext(dmc), dmc->flags);
else if (mc != NULL)
mcontext_to_sigcontext(&osc, mc, DR_MC_ALL);
else
return false;
return true;
}
bool
is_thread_currently_native(thread_record_t *tr)
{
return (!tr->under_dynamo_control ||
/* start/stop doesn't change under_dynamo_control and has its own field */
(tr->dcontext != NULL && tr->dcontext->currently_stopped));
}
#ifdef LINUX /* XXX i#58: just until we have Mac support */
static void
client_thread_run(void)
{
void (*func)(void *param);
dcontext_t *dcontext;
byte *xsp;
GET_STACK_PTR(xsp);
void *crec = get_clone_record((reg_t)xsp);
/* i#2335: we support setup separate from start, and we want to allow a client
* to create a client thread during init, but we do not support that thread
* executing until the app has started (b/c we have no signal handlers in place).
*/
/* i#3973: in addition to _executing_ a client thread before the
* app has started, if we even create the thread before
* dynamo_initialized is set, we will not copy tls blocks. By
* waiting for the app to be started before dynamo_thread_init is
* called, we ensure this race condition can never happen, since
* dynamo_initialized will always be set before the app is started.
*/
wait_for_event(dr_app_started, 0);
IF_DEBUG(int rc =)
dynamo_thread_init(get_clone_record_dstack(crec), NULL, crec, true);
ASSERT(rc != -1); /* this better be a new thread */
dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
LOG(THREAD, LOG_ALL, 1, "\n***** CLIENT THREAD %d *****\n\n", d_r_get_thread_id());
/* We stored the func and args in particular clone record fields */
func = (void (*)(void *param))dcontext->next_tag;
/* Reset any inherited mask (i#2337). */
signal_swap_mask(dcontext, false /*to DR*/);
void *arg = (void *)get_clone_record_app_xsp(crec);
LOG(THREAD, LOG_ALL, 1, "func=" PFX ", arg=" PFX "\n", func, arg);
(*func)(arg);
LOG(THREAD, LOG_ALL, 1, "\n***** CLIENT THREAD %d EXITING *****\n\n",
d_r_get_thread_id());
block_cleanup_and_terminate(dcontext, SYS_exit, 0, 0, false /*just thread*/,
IF_MACOS_ELSE(dcontext->thread_port, 0), 0);
}
#endif
/* i#41/PR 222812: client threads
* * thread must have dcontext since many API routines require one and we
* don't expose GLOBAL_DCONTEXT (xref PR 243008, PR 216936, PR 536058)
* * reversed the old design of not using dstack (partly b/c want dcontext)
* and I'm using the same parent-creates-dstack and clone_record_t design
* to create linux threads: dstack should be big enough for client threads
* (xref PR 202669)
* * reversed the old design of explicit dr_terminate_client_thread(): now
* the thread is auto-terminated and stack cleaned up on return from run
* function
*/
DR_API bool
dr_create_client_thread(void (*func)(void *param), void *arg)
{
#ifdef LINUX
dcontext_t *dcontext = get_thread_private_dcontext();
byte *xsp;
/* We do not pass SIGCHLD since don't want signal to parent and don't support
* waiting on child.
* We do not pass CLONE_THREAD so that the new thread is in its own thread
* group, allowing it to have private itimers and not receive any signals
* sent to the app's thread groups. It also makes the thread not show up in
* the thread list for the app, making it more invisible.
*/
uint flags = CLONE_VM | CLONE_FS | CLONE_FILES |
CLONE_SIGHAND
        /* On vmkernel, CLONE_THREAD is required; signals and itimers are
         * private anyway.
         */
IF_VMX86(| (os_in_vmkernel_userworld() ? CLONE_THREAD : 0));
pre_second_thread();
/* need to share signal handler table, prior to creating clone record */
handle_clone(dcontext, flags);
ATOMIC_INC(int, uninit_thread_count);
void *crec = create_clone_record(dcontext, (reg_t *)&xsp, NULL, NULL);
/* make sure client_thread_run can get the func and arg, and that
* signal_thread_inherit gets the right syscall info
*/
set_clone_record_fields(crec, (reg_t)arg, (app_pc)func, SYS_clone, flags);
LOG(THREAD, LOG_ALL, 1, "dr_create_client_thread xsp=" PFX " dstack=" PFX "\n", xsp,
get_clone_record_dstack(crec));
/* i#501 switch to app's tls before creating client thread.
* i#3526 switch DR's tls to an invalid one before cloning, and switch lib_tls
* to the app's.
*/
os_clone_pre(dcontext);
# ifdef AARCHXX
/* We need to invalidate DR's TLS to avoid get_thread_private_dcontext() finding one
* and hitting asserts in dynamo_thread_init lock calls -- yet we don't want to for
* app threads, so we're doing this here and not in os_clone_pre().
* XXX: Find a way to put this in os_clone_* to simplify the code?
*/
void *tls = (void *)read_thread_register(LIB_SEG_TLS);
write_thread_register(NULL);
# endif
thread_id_t newpid = dynamorio_clone(flags, xsp, NULL, NULL, NULL, client_thread_run);
/* i#3526 switch DR's tls back to the original one before cloning. */
os_clone_post(dcontext);
# ifdef AARCHXX
write_thread_register(tls);
# endif
/* i#501 the app's tls was switched in os_clone_pre. */
if (INTERNAL_OPTION(private_loader))
os_switch_lib_tls(dcontext, false /*to dr*/);
if (newpid < 0) {
LOG(THREAD, LOG_ALL, 1, "client thread creation failed: %d\n", newpid);
return false;
} else if (newpid == 0) {
/* dynamorio_clone() should have called client_thread_run directly */
ASSERT_NOT_REACHED();
return false;
}
return true;
#else
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#58: implement on Mac */
return false;
#endif
}
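/* Illustrative usage sketch (not part of the build): a client creating a
 * background thread. The names my_thread_func and my_arg are hypothetical.
 *
 *     static void my_thread_func(void *arg)
 *     {
 *         // Runs with a full dcontext; returning terminates the thread and
 *         // cleans up its stack automatically, per the comment above.
 *     }
 *     ...
 *     if (!dr_create_client_thread(my_thread_func, my_arg))
 *         dr_fprintf(STDERR, "failed to create client thread\n");
 */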
int
get_num_processors(void)
{
static uint num_cpu = 0; /* cached value */
if (!num_cpu) {
#ifdef MACOS
DEBUG_DECLARE(bool ok =)
sysctl_query(CTL_HW, HW_NCPU, &num_cpu, sizeof(num_cpu));
ASSERT(ok);
#else
/* We used to use get_nprocs_conf, but that's in libc, so now we just
* look at the /sys filesystem ourselves, which is what glibc does.
*/
uint local_num_cpus = 0;
file_t cpu_dir = os_open_directory("/sys/devices/system/cpu", OS_OPEN_READ);
dir_iterator_t iter;
ASSERT(cpu_dir != INVALID_FILE &&
"/sys must be mounted: mount -t sysfs sysfs /sys");
os_dir_iterator_start(&iter, cpu_dir);
while (os_dir_iterator_next(&iter)) {
int dummy_num;
if (sscanf(iter.name, "cpu%d", &dummy_num) == 1)
local_num_cpus++;
}
os_close(cpu_dir);
num_cpu = local_num_cpus;
#endif
ASSERT(num_cpu);
}
return num_cpu;
}
/* i#46: To support -no_private_loader, we have to call the dlfcn family of
* routines in libdl.so. When we do early injection, there is no loader to
* resolve these imports, so they will crash. Early injection is incompatible
* with -no_private_loader, so this should never happen.
*/
shlib_handle_t
load_shared_library(const char *name, bool reachable)
{
#ifdef STATIC_LIBRARY
if (os_files_same(name, get_application_name())) {
/* The private loader falls back to dlsym() and friends for modules it
* doesn't recognize, so this works without disabling the private loader.
*/
return dlopen(NULL, RTLD_LAZY); /* Gets a handle to the exe. */
}
#endif
/* We call locate_and_load_private_library() to support searching for
* a pathless name.
*/
if (INTERNAL_OPTION(private_loader))
return (shlib_handle_t)locate_and_load_private_library(name, reachable);
#if defined(STATIC_LIBRARY) || defined(MACOS)
ASSERT(!DYNAMO_OPTION(early_inject));
return dlopen(name, RTLD_LAZY);
#else
/* -no_private_loader is no longer supported in our default builds.
* If we want it for hybrid mode we should add a new build param and include
* the libdl calls here under that param.
*/
ASSERT_NOT_REACHED();
return NULL;
#endif
}
shlib_routine_ptr_t
lookup_library_routine(shlib_handle_t lib, const char *name)
{
if (INTERNAL_OPTION(private_loader)) {
return (shlib_routine_ptr_t)get_private_library_address((app_pc)lib, name);
}
#if defined(STATIC_LIBRARY) || defined(MACOS)
ASSERT(!DYNAMO_OPTION(early_inject));
return dlsym(lib, name);
#else
ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported: see above */
return NULL;
#endif
}
void
unload_shared_library(shlib_handle_t lib)
{
if (INTERNAL_OPTION(private_loader)) {
unload_private_library(lib);
} else {
#if defined(STATIC_LIBRARY) || defined(MACOS)
ASSERT(!DYNAMO_OPTION(early_inject));
if (!DYNAMO_OPTION(avoid_dlclose)) {
dlclose(lib);
}
#else
ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported: see above */
#endif
}
}
void
shared_library_error(char *buf, int maxlen)
{
const char *err;
if (INTERNAL_OPTION(private_loader)) {
err = "error in private loader";
} else {
#if defined(STATIC_LIBRARY) || defined(MACOS)
ASSERT(!DYNAMO_OPTION(early_inject));
err = dlerror();
if (err == NULL) {
err = "dlerror returned NULL";
}
#else
ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported */
err = "unknown error";
#endif
}
strncpy(buf, err, maxlen - 1);
buf[maxlen - 1] = '\0'; /* strncpy won't put on trailing null if maxes out */
}
/* addr is any pointer known to lie within the library.
* for linux, one of addr or name is needed; for windows, neither is needed.
*/
bool
shared_library_bounds(IN shlib_handle_t lib, IN byte *addr, IN const char *name,
OUT byte **start, OUT byte **end)
{
ASSERT(start != NULL && end != NULL);
/* PR 366195: dlopen() handle truly is opaque, so we have to use either
* addr or name
*/
ASSERT(addr != NULL || name != NULL);
*start = addr;
if (INTERNAL_OPTION(private_loader)) {
privmod_t *mod;
/* look for private library first */
acquire_recursive_lock(&privload_lock);
mod = privload_lookup_by_base((app_pc)lib);
if (name != NULL && mod == NULL)
mod = privload_lookup(name);
if (mod != NULL && !mod->externally_loaded) {
*start = mod->base;
if (end != NULL)
*end = mod->base + mod->size;
release_recursive_lock(&privload_lock);
return true;
}
release_recursive_lock(&privload_lock);
}
return (memquery_library_bounds(name, start, end, NULL, 0, NULL, 0) > 0);
}
static int
fcntl_syscall(int fd, int cmd, long arg)
{
return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_fcntl), 3, fd, cmd, arg);
}
/* dups curfd to a private fd.
* returns -1 if unsuccessful.
*/
file_t
fd_priv_dup(file_t curfd)
{
file_t newfd = -1;
if (DYNAMO_OPTION(steal_fds) > 0) {
        /* RLIMIT_NOFILE is 1 greater than the max fd, and F_DUPFD starts at the
         * given value.
         */
/* XXX: if > linux 2.6.24, can use F_DUPFD_CLOEXEC to avoid later call:
* so how do we tell if the flag is supported? try calling once at init?
*/
newfd = fcntl_syscall(curfd, F_DUPFD, min_dr_fd);
if (newfd < 0) {
/* We probably ran out of fds, esp if debug build and there are
* lots of threads. Should we track how many we've given out to
* avoid a failed syscall every time after?
*/
SYSLOG_INTERNAL_WARNING_ONCE("ran out of stolen fd space");
/* Try again but this time in the app space, somewhere high up
* to avoid issues like tcsh assuming it can own fds 3-5 for
* piping std{in,out,err} (xref the old -open_tcsh_fds option).
*/
newfd = fcntl_syscall(curfd, F_DUPFD, min_dr_fd / 2);
}
}
return newfd;
}
bool
fd_mark_close_on_exec(file_t fd)
{
/* we assume FD_CLOEXEC is the only flag and don't bother w/ F_GETFD */
if (fcntl_syscall(fd, F_SETFD, FD_CLOEXEC) != 0) {
SYSLOG_INTERNAL_WARNING("unable to mark file %d as close-on-exec", fd);
return false;
}
return true;
}
void
fd_table_add(file_t fd, uint flags)
{
if (fd_table != NULL) {
TABLE_RWLOCK(fd_table, write, lock);
DODEBUG({
/* i#1010: If the fd is already in the table, chances are it's a
* stale logfile fd left behind by a vforked or cloned child that
* called execve. Avoid an assert if that happens.
*/
bool present = generic_hash_remove(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)fd);
ASSERT_CURIOSITY_ONCE(!present && "stale fd not cleaned up");
});
generic_hash_add(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)fd,
/* store the flags, w/ a set bit to ensure not 0 */
(void *)(ptr_uint_t)(flags | OS_OPEN_RESERVED));
TABLE_RWLOCK(fd_table, write, unlock);
} else {
#ifdef DEBUG
num_fd_add_pre_heap++;
/* we add main_logfile in d_r_os_init() */
ASSERT(num_fd_add_pre_heap == 1 && "only main_logfile should come here");
#endif
}
}
static bool
fd_is_dr_owned(file_t fd)
{
ptr_uint_t flags;
ASSERT(fd_table != NULL);
TABLE_RWLOCK(fd_table, read, lock);
flags = (ptr_uint_t)generic_hash_lookup(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)fd);
TABLE_RWLOCK(fd_table, read, unlock);
return (flags != 0);
}
static bool
fd_is_in_private_range(file_t fd)
{
return (DYNAMO_OPTION(steal_fds) > 0 && min_dr_fd > 0 && fd >= min_dr_fd);
}
file_t
os_open_protected(const char *fname, int os_open_flags)
{
file_t dup;
file_t res = os_open(fname, os_open_flags);
if (res < 0)
return res;
/* we could have os_open() always switch to a private fd but it's probably
* not worth the extra syscall for temporary open/close sequences so we
* only use it for persistent files
*/
dup = fd_priv_dup(res);
if (dup >= 0) {
close_syscall(res);
res = dup;
fd_mark_close_on_exec(res);
} /* else just keep original */
/* ditto here, plus for things like config.c opening files we can't handle
* grabbing locks and often don't have heap available so no fd_table
*/
fd_table_add(res, os_open_flags);
return res;
}
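/* Example of the effect of the private-fd scheme above (illustrative numbers):
 * if os_open() returns fd 4 and -steal_fds reserves a range starting at
 * min_dr_fd (say 1000), os_open_protected() dups 4 to e.g. 1001, closes 4,
 * marks 1001 close-on-exec, and records it in fd_table so DR's descriptors can
 * be distinguished from the app's.
 */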
void
os_close_protected(file_t f)
{
ASSERT(fd_table != NULL || dynamo_exited);
if (fd_table != NULL) {
TABLE_RWLOCK(fd_table, write, lock);
generic_hash_remove(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)f);
TABLE_RWLOCK(fd_table, write, unlock);
}
os_close(f);
}
bool
os_get_current_dir(char *buf, size_t bufsz)
{
#ifdef MACOS
static char noheap_buf[MAXPATHLEN];
bool res = false;
file_t fd = os_open(".", OS_OPEN_READ);
int len;
/* F_GETPATH assumes a buffer of size MAXPATHLEN */
char *fcntl_buf;
if (dynamo_heap_initialized)
fcntl_buf = global_heap_alloc(MAXPATHLEN HEAPACCT(ACCT_OTHER));
else
fcntl_buf = noheap_buf;
if (fd == INVALID_FILE)
goto cwd_error;
if (fcntl_syscall(fd, F_GETPATH, (long)fcntl_buf) != 0)
goto cwd_error;
len = snprintf(buf, bufsz, "%s", fcntl_buf);
buf[bufsz - 1] = '\0';
return (len > 0 && len < bufsz);
cwd_error:
if (dynamo_heap_initialized)
global_heap_free(fcntl_buf, MAXPATHLEN HEAPACCT(ACCT_OTHER));
os_close(fd);
return res;
#else
return (dynamorio_syscall(SYS_getcwd, 2, buf, bufsz) > 0);
#endif
}
ssize_t
os_write(file_t f, const void *buf, size_t count)
{
return write_syscall(f, buf, count);
}
/* There are enough differences vs the shared drlibc_os.c version that we override
* it here. We use a loop to ensure reachability for the core.
*/
byte *
os_map_file(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot,
map_flags_t map_flags)
{
int flags;
byte *map = NULL;
#if defined(X64)
bool loop = false;
uint iters = 0;
# define MAX_MMAP_LOOP_ITERS 100
byte *region_start = NULL, *region_end = NULL;
#else
uint pg_offs;
ASSERT_TRUNCATE(pg_offs, uint, offs / PAGE_SIZE);
pg_offs = (uint)(offs / PAGE_SIZE);
#endif
#ifdef VMX86_SERVER
flags = MAP_PRIVATE; /* MAP_SHARED not supported yet */
#else
flags = TEST(MAP_FILE_COPY_ON_WRITE, map_flags) ? MAP_PRIVATE : MAP_SHARED;
#endif
#if defined(X64)
/* Allocate memory from reachable range for image: or anything (pcache
* in particular): for low 4GB, easiest to just pass MAP_32BIT (which is
* low 2GB, but good enough).
*/
if (DYNAMO_OPTION(heap_in_lower_4GB) &&
!TESTANY(MAP_FILE_FIXED | MAP_FILE_APP, map_flags))
flags |= MAP_32BIT;
#endif
/* Allows memory request instead of mapping a file,
* so we can request memory from a particular address with fixed argument */
if (f == -1)
flags |= MAP_ANONYMOUS;
if (TEST(MAP_FILE_FIXED, map_flags))
flags |= MAP_FIXED;
#if defined(X64)
if (!TEST(MAP_32BIT, flags) && TEST(MAP_FILE_REACHABLE, map_flags)) {
vmcode_get_reachable_region(®ion_start, ®ion_end);
/* addr need not be NULL: we'll use it if it's in the region */
ASSERT(!TEST(MAP_FILE_FIXED, map_flags));
/* Loop to handle races */
loop = true;
}
if ((!TEST(MAP_32BIT, flags) && TEST(MAP_FILE_REACHABLE, map_flags) &&
(is_vmm_reserved_address(addr, *size, NULL, NULL) ||
/* Try to honor a library's preferred address. This does open up a race
* window during attach where another thread could take this spot,
* and with this current code we'll never go back and try to get VMM
* memory. We live with that as being rare rather than complicate the code.
*/
!rel32_reachable_from_current_vmcode(addr))) ||
(TEST(MAP_FILE_FIXED, map_flags) && !TEST(MAP_FILE_VMM_COMMIT, map_flags) &&
is_vmm_reserved_address(addr, *size, NULL, NULL))) {
if (DYNAMO_OPTION(vm_reserve)) {
/* Try to get space inside the vmcode reservation. */
map = heap_reserve_for_external_mapping(addr, *size,
VMM_SPECIAL_MMAP | VMM_REACHABLE);
if (map != NULL) {
addr = map;
flags |= MAP_FIXED;
}
}
}
while (!loop ||
(addr != NULL && addr >= region_start && addr + *size <= region_end) ||
find_free_memory_in_region(region_start, region_end, *size, &addr, NULL)) {
#endif
map = mmap_syscall(addr, *size, memprot_to_osprot(prot), flags, f,
/* x86 Linux mmap uses offset in pages */
IF_LINUX_ELSE(IF_X64_ELSE(offs, pg_offs), offs));
if (!mmap_syscall_succeeded(map)) {
LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: " PIFX "\n", __func__, map);
map = NULL;
}
#if defined(X64)
else if (loop && (map < region_start || map + *size > region_end)) {
/* Try again: probably a race. Hopefully our notion of "there's a free
* region big enough" matches the kernel's, else we'll loop forever
* (which we try to catch w/ a max iters count).
*/
munmap_syscall(map, *size);
map = NULL;
} else
break;
if (!loop)
break;
if (++iters > MAX_MMAP_LOOP_ITERS) {
ASSERT_NOT_REACHED();
map = NULL;
break;
}
addr = NULL; /* pick a new one */
}
#endif
return map;
}
bool
os_unmap_file(byte *map, size_t size)
{
if (DYNAMO_OPTION(vm_reserve) && is_vmm_reserved_address(map, size, NULL, NULL)) {
/* XXX i#3570: We'd prefer to have the VMM perform this to ensure it matches
* how it originally reserved the memory. To do that would we expose a way
* to ask for MAP_FIXED in os_heap_reserve*()?
*/
byte *addr = mmap_syscall(map, size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (!mmap_syscall_succeeded(addr))
return false;
return heap_unreserve_for_external_mapping(map, size,
VMM_SPECIAL_MMAP | VMM_REACHABLE);
}
long res = munmap_syscall(map, size);
return (res == 0);
}
#ifdef LINUX
static void
os_get_memory_file_shm_path(const char *name, OUT char *buf, size_t bufsz)
{
snprintf(buf, bufsz, "/dev/shm/%s.%d", name, get_process_id());
buf[bufsz - 1] = '\0';
}
#endif
file_t
os_create_memory_file(const char *name, size_t size)
{
#ifdef LINUX
char path[MAXIMUM_PATH];
file_t fd;
/* We need an in-memory file. We prefer the new memfd_create over /dev/shm (it
* has no name conflict issues, stale files left around on a crash, or
* reliance on tmpfs).
*/
# ifdef SYS_memfd_create
snprintf(path, BUFFER_SIZE_ELEMENTS(path), "/%s.%d", name, get_process_id());
NULL_TERMINATE_BUFFER(path);
fd = dynamorio_syscall(SYS_memfd_create, 2, path, 0);
# else
fd = -ENOSYS;
# endif
if (fd == -ENOSYS) {
/* Fall back on /dev/shm. */
os_get_memory_file_shm_path(name, path, BUFFER_SIZE_ELEMENTS(path));
NULL_TERMINATE_BUFFER(path);
fd = open_syscall(path, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
if (fd == -EEXIST) {
/* We assume a stale file from some prior crash. */
SYSLOG_INTERNAL_WARNING("Removing presumed-stale %s", path);
os_delete_file(path);
fd = open_syscall(path, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
}
}
if (fd < 0)
return INVALID_FILE;
/* Work around an IMA (kernel optional feature "Integrity Measurement
* Architecture") slowdown where the first executable mmap causes a hash
* to be computed of the entire file size, which can take 5 or 10
* *seconds* for gigabyte files. This is only done once, so if we
* trigger it while the file is tiny, we can avoid the delay later.
*/
byte *temp_map = mmap_syscall(0, PAGE_SIZE, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
if (mmap_syscall_succeeded(temp_map))
munmap_syscall(temp_map, PAGE_SIZE);
/* Else, not fatal: this may not be destined for a later executable mapping anyway. */
if (dynamorio_syscall(SYS_ftruncate, 2, fd, size) < 0) {
close_syscall(fd);
return INVALID_FILE;
}
file_t priv_fd = fd_priv_dup(fd);
close_syscall(fd); /* Close the old descriptor on success *and* error. */
if (priv_fd < 0) {
return INVALID_FILE;
}
fd = priv_fd;
fd_mark_close_on_exec(fd); /* We could use MFD_CLOEXEC for memfd_create. */
return fd;
#else
ASSERT_NOT_IMPLEMENTED(false && "i#3556 NYI for Mac");
return INVALID_FILE;
#endif
}
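/* Illustrative lifecycle sketch (not part of the build), assuming Linux; the
 * name "mycache" is hypothetical.
 *
 *     file_t f = os_create_memory_file("mycache", 2 * PAGE_SIZE);
 *     if (f != INVALID_FILE) {
 *         size_t sz = 2 * PAGE_SIZE;
 *         byte *m = os_map_file(f, &sz, 0, NULL, MEMPROT_READ | MEMPROT_WRITE, 0);
 *         ... use m ...
 *         if (m != NULL)
 *             os_unmap_file(m, sz);
 *         os_delete_memory_file("mycache", f);
 *     }
 */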
void
os_delete_memory_file(const char *name, file_t fd)
{
#ifdef LINUX
/* There is no need to delete a memfd_create path, but if we used shm we need
* to clean it up. We blindly do this rather than trying to record whether
* we created this file.
*/
char path[MAXIMUM_PATH];
os_get_memory_file_shm_path(name, path, BUFFER_SIZE_ELEMENTS(path));
NULL_TERMINATE_BUFFER(path);
os_delete_file(path);
close_syscall(fd);
#else
ASSERT_NOT_IMPLEMENTED(false && "i#3556 NYI for Mac");
#endif
}
bool
os_get_disk_free_space(/*IN*/ file_t file_handle,
/*OUT*/ uint64 *AvailableQuotaBytes /*OPTIONAL*/,
/*OUT*/ uint64 *TotalQuotaBytes /*OPTIONAL*/,
/*OUT*/ uint64 *TotalVolumeBytes /*OPTIONAL*/)
{
/* libc struct seems to match kernel's */
struct statfs stat;
ptr_int_t res = dynamorio_syscall(SYS_fstatfs, 2, file_handle, &stat);
if (res != 0) {
LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: " PIFX "\n", __func__, res);
return false;
}
LOG(GLOBAL, LOG_STATS, 3,
"os_get_disk_free_space: avail=" SZFMT ", free=" SZFMT ", bsize=" SZFMT "\n",
stat.f_bavail, stat.f_bfree, stat.f_bsize);
if (AvailableQuotaBytes != NULL)
*AvailableQuotaBytes = ((uint64)stat.f_bavail * stat.f_bsize);
/* no support for quotas */
if (TotalQuotaBytes != NULL)
*TotalQuotaBytes = ((uint64)stat.f_bavail * stat.f_bsize);
if (TotalVolumeBytes != NULL) /* despite name this is how much is free */
*TotalVolumeBytes = ((uint64)stat.f_bfree * stat.f_bsize);
return true;
}
#ifdef LINUX
static bool
symlink_is_self_exe(const char *path)
{
/* Look for "/proc/%d/exe" where %d exists in /proc/self/task/%d,
* or "/proc/self/exe". Rule out the exe link for another process
     * (though even if it is also under DR, we have no simple way to obtain
     * its actual app path).
*/
# define SELF_LEN_LEADER 6 /* "/proc/" */
# define SELF_LEN_TRAILER 4 /* "/exe" */
# define SELF_LEN_MAX 18
size_t len = strlen(path);
if (strcmp(path, "/proc/self/exe") == 0)
return true;
if (len < SELF_LEN_MAX && /* /proc/nnnnnn/exe */
strncmp(path, "/proc/", SELF_LEN_LEADER) == 0 &&
strncmp(path + len - SELF_LEN_TRAILER, "/exe", SELF_LEN_TRAILER) == 0) {
int pid;
if (sscanf(path + SELF_LEN_LEADER, "%d", &pid) == 1) {
char task[32];
snprintf(task, BUFFER_SIZE_ELEMENTS(task), "/proc/self/task/%d", pid);
NULL_TERMINATE_BUFFER(task);
return os_file_exists(task, true /*dir*/);
}
}
return false;
}
#endif
void
exit_process_syscall(long status)
{
/* We now assume SYS_exit_group is defined: not building on old machines,
* but will execute there. We try exit_group and if it fails we use exit.
*
* FIXME: if no exit_group, kill all other threads (==processes in same addr
* space) manually? Presumably we got here b/c at an unsafe point to do
* full exit? Or is that not true: what about dr_abort()?
*/
dynamorio_syscall(SYSNUM_EXIT_PROCESS, 1, status);
/* would assert that result is -ENOSYS but assert likely calls us => infinite loop */
exit_thread_syscall(status);
ASSERT_NOT_REACHED();
}
void
exit_thread_syscall(long status)
{
#ifdef MACOS
mach_port_t thread_port = dynamorio_mach_syscall(MACH_thread_self_trap, 0);
/* FIXME i#1403: on MacOS we fail to free the app's stack: we need to pass it to
* bsdthread_terminate.
*/
dynamorio_syscall(SYSNUM_EXIT_THREAD, 4, 0, 0, thread_port, 0);
#else
dynamorio_syscall(SYSNUM_EXIT_THREAD, 1, status);
#endif
}
/* FIXME: this one will not be easily internationalizable,
 * yet it is easier to have a syslog-based Unix implementation with real strings.
 */
void
os_syslog(syslog_event_type_t priority, uint message_id, uint substitutions_num,
va_list args)
{
int native_priority;
switch (priority) {
case SYSLOG_INFORMATION: native_priority = LOG_INFO; break;
case SYSLOG_WARNING: native_priority = LOG_WARNING; break;
case SYSLOG_CRITICAL: native_priority = LOG_CRIT; break;
case SYSLOG_ERROR: native_priority = LOG_ERR; break;
default: ASSERT_NOT_REACHED();
}
/* can amount to passing a format string (careful here) to vsyslog */
/* Never let user controlled data in the format string! */
ASSERT_NOT_IMPLEMENTED(false);
}
/* This is subject to races, but should only happen at init/attach when
* there should only be one live thread.
*/
static bool
safe_read_via_query(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
bool res = false;
size_t num_read = 0;
ASSERT(!fault_handling_initialized);
/* XXX: in today's init ordering, allmem will never be initialized when we come
* here, but we check it nevertheless to be general in case this routine is
* ever called at some later time
*/
if (IF_MEMQUERY_ELSE(false, memcache_initialized()))
res = is_readable_without_exception_internal(base, size, false /*use allmem*/);
else
res = is_readable_without_exception_query_os((void *)base, size);
if (res) {
memcpy(out_buf, base, size);
num_read = size;
}
if (bytes_read != NULL)
*bytes_read = num_read;
return res;
}
bool
safe_read_ex(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
STATS_INC(num_safe_reads);
/* XXX i#350: we'd like to always use safe_read_fast() and remove this extra
* call layer, but safe_read_fast() requires fault handling to be set up.
* We do set up an early signal handler in d_r_os_init(),
     * but there is still a window prior to that with no handler.
*/
if (!fault_handling_initialized) {
return safe_read_via_query(base, size, out_buf, bytes_read);
} else {
return safe_read_fast(base, size, out_buf, bytes_read);
}
}
bool
safe_read_if_fast(const void *base, size_t size, void *out_buf)
{
if (!fault_handling_initialized) {
memcpy(out_buf, base, size);
return true;
} else {
return safe_read_ex(base, size, out_buf, NULL);
}
}
/* FIXME - fold this together with safe_read_ex() (is a lot of places to update) */
bool
d_r_safe_read(const void *base, size_t size, void *out_buf)
{
return safe_read_ex(base, size, out_buf, NULL);
}
bool
safe_write_ex(void *base, size_t size, const void *in_buf, size_t *bytes_written)
{
return safe_write_try_except(base, size, in_buf, bytes_written);
}
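/* Illustrative usage sketch (not part of the build): probing possibly-unmapped
 * app memory without risking a fault; app_addr is a hypothetical address.
 *
 *     uint value;
 *     size_t got;
 *     if (safe_read_ex(app_addr, sizeof(value), &value, &got) &&
 *         got == sizeof(value)) {
 *         ... app_addr was readable and value holds its contents ...
 *     }
 */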
/* is_readable_without_exception checks to see that all bytes with addresses
* from pc to pc+size-1 are readable and that reading from there won't
* generate an exception. if 'from_os' is true, check what the os thinks
* the prot bits are instead of using the all memory list.
*/
static bool
is_readable_without_exception_internal(const byte *pc, size_t size, bool query_os)
{
uint prot = MEMPROT_NONE;
byte *check_pc = (byte *)ALIGN_BACKWARD(pc, PAGE_SIZE);
if (size > ((byte *)POINTER_MAX - pc))
size = (byte *)POINTER_MAX - pc;
do {
bool rc = query_os ? get_memory_info_from_os(check_pc, NULL, NULL, &prot)
: get_memory_info(check_pc, NULL, NULL, &prot);
if (!rc || !TESTANY(MEMPROT_READ | MEMPROT_EXEC, prot))
return false;
if (POINTER_OVERFLOW_ON_ADD(check_pc, PAGE_SIZE))
break;
check_pc += PAGE_SIZE;
} while (check_pc < pc + size);
return true;
}
bool
is_readable_without_exception(const byte *pc, size_t size)
{
/* case 9745 / i#853: We've had problems with all_memory_areas not being
* accurate in the past. Parsing proc maps is too slow for some apps, so we
* use a runtime option.
*/
bool query_os = IF_MEMQUERY_ELSE(true, !DYNAMO_OPTION(use_all_memory_areas));
return is_readable_without_exception_internal(pc, size, query_os);
}
/* Identical to is_readable_without_exception except that the os is queried
* for info on the indicated region */
bool
is_readable_without_exception_query_os(byte *pc, size_t size)
{
return is_readable_without_exception_internal(pc, size, true);
}
bool
is_readable_without_exception_query_os_noblock(byte *pc, size_t size)
{
if (memquery_from_os_will_block())
return false;
return is_readable_without_exception_internal(pc, size, true);
}
bool
is_user_address(byte *pc)
{
/* FIXME: NYI */
/* note returning true will always skip the case 9022 logic on Linux */
return true;
}
/* change protections on memory region starting at pc of length length
* this does not update the all memory area info
*/
bool
os_set_protection(byte *pc, size_t length, uint prot /*MEMPROT_*/)
{
app_pc start_page = (app_pc)PAGE_START(pc);
uint num_bytes = ALIGN_FORWARD(length + (pc - start_page), PAGE_SIZE);
long res = 0;
uint flags = memprot_to_osprot(prot);
DOSTATS({
/* once on each side of prot, to get on right side of writability */
if (!TEST(PROT_WRITE, flags)) {
STATS_INC(protection_change_calls);
STATS_ADD(protection_change_pages, num_bytes / PAGE_SIZE);
}
});
res = mprotect_syscall((void *)start_page, num_bytes, flags);
if (res != 0)
return false;
LOG(THREAD_GET, LOG_VMAREAS, 3,
"change_prot(" PFX ", " PIFX ", %s) => "
"mprotect(" PFX ", " PIFX ", %d)==%d pages\n",
pc, length, memprot_string(prot), start_page, num_bytes, flags,
num_bytes / PAGE_SIZE);
DOSTATS({
/* once on each side of prot, to get on right side of writability */
if (TEST(PROT_WRITE, flags)) {
STATS_INC(protection_change_calls);
STATS_ADD(protection_change_pages, num_bytes / PAGE_SIZE);
}
});
return true;
}
/* change protections on memory region starting at pc of length length */
bool
set_protection(byte *pc, size_t length, uint prot /*MEMPROT_*/)
{
if (os_set_protection(pc, length, prot) == false)
return false;
#ifndef HAVE_MEMINFO_QUERY
else {
app_pc start_page = (app_pc)PAGE_START(pc);
uint num_bytes = ALIGN_FORWARD(length + (pc - start_page), PAGE_SIZE);
memcache_update_locked(start_page, start_page + num_bytes, prot,
-1 /*type unchanged*/, true /*exists*/);
}
#endif
return true;
}
/* change protections on memory region starting at pc of length length */
bool
change_protection(byte *pc, size_t length, bool writable)
{
if (writable)
return make_writable(pc, length);
else
make_unwritable(pc, length);
return true;
}
/* make pc's page writable */
bool
make_writable(byte *pc, size_t size)
{
long res;
app_pc start_page = (app_pc)PAGE_START(pc);
size_t prot_size = (size == 0) ? PAGE_SIZE : size;
uint prot = PROT_EXEC | PROT_READ | PROT_WRITE;
/* if can get current protection then keep old read/exec flags.
* this is crucial on modern linux kernels which refuse to mark stack +x.
*/
if (!is_in_dynamo_dll(pc) /*avoid allmem assert*/ &&
#ifdef STATIC_LIBRARY
/* FIXME i#975: is_in_dynamo_dll() is always false for STATIC_LIBRARY,
* but we can't call get_memory_info() until allmem is initialized. Our
* uses before then are for patching x86.asm, which is OK.
*/
IF_NO_MEMQUERY(memcache_initialized() &&)
#endif
get_memory_info(pc, NULL, NULL, &prot))
prot |= PROT_WRITE;
ASSERT(start_page == pc && ALIGN_FORWARD(size, PAGE_SIZE) == size);
res = mprotect_syscall((void *)start_page, prot_size, prot);
LOG(THREAD_GET, LOG_VMAREAS, 3, "make_writable: pc " PFX " -> " PFX "-" PFX " %d\n",
pc, start_page, start_page + prot_size, res);
ASSERT(res == 0);
if (res != 0)
return false;
STATS_INC(protection_change_calls);
STATS_ADD(protection_change_pages, size / PAGE_SIZE);
#ifndef HAVE_MEMINFO_QUERY
/* update all_memory_areas list with the protection change */
if (memcache_initialized()) {
memcache_update_locked(start_page, start_page + prot_size,
osprot_to_memprot(prot), -1 /*type unchanged*/,
true /*exists*/);
}
#endif
return true;
}
/* like make_writable but adds COW */
bool
make_copy_on_writable(byte *pc, size_t size)
{
/* FIXME: for current usage this should be fine */
return make_writable(pc, size);
}
/* make pc's page unwritable */
void
make_unwritable(byte *pc, size_t size)
{
long res;
app_pc start_page = (app_pc)PAGE_START(pc);
size_t prot_size = (size == 0) ? PAGE_SIZE : size;
uint prot = PROT_EXEC | PROT_READ;
/* if can get current protection then keep old read/exec flags.
* this is crucial on modern linux kernels which refuse to mark stack +x.
*/
if (!is_in_dynamo_dll(pc) /*avoid allmem assert*/ &&
#ifdef STATIC_LIBRARY
/* FIXME i#975: is_in_dynamo_dll() is always false for STATIC_LIBRARY,
* but we can't call get_memory_info() until allmem is initialized. Our
* uses before then are for patching x86.asm, which is OK.
*/
IF_NO_MEMQUERY(memcache_initialized() &&)
#endif
get_memory_info(pc, NULL, NULL, &prot))
prot &= ~PROT_WRITE;
ASSERT(start_page == pc && ALIGN_FORWARD(size, PAGE_SIZE) == size);
/* inc stats before making unwritable, in case messing w/ data segment */
STATS_INC(protection_change_calls);
STATS_ADD(protection_change_pages, size / PAGE_SIZE);
res = mprotect_syscall((void *)start_page, prot_size, prot);
LOG(THREAD_GET, LOG_VMAREAS, 3, "make_unwritable: pc " PFX " -> " PFX "-" PFX "\n",
pc, start_page, start_page + prot_size);
ASSERT(res == 0);
#ifndef HAVE_MEMINFO_QUERY
/* update all_memory_areas list with the protection change */
if (memcache_initialized()) {
memcache_update_locked(start_page, start_page + prot_size,
osprot_to_memprot(prot), -1 /*type unchanged*/,
false /*!exists*/);
}
#endif
}
/****************************************************************************/
/* SYSTEM CALLS */
/* SYS_ defines are in /usr/include/bits/syscall.h
* numbers used by libc are in /usr/include/asm/unistd.h
* kernel defines are in /usr/src/linux-2.4/include/asm-i386/unistd.h
* kernel function names are in /usr/src/linux/arch/i386/kernel/entry.S
*
* For now, we've copied the SYS/NR defines from syscall.h and unistd.h
* and put them in our own local syscall.h.
*/
/* num_raw should be the xax register value.
* For a live system call, dcontext_live should be passed (for examining
* the dcontext->last_exit and exit_reason flags); otherwise, gateway should
* be passed.
*/
int
os_normalized_sysnum(int num_raw, instr_t *gateway, dcontext_t *dcontext)
{
#ifdef MACOS
/* The x64 encoding indicates the syscall type in the top 8 bits.
* We drop the 0x2000000 for BSD so we can use the SYS_ enum constants.
* That leaves 0x1000000 for Mach and 0x3000000 for Machdep.
* On 32-bit, a different encoding is used: we transform that
* to the x64 encoding minus BSD.
*/
int interrupt = 0;
int num = 0;
if (gateway != NULL) {
if (instr_is_interrupt(gateway))
interrupt = instr_get_interrupt_number(gateway);
} else {
ASSERT(dcontext != NULL);
if (TEST(LINK_SPECIAL_EXIT, dcontext->last_exit->flags)) {
if (dcontext->upcontext.upcontext.exit_reason ==
EXIT_REASON_NI_SYSCALL_INT_0x81)
interrupt = 0x81;
else {
ASSERT(dcontext->upcontext.upcontext.exit_reason ==
EXIT_REASON_NI_SYSCALL_INT_0x82);
interrupt = 0x82;
}
}
}
# ifdef X64
if (num_raw >> 24 == 0x2)
return (int)(num_raw & 0xffffff); /* Drop BSD bit */
else
num = (int)num_raw; /* Keep Mach and Machdep bits */
# else
if ((ptr_int_t)num_raw < 0) /* Mach syscall */
return (SYSCALL_NUM_MARKER_MACH | -(int)num_raw);
else {
/* Bottom 16 bits are the number, top are arg size. */
num = (int)(num_raw & 0xffff);
}
# endif
if (interrupt == 0x81)
num |= SYSCALL_NUM_MARKER_MACH;
else if (interrupt == 0x82)
num |= SYSCALL_NUM_MARKER_MACHDEP;
return num;
#else
return num_raw;
#endif
}
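/* Returns whether DR can let the (normalized) syscall execute without its own
 * pre/post handling. The cases listed in the switch below are the syscalls DR
 * must observe (memory mappings, signals, thread/process creation and exit,
 * TLS, and file descriptors it may duplicate), so they return false; everything
 * else is ignorable.
 */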
static bool
ignorable_system_call_normalized(int num)
{
switch (num) {
#if defined(SYS_exit_group)
case SYS_exit_group:
#endif
case SYS_exit:
#ifdef MACOS
case SYS_bsdthread_terminate:
#endif
#ifdef LINUX
case SYS_brk:
# ifdef SYS_uselib
case SYS_uselib:
# endif
#endif
#if defined(X64) || !defined(ARM)
case SYS_mmap:
#endif
#if !defined(X64) && !defined(MACOS)
case SYS_mmap2:
#endif
case SYS_munmap:
#ifdef LINUX
case SYS_mremap:
#endif
case SYS_mprotect:
#ifdef ANDROID
case SYS_prctl:
#endif
case SYS_execve:
#ifdef LINUX
case SYS_clone3:
case SYS_clone:
#elif defined(MACOS)
case SYS_bsdthread_create:
case SYS_posix_spawn:
#endif
#ifdef SYS_fork
case SYS_fork:
#endif
#ifdef SYS_vfork
case SYS_vfork:
#endif
case SYS_kill:
#if defined(SYS_tkill)
case SYS_tkill:
#endif
#if defined(SYS_tgkill)
case SYS_tgkill:
#endif
#if defined(LINUX) && !defined(X64) && !defined(ARM)
case SYS_signal:
#endif
#ifdef MACOS
case SYS_sigsuspend_nocancel:
#endif
#if !defined(X64) || defined(MACOS)
case SYS_sigaction:
case SYS_sigsuspend:
case SYS_sigpending:
case SYS_sigreturn:
case SYS_sigprocmask:
#endif
#ifdef LINUX
case SYS_rt_sigreturn:
case SYS_rt_sigaction:
case SYS_rt_sigprocmask:
case SYS_rt_sigpending:
# ifdef SYS_rt_sigtimedwait_time64
case SYS_rt_sigtimedwait_time64:
# endif
case SYS_rt_sigtimedwait:
case SYS_rt_sigqueueinfo:
case SYS_rt_sigsuspend:
# ifdef SYS_signalfd
case SYS_signalfd:
# endif
case SYS_signalfd4:
#endif
case SYS_sigaltstack:
#if defined(LINUX) && !defined(X64) && !defined(ARM)
case SYS_sgetmask:
case SYS_ssetmask:
#endif
case SYS_setitimer:
case SYS_getitimer:
#ifdef MACOS
case SYS_close_nocancel:
#endif
#ifdef SYS_close_range
case SYS_close_range:
#endif
case SYS_close:
#ifdef SYS_dup2
case SYS_dup2:
#endif
#ifdef LINUX
case SYS_dup3:
#endif
#ifdef MACOS
case SYS_fcntl_nocancel:
#endif
case SYS_fcntl:
#if defined(X64) || !defined(ARM)
case SYS_getrlimit:
#endif
#if defined(LINUX) && !defined(X64)
case SYS_ugetrlimit:
#endif
case SYS_setrlimit:
#ifdef LINUX
case SYS_prlimit64:
#endif
#if defined(LINUX) && defined(X86)
/* i#784: app may have behavior relying on SIGALRM */
case SYS_alarm:
#endif
/* i#107: syscall might change/query app's seg memory
* need stop app from clobbering our GDT slot.
*/
#if defined(LINUX) && defined(X86) && defined(X64)
case SYS_arch_prctl:
#endif
#if defined(LINUX) && defined(X86)
case SYS_set_thread_area:
case SYS_get_thread_area:
/* FIXME: we might add SYS_modify_ldt later. */
#endif
#if defined(LINUX) && defined(ARM)
/* syscall changes app's thread register */
case SYS_set_tls:
case SYS_cacheflush:
#endif
#if defined(LINUX)
/* Syscalls change procsigmask */
# ifdef SYS_pselect6_time64
case SYS_pselect6_time64:
# endif
case SYS_pselect6:
# ifdef SYS_ppoll_time64
case SYS_ppoll_time64:
# endif
case SYS_ppoll:
case SYS_epoll_pwait:
/* Used as a lazy trigger. */
case SYS_rseq:
#endif
#ifdef DEBUG
# ifdef MACOS
case SYS_open_nocancel:
# endif
# ifdef SYS_open
case SYS_open:
# endif
#endif
return false;
#ifdef LINUX
# ifdef SYS_readlink
case SYS_readlink:
# endif
case SYS_readlinkat: return !DYNAMO_OPTION(early_inject);
#endif
#ifdef SYS_openat2
case SYS_openat2:
#endif
case SYS_openat: return IS_STRING_OPTION_EMPTY(xarch_root);
default:
#ifdef VMX86_SERVER
if (is_vmkuw_sysnum(num))
return vmkuw_ignorable_system_call(num);
#endif
return true;
}
}
bool
ignorable_system_call(int num_raw, instr_t *gateway, dcontext_t *dcontext_live)
{
return ignorable_system_call_normalized(
os_normalized_sysnum(num_raw, gateway, dcontext_live));
}
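/* Argument block matching the old single-struct mmap calling convention, where
 * all six mmap parameters are passed via one user-space struct pointer.
 */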
typedef struct {
unsigned long addr;
unsigned long len;
unsigned long prot;
unsigned long flags;
unsigned long fd;
unsigned long offset;
} mmap_arg_struct_t;
static inline reg_t *
sys_param_addr(dcontext_t *dcontext, int num)
{
/* we force-inline get_mcontext() and so don't take it as a param */
priv_mcontext_t *mc = get_mcontext(dcontext);
#if defined(X86) && defined(X64)
switch (num) {
case 0: return &mc->xdi;
case 1: return &mc->xsi;
case 2: return &mc->xdx;
case 3: return &mc->r10; /* since rcx holds retaddr for syscall instr */
case 4: return &mc->r8;
case 5: return &mc->r9;
default: CLIENT_ASSERT(false, "invalid system call parameter number");
}
#else
# ifdef MACOS
/* XXX: if we don't end up using dcontext->sys_was_int here, we could
* make that field Linux-only.
*/
/* For 32-bit, the args are passed on the stack, above a retaddr slot
* (regardless of whether using a sysenter or int gateway).
*/
return ((reg_t *)mc->esp) + 1 /*retaddr*/ + num;
# endif
/* even for vsyscall where ecx (syscall) or esp (sysenter) are saved into
* ebp, the original parameter registers are not yet changed pre-syscall,
* except for ebp, which is pushed on the stack:
* 0xffffe400 55 push %ebp %esp -> %esp (%esp)
* 0xffffe401 89 cd mov %ecx -> %ebp
* 0xffffe403 0f 05 syscall -> %ecx
*
* 0xffffe400 51 push %ecx %esp -> %esp (%esp)
* 0xffffe401 52 push %edx %esp -> %esp (%esp)
* 0xffffe402 55 push %ebp %esp -> %esp (%esp)
* 0xffffe403 89 e5 mov %esp -> %ebp
* 0xffffe405 0f 34 sysenter -> %esp
*/
switch (num) {
case 0: return &mc->IF_X86_ELSE(xbx, r0);
case 1: return &mc->IF_X86_ELSE(xcx, r1);
case 2: return &mc->IF_X86_ELSE(xdx, r2);
case 3: return &mc->IF_X86_ELSE(xsi, r3);
case 4: return &mc->IF_X86_ELSE(xdi, r4);
/* FIXME: do a safe_read: but what about performance?
* See the #if 0 below, as well. */
case 5:
return IF_X86_ELSE((dcontext->sys_was_int ? &mc->xbp : ((reg_t *)mc->xsp)),
&mc->r5);
# ifdef ARM
/* AArch32 supposedly has 7 args in some cases. */
case 6: return &mc->r6;
# endif
default: CLIENT_ASSERT(false, "invalid system call parameter number");
}
#endif
return 0;
}
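/* Returns the value of syscall parameter #num for the syscall the given
 * dcontext's thread is about to execute (or just executed).
 */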
static inline reg_t
sys_param(dcontext_t *dcontext, int num)
{
return *sys_param_addr(dcontext, num);
}
void
set_syscall_param(dcontext_t *dcontext, int param_num, reg_t new_value)
{
*sys_param_addr(dcontext, param_num) = new_value;
}
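/* Returns whether the syscall whose result is stored in mc succeeded,
 * special-casing the mmap family, whose return values are addresses rather
 * than signed error codes.
 */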
static inline bool
syscall_successful(priv_mcontext_t *mc, int normalized_sysnum)
{
#ifdef MACOS
if (TEST(SYSCALL_NUM_MARKER_MACH, normalized_sysnum)) {
/* XXX: Mach syscalls vary (for some KERN_SUCCESS=0 is success,
* for others that return mach_port_t 0 is failure (I think?).
* We defer to drsyscall.
*/
return ((ptr_int_t)MCXT_SYSCALL_RES(mc) >= 0);
} else
return !TEST(EFLAGS_CF, mc->xflags);
#else
if (normalized_sysnum == IF_X64_ELSE(SYS_mmap, SYS_mmap2) ||
# if !defined(ARM) && !defined(X64)
normalized_sysnum == SYS_mmap ||
# endif
normalized_sysnum == SYS_mremap)
return mmap_syscall_succeeded((byte *)MCXT_SYSCALL_RES(mc));
return ((ptr_int_t)MCXT_SYSCALL_RES(mc) >= 0);
#endif
}
/* For non-Mac, this does nothing to indicate "success": you can pass -errno.
* For Mac, this clears CF and just sets xax. To return a 64-bit value in
* 32-bit mode, the caller must explicitly set xdx as well (we don't always
* do so b/c syscalls that just return 32-bit values do not touch xdx).
*/
static inline void
set_success_return_val(dcontext_t *dcontext, reg_t val)
{
/* since always coming from d_r_dispatch now, only need to set mcontext */
priv_mcontext_t *mc = get_mcontext(dcontext);
#ifdef MACOS
/* On MacOS, success is determined by CF, except for Mach syscalls, but
* there it doesn't hurt to set CF.
*/
mc->xflags &= ~(EFLAGS_CF);
#endif
MCXT_SYSCALL_RES(mc) = val;
}
/* Always pass a positive value for errno */
static inline void
set_failure_return_val(dcontext_t *dcontext, uint errno_val)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
#ifdef MACOS
/* On MacOS, success is determined by CF, and errno is positive */
mc->xflags |= EFLAGS_CF;
MCXT_SYSCALL_RES(mc) = errno_val;
#else
MCXT_SYSCALL_RES(mc) = -(int)errno_val;
#endif
}
DR_API
reg_t
dr_syscall_get_param(void *drcontext, int param_num)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall,
"dr_syscall_get_param() can only be called from pre-syscall event");
return sys_param(dcontext, param_num);
}
DR_API
void
dr_syscall_set_param(void *drcontext, int param_num, reg_t new_value)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall,
"dr_syscall_set_param() can only be called from a syscall event");
*sys_param_addr(dcontext, param_num) = new_value;
}
DR_API
reg_t
dr_syscall_get_result(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
"dr_syscall_get_param() can only be called from post-syscall event");
return MCXT_SYSCALL_RES(get_mcontext(dcontext));
}
DR_API
bool
dr_syscall_get_result_ex(void *drcontext, dr_syscall_result_info_t *info INOUT)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
priv_mcontext_t *mc = get_mcontext(dcontext);
CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
"only call dr_syscall_get_param_ex() from post-syscall event");
CLIENT_ASSERT(info != NULL, "invalid parameter");
CLIENT_ASSERT(info->size == sizeof(*info), "invalid dr_syscall_result_info_t size");
if (info->size != sizeof(*info))
return false;
info->value = MCXT_SYSCALL_RES(mc);
info->succeeded = syscall_successful(mc, dcontext->sys_num);
if (info->use_high) {
/* MacOS has some 32-bit syscalls that return 64-bit values in
* xdx:xax, but the other syscalls don't clear xdx, so we can't easily
* return a 64-bit value all the time.
*/
IF_X86_ELSE({ info->high = mc->xdx; }, { ASSERT_NOT_REACHED(); });
}
if (info->use_errno) {
if (info->succeeded)
info->errno_value = 0;
else {
info->errno_value = (uint)IF_LINUX(-(int)) MCXT_SYSCALL_RES(mc);
}
}
return true;
}
DR_API
void
dr_syscall_set_result(void *drcontext, reg_t value)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall,
"dr_syscall_set_result() can only be called from a syscall event");
/* For non-Mac, the caller can still pass -errno and this will work */
set_success_return_val(dcontext, value);
}
DR_API
bool
dr_syscall_set_result_ex(void *drcontext, dr_syscall_result_info_t *info)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
priv_mcontext_t *mc = get_mcontext(dcontext);
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall,
"dr_syscall_set_result() can only be called from a syscall event");
CLIENT_ASSERT(info->size == sizeof(*info), "invalid dr_syscall_result_info_t size");
if (info->size != sizeof(*info))
return false;
if (info->use_errno) {
if (info->succeeded) {
/* a weird case but we let the user combine these */
set_success_return_val(dcontext, info->errno_value);
} else
set_failure_return_val(dcontext, info->errno_value);
} else {
if (info->succeeded)
set_success_return_val(dcontext, info->value);
else {
/* use this to set CF, even though it might negate the value */
set_failure_return_val(dcontext, (uint)info->value);
/* now set the value, overriding set_failure_return_val() */
MCXT_SYSCALL_RES(mc) = info->value;
}
if (info->use_high) {
/* MacOS has some 32-bit syscalls that return 64-bit values in
* xdx:xax.
*/
IF_X86_ELSE({ mc->xdx = info->high; }, { ASSERT_NOT_REACHED(); });
}
}
return true;
}
DR_API
void
dr_syscall_set_sysnum(void *drcontext, int new_num)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
priv_mcontext_t *mc = get_mcontext(dcontext);
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall,
"dr_syscall_set_sysnum() can only be called from a syscall event");
MCXT_SYSNUM_REG(mc) = new_num;
}
DR_API
void
dr_syscall_invoke_another(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
"dr_syscall_invoke_another() can only be called from post-syscall "
"event");
LOG(THREAD, LOG_SYSCALLS, 2, "invoking additional syscall on client request\n");
dcontext->client_data->invoke_another_syscall = true;
#ifdef X86
if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
priv_mcontext_t *mc = get_mcontext(dcontext);
/* restore xbp to xsp */
mc->xbp = mc->xsp;
}
#endif /* X86 */
/* for x64 we don't need to copy xcx into r10 b/c we use r10 as our param */
}
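/* Returns whether sysnum (with the clone-style flags in "flags") creates a new
 * thread sharing this address space, as opposed to a new process.
 */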
static inline bool
is_thread_create_syscall_helper(ptr_uint_t sysnum, uint64 flags)
{
#ifdef MACOS
/* XXX i#1403: we need earlier injection to intercept
* bsdthread_register in order to capture workqueue threads.
*/
return (sysnum == SYS_bsdthread_create || sysnum == SYS_vfork);
#else
# ifdef SYS_vfork
if (sysnum == SYS_vfork)
return true;
# endif
# ifdef LINUX
if ((sysnum == SYS_clone || sysnum == SYS_clone3) && TEST(CLONE_VM, flags))
return true;
# endif
return false;
#endif
}
bool
is_thread_create_syscall(dcontext_t *dcontext _IF_LINUX(void *maybe_clone_args))
{
priv_mcontext_t *mc = get_mcontext(dcontext);
uint64 flags = sys_param(dcontext, 0);
ptr_uint_t sysnum = MCXT_SYSNUM_REG(mc);
#ifdef LINUX
/* For clone3, we use flags from the clone_args that was obtained using a
     * safe read from the user-provided syscall args.
*/
if (sysnum == SYS_clone3) {
ASSERT(maybe_clone_args != NULL);
flags = ((clone3_syscall_args_t *)maybe_clone_args)->flags;
}
#endif
return is_thread_create_syscall_helper(sysnum, flags);
}
#ifdef LINUX
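/* Reassembles the 64-bit clone3 flags that were stored across sys_param3 (low
 * 32 bits) and sys_param4 (high 32 bits) at pre-syscall time.
 */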
static uint64
get_stored_clone3_flags(dcontext_t *dcontext)
{
return ((uint64)dcontext->sys_param4 << 32) | dcontext->sys_param3;
}
#endif
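/* Post-syscall variant of is_thread_create_syscall(), using the parameters
 * saved in the dcontext at pre-syscall time.
 */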
bool
was_thread_create_syscall(dcontext_t *dcontext)
{
uint64 flags = dcontext->sys_param0;
#ifdef LINUX
if (dcontext->sys_num == SYS_clone3)
flags = get_stored_clone3_flags(dcontext);
#endif
return is_thread_create_syscall_helper(dcontext->sys_num, flags);
}
bool
is_sigreturn_syscall_number(int sysnum)
{
#ifdef MACOS
return sysnum == SYS_sigreturn;
#else
return (IF_NOT_X64(sysnum == SYS_sigreturn ||) sysnum == SYS_rt_sigreturn);
#endif
}
bool
is_sigreturn_syscall(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
return is_sigreturn_syscall_number(MCXT_SYSNUM_REG(mc));
}
bool
was_sigreturn_syscall(dcontext_t *dcontext)
{
return is_sigreturn_syscall_number(dcontext->sys_num);
}
/* process a signal this process/thread is sending to itself */
static void
handle_self_signal(dcontext_t *dcontext, uint sig)
{
/* FIXME PR 297903: watch for all DEFAULT_TERMINATE signals,
* and for any thread in the group, not just self.
*
* FIXME PR 297033: watch for SIGSTOP and SIGCONT.
*
* With -intercept_all_signals, we only need to watch for SIGKILL
* and SIGSTOP here, and we avoid the FIXMEs below. If it's fine
* for DR not to clean up on a SIGKILL, then SIGSTOP is all that's
* left (at least once we have PR 297033 and are intercepting the
* various STOP variations and CONT).
*/
if (sig == SIGABRT && !DYNAMO_OPTION(intercept_all_signals)) {
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 1,
"thread " TIDFMT " sending itself a SIGABRT\n", d_r_get_thread_id());
KSTOP(num_exits_dir_syscall);
/* FIXME: need to check whether app has a handler for SIGABRT! */
/* FIXME PR 211180/6723: this will do SYS_exit rather than the SIGABRT.
* Should do set_default_signal_action(SIGABRT) (and set a flag so
* no races w/ another thread re-installing?) and then SYS_kill.
*/
block_cleanup_and_terminate(dcontext, SYSNUM_EXIT_THREAD, -1, 0,
(is_last_app_thread() && !dynamo_exited),
IF_MACOS_ELSE(dcontext->thread_port, 0), 0);
ASSERT_NOT_REACHED();
}
}
/***************************************************************************
* EXECVE
*/
/* when adding here, also add to the switch in handle_execve if necessary */
enum {
ENV_PROP_RUNUNDER,
ENV_PROP_OPTIONS,
ENV_PROP_EXECVE_LOGDIR,
ENV_PROP_EXE_PATH,
ENV_PROP_CONFIGDIR,
};
static const char *const env_to_propagate[] = {
/* these must line up with the enum */
DYNAMORIO_VAR_RUNUNDER,
DYNAMORIO_VAR_OPTIONS,
/* DYNAMORIO_VAR_EXECVE_LOGDIR is different from DYNAMORIO_VAR_LOGDIR:
* - DYNAMORIO_VAR_LOGDIR: a parent dir inside which a new dir will be created;
     * - DYNAMORIO_VAR_EXECVE_LOGDIR: the same subdir used by the pre-execve process.
* Xref comment in create_log_dir about their precedence.
*/
DYNAMORIO_VAR_EXECVE_LOGDIR,
/* i#909: needed for early injection */
DYNAMORIO_VAR_EXE_PATH,
/* these will only be propagated if they exist */
DYNAMORIO_VAR_CONFIGDIR,
};
#define NUM_ENV_TO_PROPAGATE (sizeof(env_to_propagate) / sizeof(env_to_propagate[0]))
/* Called at pre-SYS_execve to append DR vars in the target process env vars list.
 * For late injection via libdrpreload, we call this for *all* children, because
* even if -no_follow_children is specified, a whitelist will still ask for takeover
* and it's libdrpreload who checks the whitelist.
* For -early, however, we check the config ahead of time and only call this routine
* if we in fact want to inject.
* XXX i#1679: these parent vs child differences bring up corner cases of which
* config dir takes precedence (if the child clears the HOME env var, e.g.).
*/
static void
add_dr_env_vars(dcontext_t *dcontext, char *inject_library_path, const char *app_path)
{
char **envp = (char **)sys_param(dcontext, 2);
int idx, j, preload = -1, ldpath = -1;
int num_old, num_new, sz;
bool need_var[NUM_ENV_TO_PROPAGATE];
int prop_idx[NUM_ENV_TO_PROPAGATE];
bool ldpath_us = false, preload_us = false;
char **new_envp, *var, *old;
/* check if any var needs to be propagated */
for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
prop_idx[j] = -1;
if (get_config_val(env_to_propagate[j]) == NULL)
need_var[j] = false;
else
need_var[j] = true;
}
/* Special handling for DYNAMORIO_VAR_EXECVE_LOGDIR:
* we only need it if follow_children is true and PROCESS_DIR exists.
*/
if (DYNAMO_OPTION(follow_children) && get_log_dir(PROCESS_DIR, NULL, NULL))
need_var[ENV_PROP_EXECVE_LOGDIR] = true;
else
need_var[ENV_PROP_EXECVE_LOGDIR] = false;
if (DYNAMO_OPTION(early_inject))
need_var[ENV_PROP_EXE_PATH] = true;
/* iterate the env in target process */
if (envp == NULL) {
LOG(THREAD, LOG_SYSCALLS, 3, "\tenv is NULL\n");
idx = 0;
} else {
for (idx = 0; envp[idx] != NULL; idx++) {
/* execve env vars should never be set here */
ASSERT(strstr(envp[idx], DYNAMORIO_VAR_EXECVE) != envp[idx]);
for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
if (strstr(envp[idx], env_to_propagate[j]) == envp[idx]) {
/* If conflict between env and cfg, we assume those env vars
* are for DR usage only, and replace them with cfg value.
*/
prop_idx[j] = idx; /* remember the index for replacing later */
break;
}
}
if (!DYNAMO_OPTION(early_inject) &&
strstr(envp[idx], "LD_LIBRARY_PATH=") == envp[idx]) {
ldpath = idx;
if (strstr(envp[idx], inject_library_path) != NULL)
ldpath_us = true;
}
if (!DYNAMO_OPTION(early_inject) &&
strstr(envp[idx], "LD_PRELOAD=") == envp[idx]) {
preload = idx;
if (strstr(envp[idx], DYNAMORIO_PRELOAD_NAME) != NULL &&
strstr(envp[idx], get_dynamorio_library_path()) != NULL) {
preload_us = true;
}
}
LOG(THREAD, LOG_SYSCALLS, 3, "\tenv %d: %s\n", idx, envp[idx]);
}
}
/* We want to add new env vars, so we create a new envp
* array. We have to deallocate them and restore the old
* envp if execve fails; if execve succeeds, the address
* space is reset so we don't need to do anything.
*/
num_old = idx;
/* how many new env vars we need add */
num_new = 2 + /* execve indicator var plus final NULL */
(DYNAMO_OPTION(early_inject)
? 0
: (((preload < 0) ? 1 : 0) + ((ldpath < 0) ? 1 : 0)));
for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
if ((DYNAMO_OPTION(follow_children) || j == ENV_PROP_EXE_PATH) && need_var[j] &&
prop_idx[j] < 0)
num_new++;
}
/* setup new envp */
new_envp =
heap_alloc(dcontext, sizeof(char *) * (num_old + num_new) HEAPACCT(ACCT_OTHER));
/* copy old envp */
memcpy(new_envp, envp, sizeof(char *) * num_old);
/* change/add preload and ldpath if necessary */
if (!DYNAMO_OPTION(early_inject) && !preload_us) {
int idx_preload;
LOG(THREAD, LOG_SYSCALLS, 1,
"WARNING: execve env does NOT preload DynamoRIO, forcing it!\n");
if (preload >= 0) {
/* replace the existing preload */
const char *dr_lib_path = get_dynamorio_library_path();
sz = strlen(envp[preload]) + strlen(DYNAMORIO_PRELOAD_NAME) +
strlen(dr_lib_path) + 3;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
old = envp[preload] + strlen("LD_PRELOAD=");
snprintf(var, sz, "LD_PRELOAD=%s %s %s", DYNAMORIO_PRELOAD_NAME, dr_lib_path,
old);
idx_preload = preload;
} else {
/* add new preload */
const char *dr_lib_path = get_dynamorio_library_path();
sz = strlen("LD_PRELOAD=") + strlen(DYNAMORIO_PRELOAD_NAME) +
strlen(dr_lib_path) + 2;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
snprintf(var, sz, "LD_PRELOAD=%s %s", DYNAMORIO_PRELOAD_NAME, dr_lib_path);
idx_preload = idx++;
}
*(var + sz - 1) = '\0'; /* null terminate */
new_envp[idx_preload] = var;
LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", idx_preload,
new_envp[idx_preload]);
}
if (!DYNAMO_OPTION(early_inject) && !ldpath_us) {
int idx_ldpath;
if (ldpath >= 0) {
sz = strlen(envp[ldpath]) + strlen(inject_library_path) + 2;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
old = envp[ldpath] + strlen("LD_LIBRARY_PATH=");
snprintf(var, sz, "LD_LIBRARY_PATH=%s:%s", inject_library_path, old);
idx_ldpath = ldpath;
} else {
sz = strlen("LD_LIBRARY_PATH=") + strlen(inject_library_path) + 1;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
snprintf(var, sz, "LD_LIBRARY_PATH=%s", inject_library_path);
idx_ldpath = idx++;
}
*(var + sz - 1) = '\0'; /* null terminate */
new_envp[idx_ldpath] = var;
LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", idx_ldpath,
new_envp[idx_ldpath]);
}
/* propagating DR env vars */
for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
const char *val = "";
if (!need_var[j])
continue;
if (!DYNAMO_OPTION(follow_children) && j != ENV_PROP_EXE_PATH)
continue;
switch (j) {
case ENV_PROP_RUNUNDER:
ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_RUNUNDER) == 0);
            /* Must pass RUNUNDER_ALL to get child injected if it has no app config.
* If rununder var is already set we assume it's set to 1.
*/
ASSERT((RUNUNDER_ON | RUNUNDER_ALL) == 0x3); /* else, update "3" */
val = "3";
break;
case ENV_PROP_OPTIONS:
ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_OPTIONS) == 0);
val = d_r_option_string;
break;
case ENV_PROP_EXECVE_LOGDIR:
/* we use PROCESS_DIR for DYNAMORIO_VAR_EXECVE_LOGDIR */
ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_EXECVE_LOGDIR) == 0);
ASSERT(get_log_dir(PROCESS_DIR, NULL, NULL));
break;
case ENV_PROP_EXE_PATH:
ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_EXE_PATH) == 0);
val = app_path;
break;
default:
val = getenv(env_to_propagate[j]);
if (val == NULL)
val = "";
break;
}
if (j == ENV_PROP_EXECVE_LOGDIR) {
uint logdir_length;
get_log_dir(PROCESS_DIR, NULL, &logdir_length);
/* logdir_length includes the terminating NULL */
sz = strlen(DYNAMORIO_VAR_EXECVE_LOGDIR) + logdir_length + 1 /* '=' */;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
snprintf(var, sz, "%s=", DYNAMORIO_VAR_EXECVE_LOGDIR);
get_log_dir(PROCESS_DIR, var + strlen(var), &logdir_length);
} else {
sz = strlen(env_to_propagate[j]) + strlen(val) + 2 /* '=' + null */;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
snprintf(var, sz, "%s=%s", env_to_propagate[j], val);
}
*(var + sz - 1) = '\0'; /* null terminate */
prop_idx[j] = (prop_idx[j] >= 0) ? prop_idx[j] : idx++;
new_envp[prop_idx[j]] = var;
LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", prop_idx[j],
new_envp[prop_idx[j]]);
}
if (!DYNAMO_OPTION(follow_children) && !DYNAMO_OPTION(early_inject)) {
if (prop_idx[ENV_PROP_RUNUNDER] >= 0) {
/* disable auto-following of this execve, yet still allow preload
* on other side to inject if config file exists.
* kind of hacky mangle here:
*/
ASSERT(!need_var[ENV_PROP_RUNUNDER]);
ASSERT(new_envp[prop_idx[ENV_PROP_RUNUNDER]][0] == 'D');
new_envp[prop_idx[ENV_PROP_RUNUNDER]][0] = 'X';
}
}
sz = strlen(DYNAMORIO_VAR_EXECVE) + 4;
/* we always pass this var to indicate "post-execve" */
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
/* PR 458917: we overload this to also pass our gdt index */
ASSERT(os_tls_get_gdt_index(dcontext) < 100 &&
os_tls_get_gdt_index(dcontext) >= -1); /* only 2 chars allocated */
snprintf(var, sz, "%s=%02d", DYNAMORIO_VAR_EXECVE, os_tls_get_gdt_index(dcontext));
*(var + sz - 1) = '\0'; /* null terminate */
new_envp[idx++] = var;
LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", idx - 1, new_envp[idx - 1]);
/* must end with NULL */
new_envp[idx++] = NULL;
ASSERT((num_new + num_old) == idx);
/* update syscall param */
*sys_param_addr(dcontext, 2) = (reg_t)new_envp; /* OUT */
/* store for reset in case execve fails, and for cleanup if
* this is a vfork thread
*/
dcontext->sys_param0 = (reg_t)envp;
dcontext->sys_param1 = (reg_t)new_envp;
}
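/* Reader callback handed to find_script_interpreter(): reads up to count bytes
 * from the file at pathname into buf and returns the number of bytes read, or
 * -1 if the file cannot be opened.
 */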
static ssize_t
script_file_reader(const char *pathname, void *buf, size_t count)
{
/* FIXME i#2090: Check file is executable. */
file_t file = os_open(pathname, OS_OPEN_READ);
size_t len;
if (file == INVALID_FILE)
return -1;
len = os_read(file, buf, count);
os_close(file);
return len;
}
/* For early injection, recognise when the executable is a script ("#!") and
* modify the syscall parameters to invoke a script interpreter instead. In
* this case we will have allocated memory here but we expect the caller to
* do a non-failing execve of libdynamorio.so and therefore not to have to
* free the memory. That is one reason for checking that the (final) script
* interpreter really is an executable binary.
* We recognise one error case here and return the non-zero error code (ELOOP)
* but in other cases we leave it up to the caller to detect the error, which
* it may do by attempting to exec the path natively, expecting this to fail,
* though there is the obvious danger that the file might have been modified
* just before the exec.
* We do not, and cannot easily, handle a file that is executable but not
* readable. Currently such files will be executed without DynamoRIO though
* in some situations it would be more helpful to stop with an error.
*
* XXX: There is a minor transparency bug with misformed binaries. For example,
* execve can return EINVAL if the ELF executable has more than one PT_INTERP
* segment but we do not check this and so under DynamoRIO the error would be
* detected only after the exec, if we are following the child.
*
* FIXME i#2091: There is a memory leak if a script is recognised, and it is
* later decided not to inject (see where should_inject is set), and the exec
* fails, because in this case there is no mechanism for freeing the memory
* allocated in this function. This function should return sufficient information
* for the caller to free the memory, which it can do so before the exec if it
* reverts to the original syscall arguments and execs the script.
*/
static int
handle_execve_script(dcontext_t *dcontext)
{
char *fname = (char *)sys_param(dcontext, 0);
char **orig_argv = (char **)sys_param(dcontext, 1);
script_interpreter_t *script;
int ret = 0;
script = global_heap_alloc(sizeof(*script) HEAPACCT(ACCT_OTHER));
if (!find_script_interpreter(script, fname, script_file_reader))
goto free_and_return;
if (script->argc == 0) {
ret = ELOOP;
goto free_and_return;
}
/* Check that the final interpreter is an executable binary. */
{
file_t file = os_open(script->argv[0], OS_OPEN_READ);
bool is64;
if (file == INVALID_FILE)
goto free_and_return;
if (!module_file_is_module64(file, &is64, NULL)) {
os_close(file);
goto free_and_return;
}
}
{
size_t i, orig_argc = 0;
char **new_argv;
/* Concatenate new arguments and original arguments. */
while (orig_argv[orig_argc] != NULL)
++orig_argc;
if (orig_argc == 0)
orig_argc = 1;
new_argv = global_heap_alloc((script->argc + orig_argc + 1) *
sizeof(char *) HEAPACCT(ACCT_OTHER));
for (i = 0; i < script->argc; i++)
new_argv[i] = script->argv[i];
new_argv[script->argc] = fname; /* replaces orig_argv[0] */
for (i = 1; i < orig_argc; i++)
new_argv[script->argc + i] = orig_argv[i];
new_argv[script->argc + orig_argc] = NULL;
/* Modify syscall parameters. */
*sys_param_addr(dcontext, 0) = (reg_t)new_argv[0];
*sys_param_addr(dcontext, 1) = (reg_t)new_argv;
}
return 0;
free_and_return:
global_heap_free(script, sizeof(*script) HEAPACCT(ACCT_OTHER));
return ret;
}
static int
handle_execve(dcontext_t *dcontext)
{
/* in /usr/src/linux/arch/i386/kernel/process.c:
* asmlinkage int sys_execve(struct pt_regs regs) { ...
* error = do_execve(filename, (char **) regs.xcx, (char **) regs.xdx, ®s);
* in fs/exec.c:
* int do_execve(char * filename, char ** argv, char ** envp, struct pt_regs * regs)
*/
/* We need to make sure we get injected into the new image:
* we simply make sure LD_PRELOAD contains us, and that our directory
* is on LD_LIBRARY_PATH (seems not to work to put absolute paths in
* LD_PRELOAD).
* FIXME: this doesn't work for setuid programs
*
* For -follow_children we also pass the current DYNAMORIO_RUNUNDER and
* DYNAMORIO_OPTIONS and logdir to the new image to support a simple
* run-all-children model without bothering w/ setting up config files for
* children, and to support injecting across execve that does not
* preserve $HOME.
* FIXME i#287/PR 546544: we'll need to propagate DYNAMORIO_AUTOINJECT too
* once we use it in preload
*/
/* FIXME i#191: supposed to preserve things like pending signal
* set across execve: going to ignore for now
*/
char *fname;
bool x64 = IF_X64_ELSE(true, false);
bool expect_to_fail = false;
bool should_inject;
file_t file;
char *inject_library_path;
char rununder_buf[16]; /* just an integer printed in ascii */
bool app_specific, from_env, rununder_on;
#if defined(LINUX) || defined(DEBUG)
const char **argv;
#endif
if (DYNAMO_OPTION(follow_children) && DYNAMO_OPTION(early_inject)) {
int ret = handle_execve_script(dcontext);
if (ret != 0)
return ret;
}
fname = (char *)sys_param(dcontext, 0);
#if defined(LINUX) || defined(DEBUG)
argv = (const char **)sys_param(dcontext, 1);
#endif
#ifdef LINUX
if (DYNAMO_OPTION(early_inject) && symlink_is_self_exe(fname)) {
/* i#907: /proc/self/exe points at libdynamorio.so. Make sure we run
* the right thing here.
*/
fname = get_application_name();
}
#endif
LOG(GLOBAL, LOG_ALL, 1,
"\n---------------------------------------------------------------------------"
"\n");
LOG(THREAD, LOG_ALL, 1,
"\n---------------------------------------------------------------------------"
"\n");
DODEBUG({
int i;
SYSLOG_INTERNAL_INFO("-- execve %s --", fname);
LOG(THREAD, LOG_SYSCALLS, 1, "syscall: execve %s\n", fname);
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 1, "execve %s\n", fname);
if (d_r_stats->loglevel >= 3) {
if (argv == NULL) {
LOG(THREAD, LOG_SYSCALLS, 3, "\targs are NULL\n");
} else {
for (i = 0; argv[i] != NULL; i++) {
LOG(THREAD, LOG_SYSCALLS, 2, "\targ %d: len=%d\n", i,
strlen(argv[i]));
LOG(THREAD, LOG_SYSCALLS, 3, "\targ %d: %s\n", i, argv[i]);
}
}
}
});
/* i#237/PR 498284: if we're a vfork "thread" we're really in a different
* process and if we exec then the parent process will still be alive. We
* can't easily clean our own state (dcontext, dstack, etc.) up in our
* parent process: we need it to invoke the syscall and the syscall might
* fail. We could expand cleanup_and_terminate to also be able to invoke
* SYS_execve: but execve seems more likely to fail than termination
* syscalls. Our solution is to mark this thread as "execve" and hide it
* from regular thread queries; we clean it up in the process-exiting
* synch_with_thread(), or if the same parent thread performs another vfork
* (to prevent heap accumulation from repeated vfork+execve). Since vfork
* on linux suspends the parent, there cannot be any races with the execve
* syscall completing: there can't even be peer vfork threads, so we could
* set a flag and clean up in d_r_dispatch, but that seems overkill. (If vfork
* didn't suspend the parent we'd need to touch a marker file or something
* to know the execve was finished.)
*/
mark_thread_execve(dcontext->thread_record, true);
#ifdef STATIC_LIBRARY
/* no way we can inject, we just lose control */
SYSLOG_INTERNAL_WARNING("WARNING: static DynamoRIO library, losing control on "
"execve");
return 0;
#endif
/* Issue 20: handle cross-architecture execve */
file = os_open(fname, OS_OPEN_READ);
if (file != INVALID_FILE) {
if (!module_file_is_module64(file, &x64,
NULL /*only care about primary==execve*/))
expect_to_fail = true;
os_close(file);
} else
expect_to_fail = true;
inject_library_path =
IF_X64_ELSE(x64, !x64) ? dynamorio_library_path : dynamorio_alt_arch_path;
should_inject = DYNAMO_OPTION(follow_children);
if (get_config_val_other_app(get_short_name(fname), get_process_id(),
x64 ? DR_PLATFORM_64BIT : DR_PLATFORM_32BIT,
DYNAMORIO_VAR_RUNUNDER, rununder_buf,
BUFFER_SIZE_ELEMENTS(rununder_buf), &app_specific,
&from_env, NULL /* 1config is ok */)) {
if (should_inject_from_rununder(rununder_buf, app_specific, from_env,
&rununder_on))
should_inject = rununder_on;
}
if (should_inject)
add_dr_env_vars(dcontext, inject_library_path, fname);
else {
dcontext->sys_param0 = 0;
dcontext->sys_param1 = 0;
}
#ifdef LINUX
/* We have to be accurate with expect_to_fail as we cannot come back
* and fail the syscall once the kernel execs DR!
*/
if (should_inject && DYNAMO_OPTION(early_inject) && !expect_to_fail) {
/* i#909: change the target image to libdynamorio.so */
const char *drpath = IF_X64_ELSE(x64, !x64) ? dynamorio_library_filepath
: dynamorio_alt_arch_filepath;
TRY_EXCEPT(dcontext, /* try */
{
if (symlink_is_self_exe(argv[0])) {
/* we're out of sys_param entries so we assume argv[0] == fname
*/
dcontext->sys_param3 = (reg_t)argv;
argv[0] = fname; /* XXX: handle readable but not writable! */
} else
dcontext->sys_param3 = 0; /* no restore in post */
dcontext->sys_param4 =
(reg_t)fname; /* store for restore in post */
*sys_param_addr(dcontext, 0) = (reg_t)drpath;
LOG(THREAD, LOG_SYSCALLS, 2, "actual execve on: %s\n",
(char *)sys_param(dcontext, 0));
},
/* except */
{
dcontext->sys_param3 = 0; /* no restore in post */
dcontext->sys_param4 = 0; /* no restore in post */
LOG(THREAD, LOG_SYSCALLS, 2,
"argv is unreadable, expect execve to fail\n");
});
} else {
dcontext->sys_param3 = 0; /* no restore in post */
dcontext->sys_param4 = 0; /* no restore in post */
}
#endif
/* we need to clean up the .1config file here. if the execve fails,
* we'll just live w/o dynamic option re-read.
*/
d_r_config_exit();
return 0;
}
static void
handle_execve_post(dcontext_t *dcontext)
{
/* if we get here it means execve failed (doesn't return on success),
* or we did an execve from a vfork and its memory changes are visible
* in the parent process.
* we have to restore env to how it was and free the allocated heap.
*/
char **old_envp = (char **)dcontext->sys_param0;
char **new_envp = (char **)dcontext->sys_param1;
#ifdef STATIC_LIBRARY
/* nothing to clean up */
return;
#endif
#ifdef LINUX
if (dcontext->sys_param4 != 0) {
/* restore original /proc/.../exe */
*sys_param_addr(dcontext, 0) = dcontext->sys_param4;
if (dcontext->sys_param3 != 0) {
/* restore original argv[0] */
const char **argv = (const char **)dcontext->sys_param3;
argv[0] = (const char *)dcontext->sys_param4;
}
}
#endif
if (new_envp != NULL) {
int i;
LOG(THREAD, LOG_SYSCALLS, 2, "\tcleaning up our env vars\n");
/* we replaced existing ones and/or added new ones.
* we can't compare to old_envp b/c it may have changed by now.
*/
for (i = 0; new_envp[i] != NULL; i++) {
if (is_dynamo_address((byte *)new_envp[i])) {
heap_free(dcontext, new_envp[i],
sizeof(char) * (strlen(new_envp[i]) + 1) HEAPACCT(ACCT_OTHER));
}
}
i++; /* need to de-allocate final null slot too */
heap_free(dcontext, new_envp, sizeof(char *) * i HEAPACCT(ACCT_OTHER));
/* restore prev envp if we're post-syscall */
if (!dcontext->thread_record->execve)
*sys_param_addr(dcontext, 2) = (reg_t)old_envp;
}
}
/* i#237/PR 498284: to avoid accumulation of thread state we clean up a vfork
* child who invoked execve here so we have at most one outstanding thread. we
* also clean up at process exit and before thread creation. we could do this
* in d_r_dispatch but too rare to be worth a flag check there.
*/
static void
cleanup_after_vfork_execve(dcontext_t *dcontext)
{
thread_record_t **threads;
int num_threads, i;
if (num_execve_threads == 0)
return;
d_r_mutex_lock(&thread_initexit_lock);
get_list_of_threads_ex(&threads, &num_threads, true /*include execve*/);
for (i = 0; i < num_threads; i++) {
if (threads[i]->execve) {
LOG(THREAD, LOG_SYSCALLS, 2, "cleaning up earlier vfork thread " TIDFMT "\n",
threads[i]->id);
dynamo_other_thread_exit(threads[i]);
}
}
d_r_mutex_unlock(&thread_initexit_lock);
global_heap_free(threads,
num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
}
static void
set_stdfile_fileno(stdfile_t **stdfile, file_t file_no)
{
#ifdef STDFILE_FILENO
(*stdfile)->STDFILE_FILENO = file_no;
#else
# warning stdfile_t is opaque; DynamoRIO will not set fds of libc FILEs.
/* i#1973: musl libc support (and potentially other non-glibcs) */
/* only called by handle_close_pre(), so warning is specific to that. */
SYSLOG_INTERNAL_WARNING_ONCE(
"DynamoRIO cannot set the file descriptors of private libc FILEs on "
"this platform. Client usage of stdio.h stdin, stdout, or stderr may "
"no longer work as expected, because the app is closing the UNIX fds "
"backing these.");
#endif
}
/* returns whether to execute syscall */
static bool
handle_close_generic_pre(dcontext_t *dcontext, file_t fd, bool set_return_val)
{
LOG(THREAD, LOG_SYSCALLS, 3, "syscall: close fd %d\n", fd);
/* prevent app from closing our files */
if (fd_is_dr_owned(fd)) {
SYSLOG_INTERNAL_WARNING_ONCE("app trying to close DR file(s)");
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app trying to close DR file %d! Not allowing it.\n", fd);
if (set_return_val) {
if (DYNAMO_OPTION(fail_on_stolen_fds)) {
set_failure_return_val(dcontext, EBADF);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else
set_success_return_val(dcontext, 0);
}
return false; /* do not execute syscall */
}
/* Xref PR 258731 - duplicate STDOUT/STDERR when app closes them so we (or
* a client) can continue to use them for logging. */
if (DYNAMO_OPTION(dup_stdout_on_close) && fd == STDOUT) {
our_stdout = fd_priv_dup(fd);
if (our_stdout < 0) /* no private fd available */
our_stdout = dup_syscall(fd);
if (our_stdout >= 0)
fd_mark_close_on_exec(our_stdout);
fd_table_add(our_stdout, 0);
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app is closing stdout=%d - duplicating descriptor for "
"DynamoRIO usage got %d.\n",
fd, our_stdout);
if (privmod_stdout != NULL && INTERNAL_OPTION(private_loader)) {
/* update the privately loaded libc's stdout _fileno. */
set_stdfile_fileno(privmod_stdout, our_stdout);
}
}
if (DYNAMO_OPTION(dup_stderr_on_close) && fd == STDERR) {
our_stderr = fd_priv_dup(fd);
if (our_stderr < 0) /* no private fd available */
our_stderr = dup_syscall(fd);
if (our_stderr >= 0)
fd_mark_close_on_exec(our_stderr);
fd_table_add(our_stderr, 0);
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app is closing stderr=%d - duplicating descriptor for "
"DynamoRIO usage got %d.\n",
fd, our_stderr);
if (privmod_stderr != NULL && INTERNAL_OPTION(private_loader)) {
/* update the privately loaded libc's stderr _fileno. */
set_stdfile_fileno(privmod_stderr, our_stderr);
}
}
if (DYNAMO_OPTION(dup_stdin_on_close) && fd == STDIN) {
our_stdin = fd_priv_dup(fd);
if (our_stdin < 0) /* no private fd available */
our_stdin = dup_syscall(fd);
if (our_stdin >= 0)
fd_mark_close_on_exec(our_stdin);
fd_table_add(our_stdin, 0);
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app is closing stdin=%d - duplicating descriptor for "
"DynamoRIO usage got %d.\n",
fd, our_stdin);
if (privmod_stdin != NULL && INTERNAL_OPTION(private_loader)) {
            /* update the privately loaded libc's stdin _fileno. */
set_stdfile_fileno(privmod_stdin, our_stdin);
}
}
return true;
}
static bool
handle_close_pre(dcontext_t *dcontext)
{
return handle_close_generic_pre(dcontext, (uint)sys_param(dcontext, 0),
true /*set_return_val*/);
}
#ifdef SYS_close_range
static bool
handle_close_range_pre(dcontext_t *dcontext, file_t fd)
{
return handle_close_generic_pre(dcontext, fd, false /*set_return_val*/);
}
#endif
/***************************************************************************/
/* Used to obtain the pc of the syscall instr itself when the dcontext dc
* is currently in a syscall handler.
* Alternatively for sysenter we could set app_sysenter_instr_addr for Linux.
*/
#define SYSCALL_PC(dc) \
((get_syscall_method() == SYSCALL_METHOD_INT || \
get_syscall_method() == SYSCALL_METHOD_SYSCALL) \
? (ASSERT(SYSCALL_LENGTH == INT_LENGTH), POST_SYSCALL_PC(dc) - INT_LENGTH) \
: (vsyscall_syscall_end_pc - SYSENTER_LENGTH))
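/* Handles SYS_exit and the whole-process exit syscall (SYSNUM_EXIT_PROCESS):
 * determines whether the entire process (vs just this thread or thread group)
 * is exiting, synchronizes with and cleans up any other threads in this group
 * as needed, and terminates via block_cleanup_and_terminate() (does not return).
 */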
static void
handle_exit(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
bool exit_process = false;
if (dcontext->sys_num == SYSNUM_EXIT_PROCESS) {
/* We can have multiple thread groups within the same address space.
* We need to know whether this is the only group left.
* FIXME: we can have races where new threads are created after our
* check: we'll live with that for now, but the right approach is to
* suspend all threads via synch_with_all_threads(), do the check,
* and if exit_process then exit w/o resuming: though have to
* coordinate lock access w/ cleanup_and_terminate.
* Xref i#94. Xref PR 541760.
*/
process_id_t mypid = get_process_id();
thread_record_t **threads;
int num_threads, i;
exit_process = true;
d_r_mutex_lock(&thread_initexit_lock);
get_list_of_threads(&threads, &num_threads);
for (i = 0; i < num_threads; i++) {
if (threads[i]->pid != mypid && !IS_CLIENT_THREAD(threads[i]->dcontext)) {
exit_process = false;
break;
}
}
if (!exit_process) {
/* We need to clean up the other threads in our group here. */
thread_id_t myid = d_r_get_thread_id();
priv_mcontext_t mcontext;
DEBUG_DECLARE(thread_synch_result_t synch_res;)
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"SYS_exit_group %d not final group: %d cleaning up just "
"threads in group\n",
get_process_id(), d_r_get_thread_id());
/* Set where we are to handle reciprocal syncs */
copy_mcontext(mc, &mcontext);
mc->pc = SYSCALL_PC(dcontext);
for (i = 0; i < num_threads; i++) {
if (threads[i]->id != myid && threads[i]->pid == mypid) {
/* See comments in dynamo_process_exit_cleanup(): we terminate
* to make cleanup easier, but may want to switch to shifting
* the target thread to a stack-free loop.
*/
DEBUG_DECLARE(synch_res =)
synch_with_thread(
threads[i]->id, true /*block*/, true /*have initexit lock*/,
THREAD_SYNCH_VALID_MCONTEXT, THREAD_SYNCH_TERMINATED_AND_CLEANED,
THREAD_SYNCH_SUSPEND_FAILURE_IGNORE);
/* initexit lock may be released and re-acquired in course of
* doing the synch so we may have races where the thread
* exits on its own (or new threads appear): we'll live
* with those for now.
*/
ASSERT(synch_res == THREAD_SYNCH_RESULT_SUCCESS);
}
}
copy_mcontext(&mcontext, mc);
}
d_r_mutex_unlock(&thread_initexit_lock);
global_heap_free(
threads, num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
}
if (is_last_app_thread() && !dynamo_exited) {
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"SYS_exit%s(%d) in final thread " TIDFMT " of " PIDFMT
" => exiting DynamoRIO\n",
(dcontext->sys_num == SYSNUM_EXIT_PROCESS) ? "_group" : "",
MCXT_SYSNUM_REG(mc), d_r_get_thread_id(), get_process_id());
/* we want to clean up even if not automatic startup! */
automatic_startup = true;
exit_process = true;
} else {
LOG(THREAD, LOG_TOP | LOG_THREADS | LOG_SYSCALLS, 1,
"SYS_exit%s(%d) in thread " TIDFMT " of " PIDFMT " => cleaning up %s\n",
(dcontext->sys_num == SYSNUM_EXIT_PROCESS) ? "_group" : "",
MCXT_SYSNUM_REG(mc), d_r_get_thread_id(), get_process_id(),
exit_process ? "process" : "thread");
}
KSTOP(num_exits_dir_syscall);
block_cleanup_and_terminate(dcontext, MCXT_SYSNUM_REG(mc), sys_param(dcontext, 0),
sys_param(dcontext, 1), exit_process,
/* SYS_bsdthread_terminate has 2 more args */
sys_param(dcontext, 2), sys_param(dcontext, 3));
}
#if defined(LINUX) && defined(X86) /* XXX i#58: until we have Mac support */
static bool
os_set_app_thread_area(dcontext_t *dcontext, our_modify_ldt_t *user_desc)
{
# ifdef X86
int i;
os_thread_data_t *ostd = dcontext->os_field;
our_modify_ldt_t *desc = (our_modify_ldt_t *)ostd->app_thread_areas;
if (user_desc->seg_not_present == 1) {
/* find an empty one to update */
for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
if (desc[i].seg_not_present == 1)
break;
}
if (i < GDT_NUM_TLS_SLOTS) {
user_desc->entry_number = GDT_SELECTOR(i + tls_min_index());
memcpy(&desc[i], user_desc, sizeof(*user_desc));
} else
return false;
} else {
/* If we used early injection, this might be ld.so trying to set up TLS. We
* direct the app to use the GDT entry we already set up for our private
* libraries, but only the first time it requests TLS.
*/
if (user_desc->entry_number == -1 && return_stolen_lib_tls_gdt) {
d_r_mutex_lock(&set_thread_area_lock);
if (return_stolen_lib_tls_gdt) {
uint selector = read_thread_register(LIB_SEG_TLS);
uint index = SELECTOR_INDEX(selector);
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
return_stolen_lib_tls_gdt = false;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
user_desc->entry_number = index;
LOG(GLOBAL, LOG_THREADS, 2,
"%s: directing app to use "
"selector 0x%x for first call to set_thread_area\n",
__FUNCTION__, selector);
}
d_r_mutex_unlock(&set_thread_area_lock);
}
/* update the specific one */
i = user_desc->entry_number - tls_min_index();
if (i < 0 || i >= GDT_NUM_TLS_SLOTS)
return false;
LOG(GLOBAL, LOG_THREADS, 2,
"%s: change selector 0x%x base from " PFX " to " PFX "\n", __FUNCTION__,
GDT_SELECTOR(user_desc->entry_number), desc[i].base_addr,
user_desc->base_addr);
memcpy(&desc[i], user_desc, sizeof(*user_desc));
}
/* if not conflict with dr's tls, perform the syscall */
if (!INTERNAL_OPTION(private_loader) &&
GDT_SELECTOR(user_desc->entry_number) != read_thread_register(SEG_TLS) &&
GDT_SELECTOR(user_desc->entry_number) != read_thread_register(LIB_SEG_TLS))
return false;
# elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
# endif /* X86/ARM */
return true;
}
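/* Returns whether user_desc->entry_number refers to a valid, present slot among
 * the app thread areas we shadow for this thread.
 */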
static bool
os_get_app_thread_area(dcontext_t *dcontext, our_modify_ldt_t *user_desc)
{
# ifdef X86
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
our_modify_ldt_t *desc = (our_modify_ldt_t *)ostd->app_thread_areas;
int i = user_desc->entry_number - tls_min_index();
if (i < 0 || i >= GDT_NUM_TLS_SLOTS)
return false;
if (desc[i].seg_not_present == 1)
return false;
# elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
# endif /* X86/ARM */
return true;
}
#endif
/* This function is used for switch lib tls segment on creating thread.
* We switch to app's lib tls seg before thread creation system call, i.e.
* clone and vfork, and switch back to dr's lib tls seg after the system call.
* They are only called on parent thread, not the child thread.
* The child thread's tls is setup in os_tls_app_seg_init.
*/
/* XXX: It looks like the Linux kernel has some dependency on the segment
 * descriptor. If using dr's segment descriptor, the created thread will hit an
 * access violation because its TLS is not set up. However, it works fine if we switch
* the descriptor to app's segment descriptor before creating the thread.
* We should be able to remove this function later if we find the problem.
*/
static bool
os_switch_lib_tls(dcontext_t *dcontext, bool to_app)
{
return os_switch_seg_to_context(dcontext, LIB_SEG_TLS, to_app);
}
#ifdef X86
/* dcontext must be non-NULL (asserted below). */
static bool
os_switch_seg_to_base(dcontext_t *dcontext, os_local_state_t *os_tls, reg_id_t seg,
bool to_app, app_pc base)
{
bool res = false;
ASSERT(dcontext != NULL);
ASSERT(IF_X86_ELSE((seg == SEG_FS || seg == SEG_GS),
                       (seg == DR_REG_TPIDRURW || seg == DR_REG_TPIDRURO)));
switch (os_tls->tls_type) {
# if defined(X64) && !defined(MACOS)
case TLS_TYPE_ARCH_PRCTL: {
res = tls_set_fs_gs_segment_base(os_tls->tls_type, seg, base, NULL);
ASSERT(res);
LOG(GLOBAL, LOG_THREADS, 2,
"%s %s: arch_prctl successful for thread " TIDFMT " base " PFX "\n",
__FUNCTION__, to_app ? "to app" : "to DR", d_r_get_thread_id(), base);
if (seg == SEG_TLS && base == NULL) {
/* Set the selector to 0 so we don't think TLS is available. */
/* FIXME i#107: Still assumes app isn't using SEG_TLS. */
reg_t zero = 0;
WRITE_DR_SEG(zero);
}
break;
}
# endif
case TLS_TYPE_GDT: {
our_modify_ldt_t desc;
uint index;
uint selector;
if (to_app) {
selector = os_tls->app_lib_tls_reg;
index = SELECTOR_INDEX(selector);
} else {
index = (seg == LIB_SEG_TLS ? tls_priv_lib_index() : tls_dr_index());
ASSERT(index != -1 && "TLS indices not initialized");
selector = GDT_SELECTOR(index);
}
if (selector != 0) {
if (to_app) {
our_modify_ldt_t *areas =
((os_thread_data_t *)dcontext->os_field)->app_thread_areas;
ASSERT((index >= tls_min_index()) &&
((index - tls_min_index()) <= GDT_NUM_TLS_SLOTS));
desc = areas[index - tls_min_index()];
} else {
tls_init_descriptor(&desc, base, GDT_NO_SIZE_LIMIT, index);
}
res = tls_set_fs_gs_segment_base(os_tls->tls_type, seg, NULL, &desc);
ASSERT(res);
} else {
/* For a selector of zero, we just reset the segment to zero. We
* don't need to call set_thread_area.
*/
res = true; /* Indicate success. */
}
/* XXX i#2098: it's unsafe to call LOG here in between GDT and register changes */
        /* i#558: update the lib seg reg so the segment change takes effect */
if (seg == SEG_TLS)
WRITE_DR_SEG(selector);
else
WRITE_LIB_SEG(selector);
LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting %s to 0x%x\n",
__FUNCTION__, (to_app ? "app" : "dr"), reg_names[seg], selector);
LOG(THREAD, LOG_LOADER, 2,
"%s %s: set_thread_area successful for thread " TIDFMT " base " PFX "\n",
__FUNCTION__, to_app ? "to app" : "to DR", d_r_get_thread_id(), base);
break;
}
case TLS_TYPE_LDT: {
uint index;
uint selector;
if (to_app) {
selector = os_tls->app_lib_tls_reg;
index = SELECTOR_INDEX(selector);
} else {
index = (seg == LIB_SEG_TLS ? tls_priv_lib_index() : tls_dr_index());
ASSERT(index != -1 && "TLS indices not initialized");
selector = LDT_SELECTOR(index);
}
LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting %s to 0x%x\n",
__FUNCTION__, (to_app ? "app" : "dr"), reg_names[seg], selector);
if (seg == SEG_TLS)
WRITE_DR_SEG(selector);
else
WRITE_LIB_SEG(selector);
LOG(THREAD, LOG_LOADER, 2,
"%s %s: ldt selector swap successful for thread " TIDFMT "\n", __FUNCTION__,
to_app ? "to app" : "to DR", d_r_get_thread_id());
break;
}
default: ASSERT_NOT_REACHED(); return false;
}
ASSERT((!to_app && seg == SEG_TLS) ||
BOOLS_MATCH(to_app, os_using_app_state(dcontext)));
return res;
}
static bool
os_set_dr_tls_base(dcontext_t *dcontext, os_local_state_t *tls, byte *base)
{
if (tls == NULL) {
ASSERT(dcontext != NULL);
tls = get_os_tls_from_dc(dcontext);
}
return os_switch_seg_to_base(dcontext, tls, SEG_TLS, false, base);
}
#endif /* X86 */
static bool
os_switch_seg_to_context(dcontext_t *dcontext, reg_id_t seg, bool to_app)
{
os_local_state_t *os_tls = get_os_tls_from_dc(dcontext);
#ifdef X86
app_pc base;
/* we can only update the executing thread's segment (i#920) */
ASSERT_MESSAGE(CHKLVL_ASSERTS + 1 /*expensive*/, "can only act on executing thread",
/* i#2089: a clone syscall, or when native, temporarily puts in
* invalid TLS, so we don't check get_thread_private_dcontext().
*/
is_thread_tls_allocated() &&
dcontext->owning_thread == get_sys_thread_id());
if (to_app) {
base = os_get_app_tls_base(dcontext, seg);
} else {
base = os_get_priv_tls_base(dcontext, seg);
}
return os_switch_seg_to_base(dcontext, os_tls, seg, to_app, base);
#elif defined(AARCHXX)
bool res = false;
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(INTERNAL_OPTION(private_loader));
if (to_app) {
/* We need to handle being called when we're already in the requested state. */
ptr_uint_t cur_seg = read_thread_register(LIB_SEG_TLS);
if ((void *)cur_seg == os_tls->app_lib_tls_base)
return true;
bool app_mem_valid = true;
if (os_tls->app_lib_tls_base == NULL)
app_mem_valid = false;
else {
uint prot;
bool rc = get_memory_info(os_tls->app_lib_tls_base, NULL, NULL, &prot);
/* Rule out a garbage value, which happens in our own test
* common.allasm_aarch_isa.
* Also rule out an unwritable region, which seems to happen on arm
* where at process init the thread reg points at rodata in libc
* until properly set to a writable mmap later.
*/
if (!rc || !TESTALL(MEMPROT_READ | MEMPROT_WRITE, prot))
app_mem_valid = false;
}
if (!app_mem_valid) {
/* XXX i#1578: For pure-asm apps that do not use libc, the app may have no
* thread register value. For detach we would like to write a 0 back into
* the thread register, but it complicates our exit code, which wants access
* to DR's TLS between dynamo_thread_exit_common()'s call to
* dynamo_thread_not_under_dynamo() and its call to
* set_thread_private_dcontext(NULL). For now we just leave our privlib
* segment in there. It seems rather unlikely to cause a problem: app code
* is unlikely to read the thread register; it's going to assume it owns it
* and will just blindly write to it.
*/
return true;
}
        /* On switching to app's TLS, we need to put DR's TLS base into app's TLS
* at the same offset so it can be loaded on entering code cache.
* Otherwise, the context switch code on entering fcache will fault on
* accessing DR's TLS.
* The app's TLS slot value is stored into privlib's TLS slot for
* later restore on switching back to privlib's TLS.
*/
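        /* In effect the two slots at DR_TLS_BASE_OFFSET are simply exchanged:
         * tmp = *priv_slot; *priv_slot = *app_slot; *app_slot = tmp.
         */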
byte **priv_lib_tls_swap_slot =
(byte **)(ostd->priv_lib_tls_base + DR_TLS_BASE_OFFSET);
byte **app_lib_tls_swap_slot =
(byte **)(os_tls->app_lib_tls_base + DR_TLS_BASE_OFFSET);
LOG(THREAD, LOG_LOADER, 3,
"%s: switching to app: app slot=&" PFX " *" PFX ", priv slot=&" PFX " *" PFX
"\n",
__FUNCTION__, app_lib_tls_swap_slot, *app_lib_tls_swap_slot,
priv_lib_tls_swap_slot, *priv_lib_tls_swap_slot);
byte *dr_tls_base = *priv_lib_tls_swap_slot;
*priv_lib_tls_swap_slot = *app_lib_tls_swap_slot;
*app_lib_tls_swap_slot = dr_tls_base;
LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting coproc reg to 0x%x\n",
__FUNCTION__, (to_app ? "app" : "dr"), os_tls->app_lib_tls_base);
res = write_thread_register(os_tls->app_lib_tls_base);
} else {
/* We need to handle being called when we're already in the requested state. */
ptr_uint_t cur_seg = read_thread_register(LIB_SEG_TLS);
if ((void *)cur_seg == ostd->priv_lib_tls_base)
return true;
/* Restore the app's TLS slot that we used for storing DR's TLS base,
* and put DR's TLS base back to privlib's TLS slot.
*/
byte **priv_lib_tls_swap_slot =
(byte **)(ostd->priv_lib_tls_base + DR_TLS_BASE_OFFSET);
byte **app_lib_tls_swap_slot =
(byte **)(os_tls->app_lib_tls_base + DR_TLS_BASE_OFFSET);
byte *dr_tls_base = *app_lib_tls_swap_slot;
LOG(THREAD, LOG_LOADER, 3,
"%s: switching to DR: app slot=&" PFX " *" PFX ", priv slot=&" PFX " *" PFX
"\n",
__FUNCTION__, app_lib_tls_swap_slot, *app_lib_tls_swap_slot,
priv_lib_tls_swap_slot, *priv_lib_tls_swap_slot);
*app_lib_tls_swap_slot = *priv_lib_tls_swap_slot;
*priv_lib_tls_swap_slot = dr_tls_base;
LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting coproc reg to 0x%x\n",
__FUNCTION__, (to_app ? "app" : "dr"), ostd->priv_lib_tls_base);
res = write_thread_register(ostd->priv_lib_tls_base);
}
LOG(THREAD, LOG_LOADER, 2, "%s %s: set_tls swap success=%d for thread " TIDFMT "\n",
__FUNCTION__, to_app ? "to app" : "to DR", res, d_r_get_thread_id());
return res;
#endif /* X86/AARCHXX */
}
#ifdef LINUX
static bool
handle_clone_pre(dcontext_t *dcontext)
{
/* For the clone syscall, in /usr/src/linux/arch/i386/kernel/process.c
* 32-bit params: flags, newsp, ptid, tls, ctid
* 64-bit params: should be the same yet tls (for ARCH_SET_FS) is in r8?!?
* I don't see how sys_clone gets its special args: shouldn't it
* just get pt_regs as a "special system call"?
* sys_clone(unsigned long clone_flags, unsigned long newsp,
* void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
*/
uint64_t flags;
/* For the clone3 syscall, DR creates its own copy of clone_args for two
* reasons: to ensure that the app-provided clone_args is readable
* without any fault, and to avoid modifying the app's clone_args in the
* is_thread_create_syscall case (see below).
*/
clone3_syscall_args_t *dr_clone_args = NULL, *app_clone_args = NULL;
uint app_clone_args_size = 0;
if (dcontext->sys_num == SYS_clone3) {
if (is_clone3_enosys) {
/* We know that clone3 will return ENOSYS, so we skip the pre-syscall
* handling and fail early.
*/
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning ENOSYS to app for clone3\n");
set_failure_return_val(dcontext, ENOSYS);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
return false;
}
app_clone_args_size =
(uint)sys_param(dcontext, SYSCALL_PARAM_CLONE3_CLONE_ARGS_SIZE);
if (app_clone_args_size < CLONE_ARGS_SIZE_VER0) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EINVAL to app for clone3\n");
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
return false;
}
app_clone_args =
(clone3_syscall_args_t *)sys_param(dcontext, SYSCALL_PARAM_CLONE3_CLONE_ARGS);
/* Note that the struct clone_args being used by the app may have
* less/more fields than DR's internal struct (clone3_syscall_args_t).
* For creating DR's copy of the app's clone_args object, we need to
* allocate as much space as specified by the app in the clone3
* syscall's args.
*/
dr_clone_args = (clone3_syscall_args_t *)heap_alloc(
dcontext, app_clone_args_size HEAPACCT(ACCT_OTHER));
if (!d_r_safe_read(app_clone_args, app_clone_args_size, dr_clone_args)) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EFAULT to app for clone3\n");
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
heap_free(dcontext, dr_clone_args, app_clone_args_size HEAPACCT(ACCT_OTHER));
return false;
}
flags = dr_clone_args->flags;
/* Save for post_system_call */
/* We need to save the pointer to the app's clone_args so that we can restore it
* post-syscall.
*/
dcontext->sys_param0 = (reg_t)app_clone_args;
/* For freeing the allocated memory. */
dcontext->sys_param1 = (reg_t)dr_clone_args;
dcontext->sys_param2 = (reg_t)app_clone_args_size;
/* clone3 flags are 64-bit even on 32-bit systems. So we need to split them across
* two reg_t vars on 32-bit. We do it on 64-bit systems as well for simpler code.
*/
dcontext->sys_param3 = (reg_t)(flags & CLONE3_FLAGS_4_BYTE_MASK);
ASSERT((flags >> 32 & ~CLONE3_FLAGS_4_BYTE_MASK) == 0);
dcontext->sys_param4 = (reg_t)((flags >> 32));
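        /* Illustration: the full 64-bit flags value can later be recombined as
         * ((uint64_t)sys_param4 << 32) | sys_param3.
         */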
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: clone3 with args: flags = 0x" HEX64_FORMAT_STRING
", exit_signal = 0x" HEX64_FORMAT_STRING ", stack = 0x" HEX64_FORMAT_STRING
", stack_size = 0x" HEX64_FORMAT_STRING "\n",
dr_clone_args->flags, dr_clone_args->exit_signal, dr_clone_args->stack,
dr_clone_args->stack_size);
} else {
flags = (uint)sys_param(dcontext, 0);
/* Save for post_system_call.
* Unlike clone3, here the flags are 32-bit, so truncation is okay.
*/
dcontext->sys_param0 = (reg_t)flags;
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: clone with args: flags = " PFX ", stack = " PFX
", tid_field_parent = " PFX ", tid_field_child = " PFX ", thread_ptr = " PFX
"\n",
sys_param(dcontext, 0), sys_param(dcontext, 1), sys_param(dcontext, 2),
sys_param(dcontext, 3), sys_param(dcontext, 4));
}
handle_clone(dcontext, flags);
if ((flags & CLONE_VM) == 0) {
LOG(THREAD, LOG_SYSCALLS, 1, "\tWARNING: CLONE_VM not set!\n");
}
/* i#1010: If we have private fds open (usually logfiles), we should
* clean those up before they get reused by a new thread.
* XXX: Ideally we'd do this in fd_table_add(), but we can't acquire
* thread_initexit_lock there.
*/
cleanup_after_vfork_execve(dcontext);
/* For thread creation clone syscalls a clone_record_t structure
* containing the pc after the app's syscall instr and other data
* (see i#27) is placed at the bottom of the dstack (which is allocated
* by create_clone_record() - it also saves app stack and switches
* to dstack). xref i#149/PR 403015.
* Note: This must be done after sys_param0 is set.
*/
if (is_thread_create_syscall(dcontext, dr_clone_args)) {
if (dcontext->sys_num == SYS_clone3) {
/* create_clone_record modifies some fields in clone_args for the
* clone3 syscall. Instead of reusing the app's copy of
* clone_args and modifying it, we choose to use our own copy.
* Under CLONE_VM, the parent and child threads have a pointer to
* the same app clone_args. By using our own copy of clone_args
* for the syscall, we obviate the need to restore the modified
* fields in the app's copy after the syscall in either the parent
* or the child thread, which can be racy under CLONE_VM as the
* parent and/or child threads may need to access/modify it. By
* using a copy instead, both parent and child threads only
* need to restore their own SYSCALL_PARAM_CLONE3_CLONE_ARGS reg
* to the pointer to the app's clone_args. It is saved in the
* clone record for the child thread, and in sys_param0 for the
* parent thread. The DR copy of clone_args is freed by the parent
* thread in the post-syscall handling of clone3; as it is used
* only by the parent thread, there is no use-after-free danger here.
*/
ASSERT(app_clone_args != NULL && dr_clone_args != NULL);
*sys_param_addr(dcontext, SYSCALL_PARAM_CLONE3_CLONE_ARGS) =
(reg_t)dr_clone_args;
/* The pointer to the app's clone_args was saved in sys_param0 above. */
create_clone_record(dcontext, NULL, dr_clone_args, app_clone_args);
} else {
/* We replace the app-provided stack pointer with our own stack
* pointer in create_clone_record. Save the original pointer so
* that we can restore it post-syscall in the parent. The same is
* restored in the child in restore_clone_param_from_clone_record.
*/
dcontext->sys_param1 = sys_param(dcontext, SYSCALL_PARAM_CLONE_STACK);
create_clone_record(dcontext,
sys_param_addr(dcontext, SYSCALL_PARAM_CLONE_STACK), NULL,
NULL);
}
os_clone_pre(dcontext);
os_new_thread_pre();
} else {
/* This is really a fork. */
if (dcontext->sys_num == SYS_clone3) {
/* We free this memory before the actual fork, to avoid having to free
* it in the parent *and* the child later.
*/
ASSERT(app_clone_args_size == (uint)dcontext->sys_param2);
ASSERT(dr_clone_args == (clone3_syscall_args_t *)dcontext->sys_param1);
heap_free(dcontext, dr_clone_args, app_clone_args_size HEAPACCT(ACCT_OTHER));
/* We do not need these anymore for the fork case. */
dcontext->sys_param1 = 0;
dcontext->sys_param2 = 0;
}
os_fork_pre(dcontext);
}
return true;
}
#endif
/* System call interception: put any special handling here
* Arguments come from the pusha right before the call
*/
/* WARNING: flush_fragments_and_remove_region assumes that pre and post system
* call handlers do not examine or modify fcache or its fragments in any
* way except for calling flush_fragments_and_remove_region!
*/
/* WARNING: All registers are IN values, but NOT OUT values --
* must set mcontext's register for that.
*/
/* Returns false if system call should NOT be executed (in which case,
* post_system_call() will *not* be called!).
* Returns true if system call should go ahead
*/
/* XXX: split out specific handlers into separate routines
*/
bool
pre_system_call(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
bool execute_syscall = true;
dr_where_am_i_t old_whereami = dcontext->whereami;
dcontext->whereami = DR_WHERE_SYSCALL_HANDLER;
/* FIXME We haven't yet done the work to detect which syscalls we
* can determine a priori will fail. Once we do, we will set the
* expect_last_syscall_to_fail to true for those case, and can
* confirm in post_system_call() that the syscall failed as
* expected.
*/
DODEBUG(dcontext->expect_last_syscall_to_fail = false;);
/* save key register values for post_system_call (they get clobbered
* in syscall itself)
*/
dcontext->sys_num = os_normalized_sysnum((int)MCXT_SYSNUM_REG(mc), NULL, dcontext);
RSTATS_INC(pre_syscall);
DOSTATS({
if (ignorable_system_call_normalized(dcontext->sys_num))
STATS_INC(pre_syscall_ignorable);
});
LOG(THREAD, LOG_SYSCALLS, 2, "system call %d\n", dcontext->sys_num);
#if defined(LINUX) && defined(X86)
/* PR 313715: If we fail to hook the vsyscall page (xref PR 212570, PR 288330)
* we fall back on int, but we have to tweak syscall param #5 (ebp)
* Once we have PR 288330 we can remove this.
*/
if (should_syscall_method_be_sysenter() && !dcontext->sys_was_int) {
dcontext->sys_xbp = mc->xbp;
/* not using SAFE_READ due to performance concerns (we do this for
* every single system call on systems where we can't hook vsyscall!)
*/
TRY_EXCEPT(dcontext, /* try */ { mc->xbp = *(reg_t *)mc->xsp; }, /* except */
{
ASSERT_NOT_REACHED();
mc->xbp = 0;
});
}
#endif
switch (dcontext->sys_num) {
case SYSNUM_EXIT_PROCESS:
#if defined(LINUX) && VMX86_SERVER
if (os_in_vmkernel_32bit()) {
/* on esx 3.5 => ENOSYS, so wait for SYS_exit */
LOG(THREAD, LOG_SYSCALLS, 2, "on esx35 => ignoring exitgroup\n");
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
break;
}
#endif
/* fall-through */
case SYSNUM_EXIT_THREAD: {
handle_exit(dcontext);
break;
}
/****************************************************************************/
/* MEMORY REGIONS */
#if defined(LINUX) && !defined(X64) && !defined(ARM)
case SYS_mmap: {
/* in /usr/src/linux/arch/i386/kernel/sys_i386.c:
asmlinkage int old_mmap(struct mmap_arg_struct_t *arg)
*/
mmap_arg_struct_t *arg = (mmap_arg_struct_t *)sys_param(dcontext, 0);
mmap_arg_struct_t arg_buf;
if (d_r_safe_read(arg, sizeof(mmap_arg_struct_t), &arg_buf)) {
void *addr = (void *)arg->addr;
size_t len = (size_t)arg->len;
uint prot = (uint)arg->prot;
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: mmap addr=" PFX " size=" PIFX " prot=0x%x"
" flags=" PIFX " offset=" PIFX " fd=%d\n",
addr, len, prot, arg->flags, arg->offset, arg->fd);
/* Check for overlap with existing code or patch-proof regions */
if (addr != NULL &&
!app_memory_pre_alloc(dcontext, addr, len, osprot_to_memprot(prot),
!TEST(MAP_FIXED, arg->flags),
false /*we'll update in post*/,
false /*unknown*/)) {
/* Rather than failing or skipping the syscall we'd like to just
* remove the hint -- but we don't want to write to app memory, so
* we do fail. We could set up our own mmap_arg_struct_t but
                 * we'd need dedicated per-thread storage, and SYS_mmap is obsolete.
*/
execute_syscall = false;
set_failure_return_val(dcontext, ENOMEM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
break;
}
}
/* post_system_call does the work */
dcontext->sys_param0 = (reg_t)arg;
break;
}
#endif
case IF_MACOS_ELSE(SYS_mmap, IF_X64_ELSE(SYS_mmap, SYS_mmap2)): {
/* in /usr/src/linux/arch/i386/kernel/sys_i386.c:
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
*/
void *addr = (void *)sys_param(dcontext, 0);
size_t len = (size_t)sys_param(dcontext, 1);
uint prot = (uint)sys_param(dcontext, 2);
uint flags = (uint)sys_param(dcontext, 3);
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: mmap2 addr=" PFX " size=" PIFX " prot=0x%x"
" flags=" PIFX " offset=" PIFX " fd=%d\n",
addr, len, prot, flags, sys_param(dcontext, 5), sys_param(dcontext, 4));
/* Check for overlap with existing code or patch-proof regions */
/* Try to see whether it's an image, though we can't tell for addr==NULL
* (typical for 1st mmap).
*/
bool image = addr != NULL && !TEST(MAP_ANONYMOUS, flags) &&
mmap_check_for_module_overlap(addr, len, TEST(PROT_READ, prot), 0, true);
if (addr != NULL &&
!app_memory_pre_alloc(dcontext, addr, len, osprot_to_memprot(prot),
!TEST(MAP_FIXED, flags), false /*we'll update in post*/,
image /*best estimate*/)) {
if (!TEST(MAP_FIXED, flags)) {
/* Rather than failing or skipping the syscall we just remove
* the hint which should eliminate any overlap.
*/
*sys_param_addr(dcontext, 0) = 0;
} else {
execute_syscall = false;
set_failure_return_val(dcontext, ENOMEM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
break;
}
}
/* post_system_call does the work */
dcontext->sys_param0 = (reg_t)addr;
dcontext->sys_param1 = len;
dcontext->sys_param2 = prot;
dcontext->sys_param3 = flags;
break;
}
/* must flush stale fragments when we see munmap/mremap */
case SYS_munmap: {
/* in /usr/src/linux/mm/mmap.c:
asmlinkage long sys_munmap(unsigned long addr, uint len)
*/
app_pc addr = (void *)sys_param(dcontext, 0);
size_t len = (size_t)sys_param(dcontext, 1);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: munmap addr=" PFX " size=" PFX "\n", addr,
len);
RSTATS_INC(num_app_munmaps);
/* FIXME addr is supposed to be on a page boundary so we
* could detect that condition here and set
* expect_last_syscall_to_fail.
*/
/* save params in case an undo is needed in post_system_call */
dcontext->sys_param0 = (reg_t)addr;
dcontext->sys_param1 = len;
/* We assume that the unmap will succeed and so are conservative
* and remove the region from exec areas and flush all fragments
* prior to issuing the syscall. If the unmap fails, we try to
* recover in post_system_call() by re-adding the region. This
* approach has its shortcomings -- see comments below in
* post_system_call().
*/
/* Check for unmapping a module. */
os_get_module_info_lock();
if (module_overlaps(addr, len)) {
/* FIXME - handle unmapping more than one module at once, or only unmapping
* part of a module (for which case should adjust view size? or treat as full
* unmap?). Theoretical for now as we haven't seen this. */
module_area_t *ma = module_pc_lookup(addr);
ASSERT_CURIOSITY(ma != NULL);
ASSERT_CURIOSITY(addr == ma->start);
/* XREF 307599 on rounding module end to the next PAGE boundary */
ASSERT_CURIOSITY((app_pc)ALIGN_FORWARD(addr + len, PAGE_SIZE) == ma->end);
os_get_module_info_unlock();
/* i#210:
* we only think a module is removed if its first memory region
* is unloaded (unmapped).
* XREF i#160 to fix the real problem of handling module splitting.
*/
if (ma != NULL && ma->start == addr)
module_list_remove(addr, ALIGN_FORWARD(len, PAGE_SIZE));
} else
os_get_module_info_unlock();
app_memory_deallocation(dcontext, (app_pc)addr, len,
false /* don't own thread_initexit_lock */,
true /* image, FIXME: though not necessarily */);
/* FIXME: case 4983 use is_elf_so_header() */
#ifndef HAVE_MEMINFO_QUERY
memcache_lock();
memcache_remove(addr, addr + len);
memcache_unlock();
#endif
break;
}
#ifdef LINUX
case SYS_mremap: {
/* in /usr/src/linux/mm/mmap.c:
asmlinkage unsigned long sys_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr)
*/
dr_mem_info_t info;
app_pc addr = (void *)sys_param(dcontext, 0);
size_t old_len = (size_t)sys_param(dcontext, 1);
size_t new_len = (size_t)sys_param(dcontext, 2);
DEBUG_DECLARE(bool ok;)
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: mremap addr=" PFX " size=" PFX "\n", addr,
old_len);
/* post_system_call does the work */
dcontext->sys_param0 = (reg_t)addr;
dcontext->sys_param1 = old_len;
dcontext->sys_param2 = new_len;
/* i#173
* we need memory type and prot to set the
* new memory region in the post_system_call
*/
DEBUG_DECLARE(ok =)
query_memory_ex(addr, &info);
ASSERT(ok);
dcontext->sys_param3 = info.prot;
dcontext->sys_param4 = info.type;
DOCHECK(1, {
/* we don't expect to see remappings of modules */
os_get_module_info_lock();
ASSERT_CURIOSITY(!module_overlaps(addr, old_len));
os_get_module_info_unlock();
});
break;
}
#endif
case SYS_mprotect: {
/* in /usr/src/linux/mm/mprotect.c:
asmlinkage long sys_mprotect(unsigned long start, uint len,
unsigned long prot)
*/
uint res;
DEBUG_DECLARE(size_t size;)
app_pc addr = (void *)sys_param(dcontext, 0);
size_t len = (size_t)sys_param(dcontext, 1);
uint prot = (uint)sys_param(dcontext, 2);
uint old_memprot = MEMPROT_NONE, new_memprot;
bool exists = true;
/* save params in case an undo is needed in post_system_call */
dcontext->sys_param0 = (reg_t)addr;
dcontext->sys_param1 = len;
dcontext->sys_param2 = prot;
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: mprotect addr=" PFX " size=" PFX " prot=%s\n", addr, len,
memprot_string(osprot_to_memprot(prot)));
if (!get_memory_info(addr, NULL, IF_DEBUG_ELSE(&size, NULL), &old_memprot)) {
exists = false;
/* Xref PR 413109, PR 410921: if the start, or any page, is not mapped,
* this should fail with ENOMEM. We used to force-fail it to avoid
* asserts in our own allmem update code, but there are cases where a
* seemingly unmapped page succeeds (i#1912: next page of grows-down
* initial stack). Thus we let it go through.
*/
LOG(THREAD, LOG_SYSCALLS, 2,
"\t" PFX " isn't mapped: probably mprotect will fail\n", addr);
} else {
/* If mprotect region spans beyond the end of the vmarea then it
* spans 2 or more vmareas with dissimilar protection (xref
* PR 410921) or has unallocated regions in between (PR 413109).
*/
DOCHECK(1, dcontext->mprot_multi_areas = len > size ? true : false;);
}
new_memprot = osprot_to_memprot(prot) |
/* mprotect won't change meta flags */
(old_memprot & MEMPROT_META_FLAGS);
res = app_memory_protection_change(dcontext, addr, len, new_memprot, &new_memprot,
NULL, false /*!image*/);
if (res != DO_APP_MEM_PROT_CHANGE) {
if (res == FAIL_APP_MEM_PROT_CHANGE) {
ASSERT_NOT_IMPLEMENTED(false); /* return code? */
} else {
ASSERT_NOT_IMPLEMENTED(res != SUBSET_APP_MEM_PROT_CHANGE);
ASSERT_NOT_REACHED();
}
execute_syscall = false;
} else {
/* FIXME Store state for undo if the syscall fails. */
IF_NO_MEMQUERY(memcache_update_locked(addr, addr + len, new_memprot,
-1 /*type unchanged*/, exists));
}
break;
}
#ifdef ANDROID
case SYS_prctl:
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
dcontext->sys_param3 = sys_param(dcontext, 3);
dcontext->sys_param4 = sys_param(dcontext, 4);
break;
#endif
#ifdef LINUX
case SYS_brk: {
if (DYNAMO_OPTION(emulate_brk)) {
/* i#1004: emulate brk via a separate mmap */
byte *new_val = (byte *)sys_param(dcontext, 0);
byte *res = emulate_app_brk(dcontext, new_val);
execute_syscall = false;
/* SYS_brk returns old brk on failure */
set_success_return_val(dcontext, (reg_t)res);
} else {
/* i#91/PR 396352: need to watch SYS_brk to maintain all_memory_areas.
* We store the old break in the param1 slot.
*/
DODEBUG(dcontext->sys_param0 = (reg_t)sys_param(dcontext, 0););
dcontext->sys_param1 = dynamorio_syscall(SYS_brk, 1, 0);
}
break;
}
# ifdef SYS_uselib
case SYS_uselib: {
        /* Used to get the kernel to load a shared library (legacy system call).
         * Was primarily used when statically linking to dynamically loaded shared
         * libraries that were loaded at known locations.  Shouldn't be used by
         * applications using the dynamic loader (ld), which is currently the only
         * way we can inject, so we don't expect to see this. PR 307621. */
ASSERT_NOT_IMPLEMENTED(false);
break;
}
# endif
#endif
/****************************************************************************/
/* SPAWNING */
#ifdef LINUX
case SYS_clone3:
case SYS_clone: execute_syscall = handle_clone_pre(dcontext); break;
#elif defined(MACOS)
case SYS_bsdthread_create: {
/* XXX i#1403: we need earlier injection to intercept
* bsdthread_register in order to capture workqueue threads.
* For now we settle for intercepting bsd threads at the user thread func.
* We miss a little user-mode code but this is enough to get started.
*/
app_pc func = (app_pc)sys_param(dcontext, 0);
void *func_arg = (void *)sys_param(dcontext, 1);
void *clone_rec;
LOG(THREAD, LOG_SYSCALLS, 1,
"bsdthread_create: thread func " PFX ", arg " PFX "\n", func, func_arg);
handle_clone(dcontext, CLONE_THREAD | CLONE_VM | CLONE_SIGHAND | SIGCHLD);
clone_rec = create_clone_record(dcontext, NULL, func, func_arg);
dcontext->sys_param0 = (reg_t)func;
dcontext->sys_param1 = (reg_t)func_arg;
*sys_param_addr(dcontext, 0) = (reg_t)new_bsdthread_intercept;
*sys_param_addr(dcontext, 1) = (reg_t)clone_rec;
os_new_thread_pre();
break;
}
case SYS_posix_spawn: {
/* FIXME i#1644: monitor this call which can be fork or exec */
ASSERT_NOT_IMPLEMENTED(false);
break;
}
#endif
#ifdef SYS_vfork
case SYS_vfork: {
/* treat as if sys_clone with flags just as sys_vfork does */
/* in /usr/src/linux/arch/i386/kernel/process.c */
uint flags = CLONE_VFORK | CLONE_VM | SIGCHLD;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: vfork\n");
handle_clone(dcontext, flags);
cleanup_after_vfork_execve(dcontext);
/* save for post_system_call, treated as if SYS_clone */
dcontext->sys_param0 = (reg_t)flags;
/* vfork has the same needs as clone. Pass info via a clone_record_t
* structure to child. See SYS_clone for info about i#149/PR 403015.
*/
IF_LINUX(ASSERT(is_thread_create_syscall(dcontext, NULL)));
dcontext->sys_param1 = mc->xsp; /* for restoring in parent */
# ifdef MACOS
create_clone_record(dcontext, (reg_t *)&mc->xsp, NULL, NULL);
# else
create_clone_record(dcontext, (reg_t *)&mc->xsp /*child uses parent sp*/, NULL,
NULL);
# endif
os_clone_pre(dcontext);
os_new_thread_pre();
break;
}
#endif
#ifdef SYS_fork
case SYS_fork: {
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: fork\n");
os_fork_pre(dcontext);
break;
}
#endif
case SYS_execve: {
int ret = handle_execve(dcontext);
if (ret != 0) {
execute_syscall = false;
set_failure_return_val(dcontext, ret);
}
break;
}
/****************************************************************************/
/* SIGNALS */
case IF_MACOS_ELSE(SYS_sigaction, SYS_rt_sigaction): { /* 174 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act,
struct sigaction *oact, size_t sigsetsize)
*/
int sig = (int)sys_param(dcontext, 0);
const kernel_sigaction_t *act =
(const kernel_sigaction_t *)sys_param(dcontext, 1);
prev_sigaction_t *oact = (prev_sigaction_t *)sys_param(dcontext, 2);
size_t sigsetsize = (size_t)
/* On Mac there is no size arg (but it doesn't use old sigaction, so
* closer to rt_ than non-rt_ below).
*/
IF_MACOS_ELSE(sizeof(kernel_sigset_t), sys_param(dcontext, 3));
uint res;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: %ssigaction %d " PFX " " PFX " %d\n",
IF_MACOS_ELSE("", "rt_"), sig, act, oact, sigsetsize);
/* post_syscall does some work as well */
dcontext->sys_param0 = (reg_t)sig;
dcontext->sys_param1 = (reg_t)act;
dcontext->sys_param2 = (reg_t)oact;
dcontext->sys_param3 = (reg_t)sigsetsize;
execute_syscall = handle_sigaction(dcontext, sig, act, oact, sigsetsize, &res);
if (!execute_syscall) {
LOG(THREAD, LOG_SYSCALLS, 2, "sigaction emulation => %d\n", -res);
if (res == 0)
set_success_return_val(dcontext, 0);
else
set_failure_return_val(dcontext, res);
}
break;
}
#if defined(LINUX) && !defined(X64)
case SYS_sigaction: { /* 67 */
/* sys_sigaction(int sig, const struct old_sigaction *act,
* struct old_sigaction *oact)
*/
int sig = (int)sys_param(dcontext, 0);
const old_sigaction_t *act = (const old_sigaction_t *)sys_param(dcontext, 1);
old_sigaction_t *oact = (old_sigaction_t *)sys_param(dcontext, 2);
uint res;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: sigaction %d " PFX " " PFX "\n", sig, act,
oact);
dcontext->sys_param0 = (reg_t)sig;
dcontext->sys_param1 = (reg_t)act;
dcontext->sys_param2 = (reg_t)oact;
execute_syscall = handle_old_sigaction(dcontext, sig, act, oact, &res);
if (!execute_syscall) {
LOG(THREAD, LOG_SYSCALLS, 2, "sigaction emulation => %d\n", -res);
if (res == 0)
set_success_return_val(dcontext, 0);
else
set_failure_return_val(dcontext, res);
}
break;
}
#endif
#if defined(LINUX) && !defined(X64)
case SYS_sigreturn: { /* 119 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int sys_sigreturn(unsigned long __unused)
*/
execute_syscall = handle_sigreturn(dcontext, false);
/* app will not expect syscall to return, so when handle_sigreturn
* returns false it always redirects the context, and thus no
* need to set return val here.
*/
break;
}
#endif
#ifdef LINUX
case SYS_rt_sigreturn: { /* 173 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int sys_rt_sigreturn(unsigned long __unused)
*/
execute_syscall = handle_sigreturn(dcontext, true);
/* see comment for SYS_sigreturn on return val */
break;
}
#endif
#ifdef MACOS
case SYS_sigreturn: {
/* int sigreturn(struct ucontext *uctx, int infostyle) */
execute_syscall = handle_sigreturn(dcontext, (void *)sys_param(dcontext, 0),
(int)sys_param(dcontext, 1));
/* see comment for SYS_sigreturn on return val */
break;
}
#endif
case SYS_sigaltstack: { /* 186 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int
sys_sigaltstack(const stack_t *uss, stack_t *uoss)
*/
const stack_t *uss = (const stack_t *)sys_param(dcontext, 0);
stack_t *uoss = (stack_t *)sys_param(dcontext, 1);
uint res;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: sigaltstack " PFX " " PFX "\n", uss, uoss);
execute_syscall =
handle_sigaltstack(dcontext, uss, uoss, get_mcontext(dcontext)->xsp, &res);
if (!execute_syscall) {
LOG(THREAD, LOG_SYSCALLS, 2, "sigaltstack emulation => %d\n", -res);
if (res == 0)
set_success_return_val(dcontext, res);
else
set_failure_return_val(dcontext, res);
}
break;
}
case IF_MACOS_ELSE(SYS_sigprocmask, SYS_rt_sigprocmask): { /* 175 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset,
size_t sigsetsize)
*/
/* we also need access to the params in post_system_call */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
dcontext->sys_param3 = sys_param(dcontext, 3);
execute_syscall = handle_sigprocmask(dcontext, (int)sys_param(dcontext, 0),
(kernel_sigset_t *)sys_param(dcontext, 1),
(kernel_sigset_t *)sys_param(dcontext, 2),
(size_t)sys_param(dcontext, 3));
if (!execute_syscall)
set_success_return_val(dcontext, 0);
break;
}
#ifdef MACOS
case SYS_sigsuspend_nocancel:
#endif
case IF_MACOS_ELSE(SYS_sigsuspend, SYS_rt_sigsuspend): { /* 179 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage int
sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize)
*/
handle_sigsuspend(dcontext, (kernel_sigset_t *)sys_param(dcontext, 0),
(size_t)sys_param(dcontext, 1));
break;
}
#ifdef LINUX
# ifdef SYS_signalfd
case SYS_signalfd: /* 282/321 */
# endif
case SYS_signalfd4: { /* 289 */
/* int signalfd (int fd, const sigset_t *mask, size_t sizemask) */
/* int signalfd4(int fd, const sigset_t *mask, size_t sizemask, int flags) */
ptr_int_t new_result;
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
# ifdef SYS_signalfd
if (dcontext->sys_num == SYS_signalfd)
dcontext->sys_param3 = 0;
else
# endif
dcontext->sys_param3 = sys_param(dcontext, 3);
new_result = handle_pre_signalfd(
dcontext, (int)dcontext->sys_param0, (kernel_sigset_t *)dcontext->sys_param1,
(size_t)dcontext->sys_param2, (int)dcontext->sys_param3);
execute_syscall = false;
/* since non-Mac, we can use this even if the call failed */
set_success_return_val(dcontext, new_result);
break;
}
#endif
case SYS_kill: { /* 37 */
/* in /usr/src/linux/kernel/signal.c:
* asmlinkage long sys_kill(int pid, int sig)
*/
pid_t pid = (pid_t)sys_param(dcontext, 0);
uint sig = (uint)sys_param(dcontext, 1);
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 2,
"thread " TIDFMT " sending signal %d to pid " PIDFMT "\n",
d_r_get_thread_id(), sig, pid);
/* We check whether targeting this process or this process group */
if (pid == get_process_id() || pid == 0 || pid == -get_process_group_id()) {
handle_self_signal(dcontext, sig);
}
break;
}
#if defined(SYS_tkill)
case SYS_tkill: { /* 238 */
/* in /usr/src/linux/kernel/signal.c:
* asmlinkage long sys_tkill(int pid, int sig)
*/
pid_t tid = (pid_t)sys_param(dcontext, 0);
uint sig = (uint)sys_param(dcontext, 1);
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 2,
"thread " TIDFMT " sending signal %d to tid %d\n", d_r_get_thread_id(), sig,
tid);
if (tid == d_r_get_thread_id()) {
handle_self_signal(dcontext, sig);
}
break;
}
#endif
#if defined(SYS_tgkill)
case SYS_tgkill: { /* 270 */
/* in /usr/src/linux/kernel/signal.c:
* asmlinkage long sys_tgkill(int tgid, int pid, int sig)
*/
pid_t tgid = (pid_t)sys_param(dcontext, 0);
pid_t tid = (pid_t)sys_param(dcontext, 1);
uint sig = (uint)sys_param(dcontext, 2);
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 2,
"thread " TIDFMT " sending signal %d to tid %d tgid %d\n",
d_r_get_thread_id(), sig, tid, tgid);
/* some kernels support -1 values:
         *   tgkill(-1, tid, sig) == tkill(tid, sig)
* tgkill(tgid, -1, sig) == kill(tgid, sig)
* the 2nd was proposed but is not in 2.6.20 so I'm ignoring it, since
* I don't want to kill the thread when the signal is never sent!
* FIXME: the 1st is in my tkill manpage, but not my 2.6.20 kernel sources!
*/
if ((tgid == -1 || tgid == get_process_id()) && tid == d_r_get_thread_id()) {
handle_self_signal(dcontext, sig);
}
break;
}
#endif
case SYS_setitimer: /* 104 */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
handle_pre_setitimer(dcontext, (int)sys_param(dcontext, 0),
(const struct itimerval *)sys_param(dcontext, 1),
(struct itimerval *)sys_param(dcontext, 2));
break;
case SYS_getitimer: /* 105 */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
break;
#if defined(LINUX) && defined(X86)
case SYS_alarm: /* 27 on x86 and 37 on x64 */
dcontext->sys_param0 = sys_param(dcontext, 0);
handle_pre_alarm(dcontext, (unsigned int)dcontext->sys_param0);
break;
#endif
#if 0
# ifndef X64
case SYS_signal: { /* 48 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
*/
break;
}
case SYS_sigsuspend: { /* 72 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int
sys_sigsuspend(int history0, int history1, old_sigset_t mask)
*/
break;
}
case SYS_sigprocmask: { /* 126 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
*/
break;
}
# endif
#else
/* until we've implemented them, keep down here to get warning: */
# if defined(LINUX) && !defined(X64)
# ifndef ARM
case SYS_signal:
# endif
case SYS_sigsuspend:
case SYS_sigprocmask:
# endif
#endif
#if defined(LINUX) && !defined(X64)
case SYS_sigpending: /* 73 */
# ifndef ARM
case SYS_sgetmask: /* 68 */
case SYS_ssetmask: /* 69 */
# endif
#endif
#ifdef LINUX
# ifdef SYS_rt_sigtimedwait_time64
case SYS_rt_sigtimedwait_time64: /* 421 */
# endif
case SYS_rt_sigtimedwait: /* 177 */
case SYS_rt_sigqueueinfo: /* 178 */
#endif
case IF_MACOS_ELSE(SYS_sigpending, SYS_rt_sigpending): { /* 176 */
/* FIXME i#92: handle all of these syscalls! */
LOG(THREAD, LOG_ASYNCH | LOG_SYSCALLS, 1,
"WARNING: unhandled signal system call %d\n", dcontext->sys_num);
SYSLOG_INTERNAL_WARNING_ONCE("unhandled signal system call %d",
dcontext->sys_num);
break;
}
#ifdef LINUX
# ifdef SYS_ppoll_time64
case SYS_ppoll_time64:
# endif
case SYS_ppoll: {
kernel_sigset_t *sigmask = (kernel_sigset_t *)sys_param(dcontext, 3);
dcontext->sys_param3 = (reg_t)sigmask;
if (sigmask == NULL)
break;
size_t sizemask = (size_t)sys_param(dcontext, 4);
        /* The original app's sigmask parameter is now NULL, effectively making the
         * syscall a non-p* version; the mask's semantics are emulated by DR instead.
*/
set_syscall_param(dcontext, 3, (reg_t)NULL);
bool sig_pending = false;
if (!handle_pre_extended_syscall_sigmasks(dcontext, sigmask, sizemask,
&sig_pending)) {
/* In old kernels with sizeof(kernel_sigset_t) != sizemask, we're forcing
* failure. We're already violating app transparency in other places in DR.
*/
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
if (sig_pending) {
/* If there had been pending signals, we revert re-writing the app's
* parameter, but we leave the modified signal mask.
*/
set_syscall_param(dcontext, 3, dcontext->sys_param3);
set_failure_return_val(dcontext, EINTR);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
break;
}
# ifdef SYS_pselect6_time64
case SYS_pselect6_time64:
# endif
case SYS_pselect6: {
typedef struct {
kernel_sigset_t *sigmask;
size_t sizemask;
} data_t;
dcontext->sys_param3 = sys_param(dcontext, 5);
data_t *data_param = (data_t *)dcontext->sys_param3;
data_t data;
if (data_param == NULL) {
/* The kernel does not consider a NULL 6th+7th-args struct to be an error but
* just a NULL sigmask.
*/
dcontext->sys_param4 = (reg_t)NULL;
break;
}
/* Refer to comments in SYS_ppoll above. Taking extra steps here due to struct
* argument in pselect6.
*/
if (!d_r_safe_read(data_param, sizeof(data), &data)) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EFAULT to app for pselect6\n");
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
break;
}
dcontext->sys_param4 = (reg_t)data.sigmask;
if (data.sigmask == NULL)
break;
kernel_sigset_t *nullsigmaskptr = NULL;
if (!safe_write_ex((void *)&data_param->sigmask, sizeof(data_param->sigmask),
&nullsigmaskptr, NULL)) {
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
break;
}
bool sig_pending = false;
if (!handle_pre_extended_syscall_sigmasks(dcontext, data.sigmask, data.sizemask,
&sig_pending)) {
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
if (sig_pending) {
if (!safe_write_ex((void *)&data_param->sigmask, sizeof(data_param->sigmask),
&dcontext->sys_param4, NULL)) {
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
break;
}
set_failure_return_val(dcontext, EINTR);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
break;
}
case SYS_epoll_pwait: {
kernel_sigset_t *sigmask = (kernel_sigset_t *)sys_param(dcontext, 4);
dcontext->sys_param4 = (reg_t)sigmask;
if (sigmask == NULL)
break;
size_t sizemask = (size_t)sys_param(dcontext, 5);
/* Refer to comments in SYS_ppoll above. */
set_syscall_param(dcontext, 4, (reg_t)NULL);
bool sig_pending = false;
if (!handle_pre_extended_syscall_sigmasks(dcontext, sigmask, sizemask,
&sig_pending)) {
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
if (sig_pending) {
set_syscall_param(dcontext, 4, dcontext->sys_param4);
set_failure_return_val(dcontext, EINTR);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
break;
}
#endif
/****************************************************************************/
/* FILES */
    /* Prevent the app from closing our files or opening a new file in our fd space.
     * It's not worth monitoring every syscall that takes in fds just to keep them
     * from affecting ours.
*/
#ifdef MACOS
case SYS_close_nocancel:
#endif
#ifdef SYS_close_range
case SYS_close_range: {
/* client.file_io indeed tests this for all arch, but it hasn't yet been
* run on an AArchXX machine that has close_range available.
*/
IF_AARCHXX(ASSERT_NOT_TESTED());
uint first_fd = sys_param(dcontext, 0), last_fd = sys_param(dcontext, 1);
uint flags = sys_param(dcontext, 2);
bool is_cloexec = TEST(CLOSE_RANGE_CLOEXEC, flags);
if (is_cloexec) {
/* client.file_io has a test for CLOSE_RANGE_CLOEXEC, but it hasn't been
* verified on a system with kernel version >= 5.11 yet.
*/
ASSERT_NOT_TESTED();
}
/* We do not let the app execute their own close_range ever. Instead we
* make multiple close_range syscalls ourselves, one for each contiguous
* sub-range of non-DR-private fds in [first, last].
*/
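        /* Illustration (hypothetical fd numbers): if DR privately owns fd 100 and
         * the app requests close_range(98, 102, 0), we instead issue
         * close_range(98, 99, 0) and close_range(101, 102, 0), leaving fd 100 intact.
         */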
execute_syscall = false;
if (first_fd > last_fd) {
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
break;
}
uint cur_range_first_fd, cur_range_last_fd;
bool cur_range_valid = false;
int ret = 0;
for (int i = first_fd; i <= last_fd; i++) {
/* Do not allow any changes to DR-owned FDs. */
if ((is_cloexec && fd_is_dr_owned(i)) ||
(!is_cloexec && !handle_close_range_pre(dcontext, i))) {
SYSLOG_INTERNAL_WARNING_ONCE("app trying to close private fd(s)");
if (cur_range_valid) {
cur_range_valid = false;
ret = dynamorio_syscall(SYS_close_range, 3, cur_range_first_fd,
cur_range_last_fd, flags);
if (ret != 0)
break;
}
} else {
# ifdef LINUX
if (!is_cloexec) {
signal_handle_close(dcontext, i);
}
# endif
if (cur_range_valid) {
ASSERT(cur_range_last_fd == i - 1);
cur_range_last_fd = i;
} else {
cur_range_first_fd = i;
cur_range_last_fd = i;
cur_range_valid = true;
}
}
}
if (cur_range_valid) {
ret = dynamorio_syscall(SYS_close_range, 3, cur_range_first_fd,
cur_range_last_fd, flags);
}
if (ret != 0) {
set_failure_return_val(dcontext, ret);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else {
set_success_return_val(dcontext, 0);
}
break;
}
#endif
case SYS_close: {
execute_syscall = handle_close_pre(dcontext);
#ifdef LINUX
if (execute_syscall)
signal_handle_close(dcontext, (file_t)sys_param(dcontext, 0));
#endif
break;
}
#if defined(SYS_dup2) || defined(SYS_dup3)
# ifdef SYS_dup3
case SYS_dup3:
# endif
# ifdef SYS_dup2
case SYS_dup2:
# endif
{
file_t newfd = (file_t)sys_param(dcontext, 1);
if (fd_is_dr_owned(newfd) || fd_is_in_private_range(newfd)) {
SYSLOG_INTERNAL_WARNING_ONCE("app trying to dup-close DR file(s)");
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app trying to dup2/dup3 to %d. Disallowing.\n", newfd);
if (DYNAMO_OPTION(fail_on_stolen_fds)) {
set_failure_return_val(dcontext, EBADF);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else
set_success_return_val(dcontext, 0);
execute_syscall = false;
}
break;
}
#endif
#ifdef MACOS
case SYS_fcntl_nocancel:
#endif
case SYS_fcntl: {
int cmd = (int)sys_param(dcontext, 1);
long arg = (long)sys_param(dcontext, 2);
        /* We only check for a requested minimum that lies in our private fd space:
         * we do not handle a minimum below our range whose actual resulting fd
         * would land above it (see notes in os_file_init()).
*/
if ((cmd == F_DUPFD || cmd == F_DUPFD_CLOEXEC) && fd_is_in_private_range(arg)) {
SYSLOG_INTERNAL_WARNING_ONCE("app trying to open private fd(s)");
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app trying to dup to >= %d. Disallowing.\n", arg);
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
} else {
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = cmd;
}
break;
}
#if defined(X64) || !defined(ARM) || defined(MACOS)
case SYS_getrlimit:
#endif
#if defined(LINUX) && !defined(X64)
case SYS_ugetrlimit:
#endif
/* save for post */
dcontext->sys_param0 = sys_param(dcontext, 0); /* resource */
dcontext->sys_param1 = sys_param(dcontext, 1); /* rlimit */
break;
case SYS_setrlimit: {
int resource = (int)sys_param(dcontext, 0);
if (resource == RLIMIT_NOFILE && DYNAMO_OPTION(steal_fds) > 0) {
#if !defined(ARM) && !defined(X64) && !defined(MACOS)
struct compat_rlimit rlim;
#else
struct rlimit rlim;
#endif
if (!d_r_safe_read((void *)sys_param(dcontext, 1), sizeof(rlim), &rlim)) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EFAULT to app for prlimit64\n");
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else if (rlim.rlim_cur > rlim.rlim_max) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EINVAL for prlimit64\n");
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else if (rlim.rlim_max <= min_dr_fd &&
/* Can't raise hard unless have CAP_SYS_RESOURCE capability.
* XXX i#2980: should query for that capability.
*/
rlim.rlim_max <= app_rlimit_nofile.rlim_max) {
/* if the new rlimit is lower, pretend succeed */
app_rlimit_nofile.rlim_cur = rlim.rlim_cur;
app_rlimit_nofile.rlim_max = rlim.rlim_max;
set_success_return_val(dcontext, 0);
} else {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EPERM to app for setrlimit\n");
/* don't let app raise limits as that would mess up our fd space */
set_failure_return_val(dcontext, EPERM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
}
execute_syscall = false;
}
break;
}
#ifdef LINUX
case SYS_prlimit64:
/* save for post */
dcontext->sys_param0 = sys_param(dcontext, 0); /* pid */
dcontext->sys_param1 = sys_param(dcontext, 1); /* resource */
dcontext->sys_param2 = sys_param(dcontext, 2); /* new rlimit */
dcontext->sys_param3 = sys_param(dcontext, 3); /* old rlimit */
if (/* XXX: how do we handle the case of setting rlimit.nofile on another
* process that is running with DynamoRIO?
*/
/* XXX: CLONE_FILES allows different processes to share the same file
* descriptor table, and different threads of the same process have
* separate file descriptor tables. POSIX specifies that rlimits are
* per-process, not per-thread, and Linux follows suit, so the threads
* with different descriptors will not matter, and the pids sharing
* descriptors turns into the hard-to-solve IPC problem.
*/
(dcontext->sys_param0 == 0 || dcontext->sys_param0 == get_process_id()) &&
dcontext->sys_param1 == RLIMIT_NOFILE &&
dcontext->sys_param2 != (reg_t)NULL && DYNAMO_OPTION(steal_fds) > 0) {
rlimit64_t rlim;
if (!d_r_safe_read((void *)(dcontext->sys_param2), sizeof(rlim), &rlim)) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EFAULT to app for prlimit64\n");
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else {
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: prlimit64 soft=" INT64_FORMAT_STRING
" hard=" INT64_FORMAT_STRING " vs DR %d\n",
rlim.rlim_cur, rlim.rlim_max, min_dr_fd);
if (rlim.rlim_cur > rlim.rlim_max) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EINVAL for prlimit64\n");
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else if (rlim.rlim_max <= min_dr_fd &&
/* Can't raise hard unless have CAP_SYS_RESOURCE capability.
* XXX i#2980: should query for that capability.
*/
rlim.rlim_max <= app_rlimit_nofile.rlim_max) {
/* if the new rlimit is lower, pretend succeed */
app_rlimit_nofile.rlim_cur = rlim.rlim_cur;
app_rlimit_nofile.rlim_max = rlim.rlim_max;
set_success_return_val(dcontext, 0);
/* set old rlimit if necessary */
if (dcontext->sys_param3 != (reg_t)NULL) {
safe_write_ex((void *)(dcontext->sys_param3), sizeof(rlim),
&app_rlimit_nofile, NULL);
}
} else {
/* don't let app raise limits as that would mess up our fd space */
LOG(THREAD, LOG_SYSCALLS, 2,
"\treturning EPERM to app for prlimit64\n");
set_failure_return_val(dcontext, EPERM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
}
}
execute_syscall = false;
}
break;
#endif
#ifdef LINUX
# ifdef SYS_readlink
case SYS_readlink:
# endif
case SYS_readlinkat:
if (DYNAMO_OPTION(early_inject)) {
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
if (dcontext->sys_num == SYS_readlinkat)
dcontext->sys_param3 = sys_param(dcontext, 3);
}
break;
/* i#107 syscalls that might change/query app's segment */
# if defined(X86) && defined(X64)
case SYS_arch_prctl: {
/* we handle arch_prctl in post_syscall */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
break;
}
# endif
# ifdef X86
case SYS_set_thread_area: {
our_modify_ldt_t desc;
if (INTERNAL_OPTION(mangle_app_seg) &&
d_r_safe_read((void *)sys_param(dcontext, 0), sizeof(desc), &desc)) {
if (os_set_app_thread_area(dcontext, &desc) &&
safe_write_ex((void *)sys_param(dcontext, 0), sizeof(desc), &desc,
NULL)) {
/* check if the range is unlimited */
ASSERT_CURIOSITY(desc.limit == 0xfffff);
execute_syscall = false;
set_success_return_val(dcontext, 0);
}
}
break;
}
case SYS_get_thread_area: {
our_modify_ldt_t desc;
if (INTERNAL_OPTION(mangle_app_seg) &&
d_r_safe_read((const void *)sys_param(dcontext, 0), sizeof(desc), &desc)) {
if (os_get_app_thread_area(dcontext, &desc) &&
safe_write_ex((void *)sys_param(dcontext, 0), sizeof(desc), &desc,
NULL)) {
execute_syscall = false;
set_success_return_val(dcontext, 0);
}
}
break;
}
# endif /* X86 */
# ifdef ARM
case SYS_set_tls: {
LOG(THREAD, LOG_VMAREAS | LOG_SYSCALLS, 2, "syscall: set_tls " PFX "\n",
sys_param(dcontext, 0));
if (os_set_app_tls_base(dcontext, TLS_REG_LIB, (void *)sys_param(dcontext, 0))) {
execute_syscall = false;
set_success_return_val(dcontext, 0);
} else {
ASSERT_NOT_REACHED();
}
break;
}
case SYS_cacheflush: {
/* We assume we don't want to change the executable_areas list or change
* the selfmod status of this region: else we should call something
* that invokes handle_modified_code() in a way that handles a bigger
* region than a single write.
*/
app_pc start = (app_pc)sys_param(dcontext, 0);
app_pc end = (app_pc)sys_param(dcontext, 1);
LOG(THREAD, LOG_VMAREAS | LOG_SYSCALLS, 2,
"syscall: cacheflush " PFX "-" PFX "\n", start, end);
flush_fragments_from_region(dcontext, start, end - start,
/* An unlink flush should be fine: the app must
* use synch to ensure other threads see the
* new code.
*/
false /*don't force synchall*/,
NULL /*flush_completion_callback*/,
NULL /*user_data*/);
break;
}
# endif /* ARM */
#elif defined(MACOS)
/* FIXME i#58: handle i386_{get,set}_ldt and thread_fast_set_cthread_self64 */
#endif
#ifdef DEBUG
# ifdef MACOS
case SYS_open_nocancel:
# endif
# ifdef SYS_open
case SYS_open: {
dcontext->sys_param0 = sys_param(dcontext, 0);
break;
}
# endif
#endif
#ifdef SYS_openat2
case SYS_openat2:
#endif
case SYS_openat: {
/* XXX: For completeness we might want to replace paths for SYS_open and
* possibly others, but SYS_openat is all we need on modern systems so we
* limit syscall overhead to this single point for now.
*/
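        /* Illustration (hypothetical paths): with -xarch_root /qemu/sysroot, an app
         * openat of "/lib/libc.so.6" that does not exist natively is redirected
         * below to "/qemu/sysroot/lib/libc.so.6" if that file exists there.
         */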
dcontext->sys_param0 = 0;
dcontext->sys_param1 = sys_param(dcontext, 1);
const char *path = (const char *)dcontext->sys_param1;
if (!IS_STRING_OPTION_EMPTY(xarch_root) && !os_file_exists(path, false)) {
char *buf = heap_alloc(dcontext, MAXIMUM_PATH HEAPACCT(ACCT_OTHER));
string_option_read_lock();
snprintf(buf, MAXIMUM_PATH, "%s/%s", DYNAMO_OPTION(xarch_root), path);
buf[MAXIMUM_PATH - 1] = '\0';
string_option_read_unlock();
if (os_file_exists(buf, false)) {
LOG(THREAD, LOG_SYSCALLS, 2, "SYS_openat: replacing |%s| with |%s|\n",
path, buf);
set_syscall_param(dcontext, 1, (reg_t)buf);
/* Save for freeing in post. */
dcontext->sys_param0 = (reg_t)buf;
} else
heap_free(dcontext, buf, MAXIMUM_PATH HEAPACCT(ACCT_OTHER));
}
break;
}
#ifdef LINUX
case SYS_rseq:
LOG(THREAD, LOG_VMAREAS | LOG_SYSCALLS, 2, "syscall: rseq " PFX " %d %d %d\n",
sys_param(dcontext, 0), sys_param(dcontext, 1), sys_param(dcontext, 2),
sys_param(dcontext, 3));
if (DYNAMO_OPTION(disable_rseq)) {
set_failure_return_val(dcontext, ENOSYS);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
} else {
dcontext->sys_param0 = sys_param(dcontext, 0);
}
break;
#endif
default: {
#ifdef VMX86_SERVER
if (is_vmkuw_sysnum(dcontext->sys_num)) {
execute_syscall = vmkuw_pre_system_call(dcontext);
break;
}
#endif
break;
}
} /* end switch */
dcontext->whereami = old_whereami;
return execute_syscall;
}
void
all_memory_areas_lock(void)
{
IF_NO_MEMQUERY(memcache_lock());
}
void
all_memory_areas_unlock(void)
{
IF_NO_MEMQUERY(memcache_unlock());
}
void
update_all_memory_areas(app_pc start, app_pc end, uint prot, int type)
{
IF_NO_MEMQUERY(memcache_update(start, end, prot, type));
}
bool
remove_from_all_memory_areas(app_pc start, app_pc end)
{
IF_NO_MEMQUERY(return memcache_remove(start, end));
return true;
}
/* We consider a module load to happen at the first mmap, so we check on later
* overmaps to ensure things look consistent. */
static bool
mmap_check_for_module_overlap(app_pc base, size_t size, bool readable, uint64 inode,
bool at_map)
{
module_area_t *ma;
os_get_module_info_lock();
ma = module_pc_lookup(base);
if (ma != NULL) {
        /* FIXME - how can we distinguish between the loader mapping the segments
         * over the initial map and someone just mapping over part of a module? If
         * it is the latter case we need to adjust the view size or remove it from
         * the module list. */
LOG(GLOBAL, LOG_VMAREAS, 2,
"%s mmap overlapping module area : \n"
"\tmap : base=" PFX " base+size=" PFX " inode=" UINT64_FORMAT_STRING "\n"
"\tmod : start=" PFX " end=" PFX " inode=" UINT64_FORMAT_STRING "\n",
at_map ? "new" : "existing", base, base + size, inode, ma->start, ma->end,
ma->names.inode);
ASSERT_CURIOSITY(base >= ma->start);
if (at_map) {
ASSERT_CURIOSITY(base + size <= ma->end);
} else {
/* FIXME - I'm having problems with this check for existing maps. I
* haven't been able to get gdb to break in early enough to really get a good
* look at the early loader behavior. Two issues: One case is with our .so
* for which the anonymous .bss mapping is one page larger than expected
             * (which might be some loader bug in the size calculation? if so we
             * should see it trigger the at_map curiosity on some dll and can address
             * it then) and the other is that for a few executables the .bss mapping is
             * much larger (~0x20000 larger) than expected when running under DR (but
             * not running natively where it is instead the expected size). Both could
             * just be the loader merging adjacent identically protected regions though
             * I can't explain the discrepancy between DR and native given that our
             * vmmheap is elsewhere in the address space (so who allocated that
             * adjacent memory, and how?). I've yet to see any issue with dynamically
             * loaded modules so
* it's probably the loader merging regions. Still worth investigating. */
ASSERT_CURIOSITY(inode == 0 /*see above comment*/ ||
module_contains_addr(ma, base + size - 1));
}
/* Handle cases like transparent huge pages where there are anon regions on top
* of the file mapping (i#2566).
*/
if (ma->names.inode == 0)
ma->names.inode = inode;
ASSERT_CURIOSITY(ma->names.inode == inode || inode == 0 /* for .bss */);
DOCHECK(1, {
if (readable && module_is_header(base, size)) {
/* Case 8879: For really small modules, to save disk space, the same
* disk page could hold both RO and .data, occupying just 1 page of
* disk space, e.g. /usr/lib/httpd/modules/mod_auth_anon.so. When
* such a module is mapped in, the os maps the same disk page twice,
* one readonly and one copy-on-write (see pg. 96, Sec 4.4 from
                 * Linkers and Loaders by John R. Levine). It is also possible for
* such small modules to have multiple LOAD data segments. Since all
* these segments are mapped from a single disk page they will all have an
* elf_header satisfying the check above. So, if the new mmap overlaps an
* elf_area and it is also a header, then make sure the offsets (from the
                 * beginning of the backing file) of all the segments up to the current
* one are within the page size. Note, if it is a header of a different
* module, then we'll not have an overlap, so we will not hit this case.
*/
bool cur_seg_found = false;
int seg_id = 0;
while (seg_id < ma->os_data.num_segments &&
ma->os_data.segments[seg_id].start <= base) {
cur_seg_found = ma->os_data.segments[seg_id].start == base;
ASSERT_CURIOSITY(
ma->os_data.segments[seg_id].offset <
PAGE_SIZE
/* On Mac we walk the dyld module list before the
* address space, so we often hit modules we already
* know about. */
IF_MACOS(|| !dynamo_initialized && ma->start == base));
++seg_id;
}
ASSERT_CURIOSITY(cur_seg_found);
}
});
}
os_get_module_info_unlock();
#ifdef ANDROID
/* i#1860: we need to keep looking for the segment with .dynamic as Android's
* loader does not map the whole file up front.
*/
if (ma != NULL && at_map && readable)
os_module_update_dynamic_info(base, size, at_map);
#endif
return ma != NULL;
}
static void
os_add_new_app_module(dcontext_t *dcontext, bool at_map, app_pc base, size_t size,
uint memprot)
{
memquery_iter_t iter;
bool found_map = false;
uint64 inode = 0;
const char *filename = "";
size_t mod_size = size;
if (!at_map) {
        /* the size is the first seg's size; get the whole module size instead */
app_pc first_seg_base = NULL;
app_pc first_seg_end = NULL;
app_pc last_seg_end = NULL;
if (module_walk_program_headers(base, size, at_map, false, &first_seg_base,
&first_seg_end, &last_seg_end, NULL, NULL)) {
ASSERT_CURIOSITY(size ==
(ALIGN_FORWARD(first_seg_end, PAGE_SIZE) -
(ptr_uint_t)first_seg_base) ||
base == vdso_page_start || base == vsyscall_page_start);
mod_size =
ALIGN_FORWARD(last_seg_end, PAGE_SIZE) - (ptr_uint_t)first_seg_base;
}
}
LOG(THREAD, LOG_SYSCALLS | LOG_VMAREAS, 2, "dlopen " PFX "-" PFX "%s\n", base,
base + mod_size, TEST(MEMPROT_EXEC, memprot) ? " +x" : "");
/* Mapping in a new module. From what we've observed of the loader's
* behavior, it first maps the file in with size equal to the final
* memory image size (I'm not sure how it gets that size without reading
* in the elf header and then walking through all the program headers to
* get the largest virtual offset). This is necessary to reserve all the
* space that will be needed. It then walks through the program headers
     * mapping over the previously mapped space with the appropriate
* permissions and offsets. Note that the .bss portion is mapped over
* as anonymous. It may also, depending on the program headers, make some
* areas read-only after fixing up their relocations etc. NOTE - at
* no point are the section headers guaranteed to be mapped in so we can't
* reliably walk sections (only segments) without looking to disk.
*/
/* FIXME - when should we add the module to our list? At the first map
* seems to be the best choice as we know the bounds and it's difficult to
* tell when the loader is finished. The downside is that at the initial map
* the memory layout isn't finalized (memory beyond the first segment will
* be shifted for page alignment reasons), so we have to be careful and
* make adjustments to read anything beyond the first segment until the
* loader finishes. This goes for the client too as it gets notified when we
* add to the list. FIXME we could try to track the expected segment overmaps
* and only notify the client after the last one (though that's still before
* linking and relocation, but that's true on Windows too). */
/* Get filename & inode for the list. */
memquery_iterator_start(&iter, base, true /* plan to alloc a module_area_t */);
while (memquery_iterator_next(&iter)) {
if (iter.vm_start == base) {
ASSERT_CURIOSITY(iter.inode != 0 || base == vdso_page_start ||
base == vsyscall_page_start);
ASSERT_CURIOSITY(iter.offset == 0); /* first map shouldn't have offset */
/* XREF 307599 on rounding module end to the next PAGE boundary */
ASSERT_CURIOSITY(
(iter.vm_end - iter.vm_start == ALIGN_FORWARD(size, PAGE_SIZE)));
inode = iter.inode;
filename = dr_strdup(iter.comment HEAPACCT(ACCT_OTHER));
found_map = true;
break;
}
}
memquery_iterator_stop(&iter);
#ifdef HAVE_MEMINFO
    /* barring weird races we should find this map */
ASSERT_CURIOSITY(found_map);
#else /* HAVE_MEMINFO */
/* Without /proc/maps or other memory querying interface available at
* library map time, there is no way to find out the name of the file
* that was mapped, thus its inode isn't available either.
*
* Just module_list_add with no filename will still result in
* library name being extracted from the .dynamic section and added
* to the module list. However, this name may not always exist, thus
* we might have a library with no file name available at all!
*
     * Note: visor implements vsi mem maps that give file info but no
     * path; should be ok.  xref PR 401580.
*
* Once PR 235433 is implemented in visor then fix memquery_iterator*() to
* use vsi to find out page protection info, file name & inode.
*/
#endif /* HAVE_MEMINFO */
/* XREF 307599 on rounding module end to the next PAGE boundary */
if (found_map) {
module_list_add(base, ALIGN_FORWARD(mod_size, PAGE_SIZE), at_map, filename,
inode);
dr_strfree(filename HEAPACCT(ACCT_OTHER));
}
}
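/* Ensures that the module containing pc, if any, is on our module list,
 * adding it if the OS reports image memory that we did not see at map time
 * (e.g., modules mapped by a custom loader).
 */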
void
os_check_new_app_module(dcontext_t *dcontext, app_pc pc)
{
module_area_t *ma;
os_get_module_info_lock();
ma = module_pc_lookup(pc);
/* ma might be NULL due to dynamic generated code or custom loaded modules */
if (ma == NULL) {
dr_mem_info_t info;
/* i#1760: an app module loaded by custom loader (e.g., bionic libc)
* might not be detected by DynamoRIO in process_mmap.
*/
if (query_memory_ex_from_os(pc, &info) && info.type == DR_MEMTYPE_IMAGE) {
/* add the missing module */
os_get_module_info_unlock();
os_add_new_app_module(get_thread_private_dcontext(), false /*!at_map*/,
info.base_pc, info.size, info.prot);
os_get_module_info_lock();
}
}
os_get_module_info_unlock();
}
/* All processing for mmap and mmap2. */
static void
process_mmap(dcontext_t *dcontext, app_pc base, size_t size, uint prot,
uint flags _IF_DEBUG(const char *map_type))
{
bool image = false;
uint memprot = osprot_to_memprot(prot);
#ifdef ANDROID
/* i#1861: avoid merging file-backed w/ anon regions */
if (!TEST(MAP_ANONYMOUS, flags))
memprot |= MEMPROT_HAS_COMMENT;
#endif
LOG(THREAD, LOG_SYSCALLS, 4, "process_mmap(" PFX "," PFX ",0x%x,%s,%s)\n", base, size,
flags, memprot_string(memprot), map_type);
/* Notes on how ELF SOs are mapped in.
*
* o The initial mmap for an ELF file specifies enough space for
* all segments (and their constituent sections) in the file.
* The protection bits for that section are used for the entire
* region, and subsequent mmaps for subsequent segments within
* the region modify their portion's protection bits as needed.
* So if the prot bits for the first segment are +x, the entire
* region is +x. ** Note that our primary concern is adjusting
* exec areas to reflect the prot bits of subsequent
* segments. ** The region is added to the all-memory areas
* and also to exec areas (as determined by app_memory_allocation()).
*
* o Any subsequent segment sub-mappings specify their own protection
* bits and therefore are added to the exec areas via normal
* processing. They are also "naturally" added to the all-mems list.
* We do a little extra processing when mapping into a previously
* mapped region and the prot bits mismatch; if the new mapping is
* not +x, flushing needs to occur.
*/
/* process_mmap can be called with PROT_NONE, so we need to check if we
* can read the memory to see if it is a elf_header
*/
/* XXX: get inode for check */
if (TEST(MAP_ANONYMOUS, flags)) {
/* not an ELF mmap */
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": anon\n", base);
} else if (mmap_check_for_module_overlap(base, size, TEST(MEMPROT_READ, memprot), 0,
true)) {
/* FIXME - how can we distinguish between the loader mapping the segments
         * over the initial map from someone just mapping over part of a module? In
         * the latter case we need to adjust the view size or remove from module list. */
image = true;
DODEBUG({ map_type = "ELF SO"; });
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": overlaps image\n", base);
} else if (TEST(MEMPROT_READ, memprot) &&
/* i#727: We can still get SIGBUS on mmap'ed files that can't be
* read, so pass size=0 to use a safe_read.
*/
module_is_header(base, 0)) {
#ifdef ANDROID
/* The Android loader's initial all-segment-covering mmap is anonymous */
dr_mem_info_t info;
if (query_memory_ex_from_os((byte *)ALIGN_FORWARD(base + size, PAGE_SIZE),
&info) &&
info.prot == MEMPROT_NONE && info.type == DR_MEMTYPE_DATA) {
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": Android elf\n", base);
image = true;
DODEBUG({ map_type = "ELF SO"; });
os_add_new_app_module(dcontext, true /*at_map*/, base,
/* pass segment size, not whole module size */
size, memprot);
} else
#endif
if (module_is_partial_map(base, size, memprot)) {
/* i#1240: App might read first page of ELF header using mmap, which
* might accidentally be treated as a module load. Heuristically
* distinguish this by saying that if this is the first mmap for an ELF
* (i.e., it doesn't overlap with a previous map), and if it's small,
* then don't treat it as a module load.
*/
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": partial\n", base);
} else {
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": elf header\n", base);
image = true;
DODEBUG({ map_type = "ELF SO"; });
os_add_new_app_module(dcontext, true /*at_map*/, base, size, memprot);
}
}
LOG(THREAD, LOG_SYSCALLS, 4, "\t try app_mem_alloc\n");
IF_NO_MEMQUERY(memcache_handle_mmap(dcontext, base, size, memprot, image));
if (app_memory_allocation(dcontext, base, size, memprot, image _IF_DEBUG(map_type)))
STATS_INC(num_app_code_modules);
LOG(THREAD, LOG_SYSCALLS, 4, "\t app_mem_alloc -- DONE\n");
}
#ifdef LINUX
/* Call right after the system call.
* i#173: old_prot and old_type should be from before the system call
*/
static bool
handle_app_mremap(dcontext_t *dcontext, byte *base, size_t size, byte *old_base,
size_t old_size, uint old_prot, uint old_type)
{
if (!mmap_syscall_succeeded(base))
return false;
if (base != old_base || size < old_size) { /* take action only if
* there was a change */
DEBUG_DECLARE(bool ok;)
/* fragments were shifted...don't try to fix them, just flush */
app_memory_deallocation(dcontext, (app_pc)old_base, old_size,
false /* don't own thread_initexit_lock */,
false /* not image, FIXME: somewhat arbitrary */);
DOCHECK(1, {
/* we don't expect to see remappings of modules */
os_get_module_info_lock();
ASSERT_CURIOSITY(!module_overlaps(base, size));
os_get_module_info_unlock();
});
/* Verify that the current prot on the new region (according to
* the os) is the same as what the prot used to be for the old
* region.
*/
DOCHECK(1, {
uint memprot;
ok = get_memory_info_from_os(base, NULL, NULL, &memprot);
/* allow maps to have +x,
* +x may be caused by READ_IMPLIES_EXEC set in personality flag (i#262)
*/
ASSERT(ok &&
(memprot == old_prot || (memprot & (~MEMPROT_EXEC)) == old_prot));
});
app_memory_allocation(dcontext, base, size, old_prot,
old_type == DR_MEMTYPE_IMAGE _IF_DEBUG("mremap"));
IF_NO_MEMQUERY(memcache_handle_mremap(dcontext, base, size, old_base, old_size,
old_prot, old_type));
}
return true;
}
static void
handle_app_brk(dcontext_t *dcontext, byte *lowest_brk /*if known*/, byte *old_brk,
byte *new_brk)
{
/* i#851: the brk might not be page aligned */
old_brk = (app_pc)ALIGN_FORWARD(old_brk, PAGE_SIZE);
new_brk = (app_pc)ALIGN_FORWARD(new_brk, PAGE_SIZE);
if (new_brk < old_brk) {
/* Usually the heap is writable, so we don't really need to call
* this here: but seems safest to do so, esp if someone made part of
* the heap read-only and then put code there.
*/
app_memory_deallocation(dcontext, new_brk, old_brk - new_brk,
false /* don't own thread_initexit_lock */,
false /* not image */);
} else if (new_brk > old_brk) {
/* No need to call app_memory_allocation() as doesn't interact
* w/ security policies.
*/
}
IF_NO_MEMQUERY(memcache_handle_app_brk(lowest_brk, old_brk, new_brk));
}
#endif
/* This routine is *not* called if pre_system_call() returns false to skip
* the syscall.
*/
/* XXX: split out specific handlers into separate routines
*/
void
post_system_call(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
/* registers have been clobbered, so sysnum is kept in dcontext */
int sysnum = dcontext->sys_num;
/* We expect most syscall failures to return < 0, so >= 0 is success.
* Some syscall return addresses that have the sign bit set and so
* appear to be failures but are not. They are handled on a
* case-by-case basis in the switch statement below.
*/
ptr_int_t result = (ptr_int_t)MCXT_SYSCALL_RES(mc); /* signed */
bool success = syscall_successful(mc, sysnum);
app_pc base;
size_t size;
uint prot;
dr_where_am_i_t old_whereami;
DEBUG_DECLARE(bool ok;)
RSTATS_INC(post_syscall);
old_whereami = dcontext->whereami;
dcontext->whereami = DR_WHERE_SYSCALL_HANDLER;
#if defined(LINUX) && defined(X86)
/* PR 313715: restore xbp since for some vsyscall sequences that use
* the syscall instruction its value is needed:
* 0xffffe400 <__kernel_vsyscall+0>: push %ebp
* 0xffffe401 <__kernel_vsyscall+1>: mov %ecx,%ebp
* 0xffffe403 <__kernel_vsyscall+3>: syscall
* 0xffffe405 <__kernel_vsyscall+5>: mov $0x2b,%ecx
* 0xffffe40a <__kernel_vsyscall+10>: movl %ecx,%ss
* 0xffffe40c <__kernel_vsyscall+12>: mov %ebp,%ecx
* 0xffffe40e <__kernel_vsyscall+14>: pop %ebp
* 0xffffe40f <__kernel_vsyscall+15>: ret
*/
if (should_syscall_method_be_sysenter() && !dcontext->sys_was_int) {
mc->xbp = dcontext->sys_xbp;
}
#endif
/* handle fork, try to do it early before too much logging occurs */
if (false
#ifdef SYS_fork
|| sysnum ==
SYS_fork
#endif
IF_LINUX(||
(sysnum == SYS_clone && !TEST(CLONE_VM, dcontext->sys_param0)) ||
(sysnum == SYS_clone3 &&
!TEST(CLONE_VM, get_stored_clone3_flags(dcontext))))) {
if (result == 0) {
/* we're the child */
thread_id_t child = get_sys_thread_id();
#ifdef DEBUG
thread_id_t parent = get_parent_id();
SYSLOG_INTERNAL_INFO("-- parent %d forked child %d --", parent, child);
#endif
/* first, fix TLS of dcontext */
ASSERT(parent != 0);
/* change parent pid to our pid */
replace_thread_id(dcontext->owning_thread, child);
dcontext->owning_thread = child;
dcontext->owning_process = get_process_id();
/* now let dynamo initialize new shared memory, logfiles, etc.
* need access to static vars in dynamo.c, that's why we don't do it. */
/* FIXME - xref PR 246902 - d_r_dispatch runs a lot of code before
             * getting to post_system_call(); is any of that going to be messed up
* by waiting till here to fixup the child logfolder/file and tid?
*/
dynamorio_fork_init(dcontext);
LOG(THREAD, LOG_SYSCALLS, 1,
"after fork-like syscall: parent is %d, child is %d\n", parent, child);
} else {
/* we're the parent */
os_fork_post(dcontext, true /*parent*/);
}
}
LOG(THREAD, LOG_SYSCALLS, 2, "post syscall: sysnum=" PFX ", result=" PFX " (%d)\n",
sysnum, MCXT_SYSCALL_RES(mc), (int)MCXT_SYSCALL_RES(mc));
switch (sysnum) {
/****************************************************************************/
/* MEMORY REGIONS */
#ifdef DEBUG
# ifdef MACOS
case SYS_open_nocancel:
# endif
# ifdef SYS_open
case SYS_open: {
if (success) {
/* useful for figuring out what module was loaded that then triggers
* module.c elf curiosities
*/
LOG(THREAD, LOG_SYSCALLS, 2, "SYS_open %s => %d\n", dcontext->sys_param0,
(int)result);
}
break;
}
# endif
#endif
#if defined(LINUX) && !defined(X64) && !defined(ARM)
case SYS_mmap:
#endif
case IF_MACOS_ELSE(SYS_mmap, IF_X64_ELSE(SYS_mmap, SYS_mmap2)): {
uint flags;
DEBUG_DECLARE(const char *map_type;)
RSTATS_INC(num_app_mmaps);
base = (app_pc)MCXT_SYSCALL_RES(mc); /* For mmap, it's NOT arg->addr! */
/* mmap isn't simply a user-space wrapper for mmap2. It's called
* directly when dynamically loading an SO, i.e., dlopen(). */
#ifdef LINUX /* MacOS success is in CF */
success = mmap_syscall_succeeded((app_pc)result);
/* The syscall either failed OR the retcode is less than the
* largest uint value of any errno and the addr returned is
* page-aligned.
*/
ASSERT_CURIOSITY(
!success ||
((app_pc)result < (app_pc)(ptr_int_t)-0x1000 && ALIGNED(base, PAGE_SIZE)));
#else
ASSERT_CURIOSITY(!success || ALIGNED(base, PAGE_SIZE));
#endif
if (!success)
goto exit_post_system_call;
#if defined(LINUX) && !defined(X64) && !defined(ARM)
if (sysnum == SYS_mmap) {
/* The syscall succeeded so the read of 'arg' should be
* safe. */
mmap_arg_struct_t *arg = (mmap_arg_struct_t *)dcontext->sys_param0;
size = (size_t)arg->len;
prot = (uint)arg->prot;
flags = (uint)arg->flags;
DEBUG_DECLARE(map_type = "mmap";)
} else {
#endif
size = (size_t)dcontext->sys_param1;
prot = (uint)dcontext->sys_param2;
flags = (uint)dcontext->sys_param3;
DEBUG_DECLARE(map_type = IF_X64_ELSE("mmap2", "mmap");)
#if defined(LINUX) && !defined(X64) && !defined(ARM)
}
#endif
process_mmap(dcontext, base, size, prot, flags _IF_DEBUG(map_type));
break;
}
case SYS_munmap: {
app_pc addr = (app_pc)dcontext->sys_param0;
size_t len = (size_t)dcontext->sys_param1;
/* We assumed in pre_system_call() that the unmap would succeed
* and flushed fragments and removed the region from exec areas.
* If the unmap failed, we re-add the region to exec areas.
* For zero-length unmaps we don't need to re-add anything,
* and we hit an assert in vmareas.c if we try (i#4031).
*
* The same logic can be used on Windows (but isn't yet).
*/
/* FIXME There are shortcomings to the approach. If another thread
* executes in the region after our pre_system_call processing
* but before the re-add below, it will get a security violation.
* That's less than ideal but at least isn't a security hole.
* The overall shortcoming is that we lose the state from our
* stateful security policies -- future exec list, tables used
* for RCT (.C/.E/.F) -- which can't be easily restored. Also,
* the re-add could add a region that wasn't on the exec list
* previously.
*
* See case 7559 for a better approach.
*/
if (!success && len != 0) {
dr_mem_info_t info;
/* must go to os to get real memory since we already removed */
DEBUG_DECLARE(ok =)
query_memory_ex_from_os(addr, &info);
ASSERT(ok);
app_memory_allocation(dcontext, addr, len, info.prot,
info.type ==
DR_MEMTYPE_IMAGE _IF_DEBUG("failed munmap"));
IF_NO_MEMQUERY(
memcache_update_locked((app_pc)ALIGN_BACKWARD(addr, PAGE_SIZE),
(app_pc)ALIGN_FORWARD(addr + len, PAGE_SIZE),
info.prot, info.type, false /*add back*/));
}
break;
}
#ifdef LINUX
case SYS_mremap: {
app_pc old_base = (app_pc)dcontext->sys_param0;
size_t old_size = (size_t)dcontext->sys_param1;
base = (app_pc)MCXT_SYSCALL_RES(mc);
size = (size_t)dcontext->sys_param2;
/* even if no shift, count as munmap plus mmap */
RSTATS_INC(num_app_munmaps);
RSTATS_INC(num_app_mmaps);
success =
handle_app_mremap(dcontext, base, size, old_base, old_size,
/* i#173: use memory prot and type
* obtained from pre_system_call
*/
(uint)dcontext->sys_param3, (uint)dcontext->sys_param4);
/* The syscall either failed OR the retcode is less than the
* largest uint value of any errno and the addr returned is
         * page-aligned.
*/
ASSERT_CURIOSITY(
!success ||
((app_pc)result < (app_pc)(ptr_int_t)-0x1000 && ALIGNED(base, PAGE_SIZE)));
if (!success)
goto exit_post_system_call;
break;
}
#endif
case SYS_mprotect: {
base = (app_pc)dcontext->sys_param0;
size = dcontext->sys_param1;
prot = dcontext->sys_param2;
#ifdef VMX86_SERVER
/* PR 475111: workaround for PR 107872 */
if (os_in_vmkernel_userworld() && result == -EBUSY && prot == PROT_NONE) {
result = mprotect_syscall(base, size, PROT_READ);
/* since non-Mac, we can use this even if the call failed */
set_success_return_val(dcontext, result);
success = (result >= 0);
LOG(THREAD, LOG_VMAREAS, 1,
"re-doing mprotect -EBUSY for " PFX "-" PFX " => %d\n", base, base + size,
(int)result);
SYSLOG_INTERNAL_WARNING_ONCE("re-doing mprotect for PR 475111, PR 107872");
}
#endif
/* FIXME i#143: we need to tweak the returned oldprot for
* writable areas we've made read-only
*/
if (!success) {
uint memprot = 0;
/* Revert the prot bits if needed. */
if (!get_memory_info_from_os(base, NULL, NULL, &memprot))
memprot = PROT_NONE;
LOG(THREAD, LOG_SYSCALLS, 3,
"syscall: mprotect failed: " PFX "-" PFX " prot->%d\n", base, base + size,
osprot_to_memprot(prot));
LOG(THREAD, LOG_SYSCALLS, 3, "\told prot->%d\n", memprot);
if (prot != memprot_to_osprot(memprot)) {
/* We're trying to reverse the prot change, assuming that
* this action doesn't have any unexpected side effects
* when doing so (such as not reversing some bit of internal
* state).
*/
uint new_memprot;
DEBUG_DECLARE(uint res =)
app_memory_protection_change(dcontext, base, size,
osprot_to_memprot(prot), &new_memprot, NULL,
false /*!image*/);
ASSERT_NOT_IMPLEMENTED(res != SUBSET_APP_MEM_PROT_CHANGE);
ASSERT(res == DO_APP_MEM_PROT_CHANGE ||
res == PRETEND_APP_MEM_PROT_CHANGE);
/* PR 410921 - Revert the changes to all-mems list.
* FIXME: This fix assumes the whole region had the prot &
* type, which is true in the cases we have seen so far, but
* theoretically may not be true. If it isn't true, multiple
* memory areas with different types/protections might have
* been changed in pre_system_call(), so will have to keep a
* list of all vmareas changed. This might be expensive for
* each mprotect syscall to guard against a rare theoretical bug.
*/
ASSERT_CURIOSITY(!dcontext->mprot_multi_areas);
IF_NO_MEMQUERY(memcache_update_locked(
base, base + size, memprot, -1 /*type unchanged*/, true /*exists*/));
}
}
break;
}
#ifdef ANDROID
case SYS_prctl: {
int code = (int)dcontext->sys_param0;
int subcode = (ulong)dcontext->sys_param1;
if (success && code == PR_SET_VMA && subcode == PR_SET_VMA_ANON_NAME) {
byte *addr = (byte *)dcontext->sys_param2;
size_t len = (size_t)dcontext->sys_param3;
IF_DEBUG(const char *comment = (const char *)dcontext->sys_param4;)
uint memprot = 0;
if (!get_memory_info_from_os(addr, NULL, NULL, &memprot))
memprot = MEMPROT_NONE;
/* We're post-syscall so from_os should match the prctl */
ASSERT((comment == NULL && !TEST(MEMPROT_HAS_COMMENT, memprot)) ||
(comment != NULL && TEST(MEMPROT_HAS_COMMENT, memprot)));
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: prctl PR_SET_VMA_ANON_NAME base=" PFX " size=" PFX
" comment=%s\n",
addr, len, comment == NULL ? "<null>" : comment);
IF_NO_MEMQUERY(memcache_update_locked(
addr, addr + len, memprot, -1 /*type unchanged*/, true /*exists*/));
}
break;
}
#endif
#ifdef LINUX
case SYS_brk: {
/* i#91/PR 396352: need to watch SYS_brk to maintain all_memory_areas.
* This code should work regardless of whether syscall failed
* (if it failed, the old break will be returned). We stored
* the old break in sys_param1 in pre-syscall.
*/
app_pc old_brk = (app_pc)dcontext->sys_param1;
app_pc new_brk = (app_pc)result;
DEBUG_DECLARE(app_pc req_brk = (app_pc)dcontext->sys_param0;);
ASSERT(!DYNAMO_OPTION(emulate_brk)); /* shouldn't get here */
# ifdef DEBUG
if (DYNAMO_OPTION(early_inject) &&
req_brk != NULL /* Ignore calls that don't increase brk. */) {
DO_ONCE({
ASSERT_CURIOSITY(new_brk > old_brk &&
"i#1004: first brk() "
"allocation failed with -early_inject");
});
}
# endif
handle_app_brk(dcontext, NULL, old_brk, new_brk);
break;
}
#endif
/****************************************************************************/
/* SPAWNING -- fork mostly handled above */
#ifdef LINUX
case SYS_clone3:
case SYS_clone: {
/* in /usr/src/linux/arch/i386/kernel/process.c */
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: clone returned " PFX "\n",
MCXT_SYSCALL_RES(mc));
/* TODO i#5221: Handle clone3 returning errors other than ENOSYS. */
/* We switch the lib tls segment back to dr's privlib segment.
* Please refer to comment on os_switch_lib_tls.
* It is only called in parent thread.
* The child thread's tls setup is done in os_tls_app_seg_init.
*/
if (was_thread_create_syscall(dcontext)) {
if (INTERNAL_OPTION(private_loader))
os_switch_lib_tls(dcontext, false /*to dr*/);
/* i#2089: we already restored the DR tls in os_clone_post() */
if (sysnum == SYS_clone3) {
/* Free DR's copy of clone_args and restore the pointer to the
* app's copy in the SYSCALL_PARAM_CLONE3_CLONE_ARGS reg.
* sys_param1 contains the pointer to DR's clone_args, and
* sys_param0 contains the pointer to the app's original
* clone_args.
*/
# ifdef X86
ASSERT(sys_param(dcontext, SYSCALL_PARAM_CLONE3_CLONE_ARGS) ==
dcontext->sys_param1);
set_syscall_param(dcontext, SYSCALL_PARAM_CLONE3_CLONE_ARGS,
dcontext->sys_param0);
# else
/* On AArchXX r0 is used to pass the first arg to the syscall as well as
* to hold its return value. As the clone_args pointer isn't available
* post-syscall natively anyway, there's no need to restore here.
*/
# endif
uint app_clone_args_size = (uint)dcontext->sys_param2;
heap_free(dcontext, (clone3_syscall_args_t *)dcontext->sys_param1,
app_clone_args_size HEAPACCT(ACCT_OTHER));
} else if (sysnum == SYS_clone) {
set_syscall_param(dcontext, SYSCALL_PARAM_CLONE_STACK,
dcontext->sys_param1);
}
}
break;
}
#elif defined(MACOS) && !defined(X64)
case SYS_bsdthread_create: {
/* restore stack values we clobbered */
ASSERT(*sys_param_addr(dcontext, 0) == (reg_t)new_bsdthread_intercept);
*sys_param_addr(dcontext, 0) = dcontext->sys_param0;
*sys_param_addr(dcontext, 1) = dcontext->sys_param1;
break;
}
#endif
#ifdef SYS_fork
case SYS_fork: {
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: fork returned " PFX "\n",
MCXT_SYSCALL_RES(mc));
break;
}
#endif
#ifdef SYS_vfork
case SYS_vfork: {
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: vfork returned " PFX "\n",
MCXT_SYSCALL_RES(mc));
IF_LINUX(ASSERT(was_thread_create_syscall(dcontext)));
/* restore xsp in parent */
LOG(THREAD, LOG_SYSCALLS, 2, "vfork: restoring xsp from " PFX " to " PFX "\n",
mc->xsp, dcontext->sys_param1);
mc->xsp = dcontext->sys_param1;
if (MCXT_SYSCALL_RES(mc) != 0) {
/* We switch the lib tls segment back to dr's segment.
* Please refer to comment on os_switch_lib_tls.
* It is only called in parent thread.
* The child thread's tls setup is done in os_tls_app_seg_init.
*/
if (INTERNAL_OPTION(private_loader)) {
os_switch_lib_tls(dcontext, false /*to dr*/);
}
/* i#2089: we already restored the DR tls in os_clone_post() */
}
break;
}
#endif
case SYS_execve: {
/* if we get here it means execve failed (doesn't return on success) */
success = false;
mark_thread_execve(dcontext->thread_record, false);
ASSERT(result < 0);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: execve failed\n");
handle_execve_post(dcontext);
/* Don't 'break' as we have an ASSERT(success) just below
* the switch(). */
goto exit_post_system_call;
break; /* unnecessary but good form so keep it */
}
/****************************************************************************/
/* SIGNALS */
case IF_MACOS_ELSE(SYS_sigaction, SYS_rt_sigaction): { /* 174 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act,
struct sigaction *oact, size_t sigsetsize)
*/
/* FIXME i#148: Handle syscall failure. */
int sig = (int)dcontext->sys_param0;
const kernel_sigaction_t *act = (const kernel_sigaction_t *)dcontext->sys_param1;
prev_sigaction_t *oact = (prev_sigaction_t *)dcontext->sys_param2;
size_t sigsetsize = (size_t)dcontext->sys_param3;
uint res;
res = handle_post_sigaction(dcontext, success, sig, act, oact, sigsetsize);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: %ssigaction => %d\n",
IF_MACOS_ELSE("", "rt_"), -res);
if (res != 0)
set_failure_return_val(dcontext, res);
if (!success || res != 0)
goto exit_post_system_call;
break;
}
#if defined(LINUX) && !defined(X64)
case SYS_sigaction: { /* 67 */
int sig = (int)dcontext->sys_param0;
const old_sigaction_t *act = (const old_sigaction_t *)dcontext->sys_param1;
old_sigaction_t *oact = (old_sigaction_t *)dcontext->sys_param2;
uint res = handle_post_old_sigaction(dcontext, success, sig, act, oact);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: sigaction => %d\n", -res);
if (res != 0)
set_failure_return_val(dcontext, res);
if (!success || res != 0)
goto exit_post_system_call;
break;
}
#endif
case IF_MACOS_ELSE(SYS_sigprocmask, SYS_rt_sigprocmask): { /* 175 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset,
size_t sigsetsize)
*/
/* FIXME i#148: Handle syscall failure. */
handle_post_sigprocmask(
dcontext, (int)dcontext->sys_param0, (kernel_sigset_t *)dcontext->sys_param1,
(kernel_sigset_t *)dcontext->sys_param2, (size_t)dcontext->sys_param3);
break;
}
#if defined(LINUX) && !defined(X64)
case SYS_sigreturn: /* 119 */
#endif
case IF_MACOS_ELSE(SYS_sigreturn, SYS_rt_sigreturn): /* 173 */
/* there is no return value: it's just the value of eax, so avoid
* assert below
*/
success = true;
break;
case SYS_setitimer: /* 104 */
handle_post_setitimer(dcontext, success, (int)dcontext->sys_param0,
(const struct itimerval *)dcontext->sys_param1,
(struct itimerval *)dcontext->sys_param2);
break;
case SYS_getitimer: /* 105 */
handle_post_getitimer(dcontext, success, (int)dcontext->sys_param0,
(struct itimerval *)dcontext->sys_param1);
break;
#if defined(LINUX) && defined(X86)
case SYS_alarm: /* 27 on x86 and 37 on x64 */
handle_post_alarm(dcontext, success, (unsigned int)dcontext->sys_param0);
break;
#endif
#if defined(LINUX) && defined(X86) && defined(X64)
case SYS_arch_prctl: {
if (success && INTERNAL_OPTION(mangle_app_seg)) {
tls_handle_post_arch_prctl(dcontext, dcontext->sys_param0,
dcontext->sys_param1);
}
break;
}
#endif
#ifdef LINUX
# ifdef SYS_ppoll_time64
case SYS_ppoll_time64:
# endif
case SYS_ppoll: {
if (dcontext->sys_param3 == (reg_t)NULL)
break;
handle_post_extended_syscall_sigmasks(dcontext, success);
set_syscall_param(dcontext, 3, dcontext->sys_param3);
break;
}
# ifdef SYS_pselect6_time64
case SYS_pselect6_time64:
# endif
case SYS_pselect6: {
if (dcontext->sys_param4 == (reg_t)NULL)
break;
typedef struct {
kernel_sigset_t *sigmask;
size_t sizemask;
} data_t;
data_t *data_param = (data_t *)dcontext->sys_param3;
handle_post_extended_syscall_sigmasks(dcontext, success);
if (!safe_write_ex((void *)&data_param->sigmask, sizeof(data_param->sigmask),
&dcontext->sys_param4, NULL)) {
LOG(THREAD, LOG_SYSCALLS, 2, "\tEFAULT for pselect6 post syscall\n");
}
break;
}
case SYS_epoll_pwait: {
if (dcontext->sys_param4 == (reg_t)NULL)
break;
handle_post_extended_syscall_sigmasks(dcontext, success);
set_syscall_param(dcontext, 4, dcontext->sys_param4);
break;
}
#endif
/****************************************************************************/
/* FILES */
#ifdef SYS_dup2
case SYS_dup2: IF_LINUX(case SYS_dup3:) {
# ifdef LINUX
if (success) {
signal_handle_dup(dcontext, (file_t)sys_param(dcontext, 1),
(file_t)result);
}
# endif
break;
}
#endif
#ifdef MACOS
case SYS_fcntl_nocancel:
#endif
case SYS_fcntl: {
#ifdef LINUX /* Linux-only since only for signalfd */
if (success) {
file_t fd = (long)dcontext->sys_param0;
int cmd = (int)dcontext->sys_param1;
if ((cmd == F_DUPFD || cmd == F_DUPFD_CLOEXEC))
signal_handle_dup(dcontext, fd, (file_t)result);
}
break;
#endif
}
case IF_MACOS_ELSE(SYS_getrlimit, IF_X64_ELSE(SYS_getrlimit, SYS_ugetrlimit)): {
int resource = dcontext->sys_param0;
if (success && resource == RLIMIT_NOFILE) {
/* we stole some space: hide it from app */
struct rlimit *rlim = (struct rlimit *)dcontext->sys_param1;
safe_write_ex(&rlim->rlim_cur, sizeof(rlim->rlim_cur),
&app_rlimit_nofile.rlim_cur, NULL);
safe_write_ex(&rlim->rlim_max, sizeof(rlim->rlim_max),
&app_rlimit_nofile.rlim_max, NULL);
}
break;
}
#if !defined(ARM) && !defined(X64) && !defined(MACOS)
/* Old struct w/ smaller fields */
case SYS_getrlimit: {
int resource = dcontext->sys_param0;
if (success && resource == RLIMIT_NOFILE) {
struct compat_rlimit *rlim = (struct compat_rlimit *)dcontext->sys_param1;
safe_write_ex(&rlim->rlim_cur, sizeof(rlim->rlim_cur),
&app_rlimit_nofile.rlim_cur, NULL);
safe_write_ex(&rlim->rlim_max, sizeof(rlim->rlim_max),
&app_rlimit_nofile.rlim_max, NULL);
}
break;
}
#endif
#ifdef LINUX
case SYS_prlimit64: {
int resource = dcontext->sys_param1;
rlimit64_t *rlim = (rlimit64_t *)dcontext->sys_param3;
if (success && resource == RLIMIT_NOFILE && rlim != NULL &&
/* XXX: xref pid discussion in pre_system_call SYS_prlimit64 */
(dcontext->sys_param0 == 0 || dcontext->sys_param0 == get_process_id())) {
safe_write_ex(rlim, sizeof(*rlim), &app_rlimit_nofile, NULL);
}
break;
}
#endif
#ifdef LINUX
# ifdef SYS_readlink
case SYS_readlink:
# endif
case SYS_readlinkat:
if (success && DYNAMO_OPTION(early_inject)) {
bool is_at = (sysnum == SYS_readlinkat);
/* i#907: /proc/self/exe is a symlink to libdynamorio.so. We need
* to fix it up if the app queries. Any thread id can be passed to
* /proc/%d/exe, so we have to check. We could instead look for
* libdynamorio.so in the result but we've tweaked our injector
* in the past to exec different binaries so this seems more robust.
*/
if (symlink_is_self_exe((const char *)(is_at ? dcontext->sys_param1
: dcontext->sys_param0))) {
char *tgt = (char *)(is_at ? dcontext->sys_param2 : dcontext->sys_param1);
size_t tgt_sz =
(size_t)(is_at ? dcontext->sys_param3 : dcontext->sys_param2);
int len = snprintf(tgt, tgt_sz, "%s", get_application_name());
if (len > 0)
set_success_return_val(dcontext, len);
else {
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
}
}
}
break;
# ifdef SYS_openat2
case SYS_openat2:
# endif
case SYS_openat:
if (dcontext->sys_param0 != 0) {
heap_free(dcontext, (void *)dcontext->sys_param0,
MAXIMUM_PATH HEAPACCT(ACCT_OTHER));
}
break;
case SYS_rseq:
/* Lazy rseq handling. */
if (success) {
rseq_process_syscall(dcontext);
rseq_locate_rseq_regions();
}
break;
#endif
default:
#ifdef VMX86_SERVER
if (is_vmkuw_sysnum(sysnum)) {
vmkuw_post_system_call(dcontext);
break;
}
#endif
break;
} /* switch */
DODEBUG({
if (ignorable_system_call_normalized(sysnum)) {
STATS_INC(post_syscall_ignorable);
} else {
/* Many syscalls can fail though they aren't ignored. However, they
* shouldn't happen without us knowing about them. See PR 402769
* for SYS_close case.
*/
if (!(success || sysnum == SYS_close ||
IF_MACOS(sysnum == SYS_close_nocancel ||)
dcontext->expect_last_syscall_to_fail)) {
LOG(THREAD, LOG_SYSCALLS, 1,
"Unexpected failure of non-ignorable syscall %d\n", sysnum);
}
}
});
exit_post_system_call:
    /* instrument_post_syscall() should be called after DR finishes all
* its operations, since DR needs to know the real syscall results,
* and any changes made by the client are simply to fool the app.
* Also, dr_syscall_invoke_another() needs to set eax, which shouldn't
* affect the result of the 1st syscall. Xref i#1.
*/
/* after restore of xbp so client sees it as though was sysenter */
instrument_post_syscall(dcontext, sysnum);
dcontext->whereami = old_whereami;
}
#ifdef LINUX
# ifdef STATIC_LIBRARY
/* Static libraries may optionally define two linker variables
* (dynamorio_so_start and dynamorio_so_end) to help mitigate
 * edge cases in detecting DR's library bounds.
*
* If not specified, the variables' location will default to
* weak_dynamorio_so_bounds_filler and they will not be used.
* Note that referencing the value of these symbols will crash:
* always use the address only.
*/
extern int dynamorio_so_start WEAK
__attribute__((alias("weak_dynamorio_so_bounds_filler")));
extern int dynamorio_so_end WEAK
__attribute__((alias("weak_dynamorio_so_bounds_filler")));
static int weak_dynamorio_so_bounds_filler;
# else /* !STATIC_LIBRARY */
/* For non-static linux we always get our bounds from linker-provided symbols.
* Note that referencing the value of these symbols will crash: always use the
* address only.
*/
extern int dynamorio_so_start, dynamorio_so_end;
# endif /* STATIC_LIBRARY */
#endif /* LINUX */
/* get_dynamo_library_bounds initializes dynamorio library bounds, using a
* release-time assert if there is a problem doing so. It does not use any
* heap, and we assume it is called prior to find_executable_vm_areas in a
* single thread.
*/
static void
get_dynamo_library_bounds(void)
{
/* Note that we're not counting DYNAMORIO_PRELOAD_NAME as a DR area, to match
* Windows, so we should unload it like we do there. The other reason not to
* count it is so is_in_dynamo_dll() can be the only exception to the
* never-execute-from-DR-areas list rule
*/
int res;
app_pc check_start, check_end;
char *libdir;
const char *dynamorio_libname = NULL;
bool do_memquery = true;
#ifdef STATIC_LIBRARY
# ifdef LINUX
/* For static+linux, we might have linker vars to help us and we definitely
* know our "library name" since we are in the app. When we have both we
* don't need to do a memquery.
*/
if (&dynamorio_so_start != &weak_dynamorio_so_bounds_filler &&
&dynamorio_so_end != &weak_dynamorio_so_bounds_filler) {
do_memquery = false;
dynamo_dll_start = (app_pc)&dynamorio_so_start;
dynamo_dll_end = (app_pc)ALIGN_FORWARD(&dynamorio_so_end, PAGE_SIZE);
LOG(GLOBAL, LOG_VMAREAS, 2,
"Using dynamorio_so_start and dynamorio_so_end for library bounds"
"\n");
const char *dr_path = get_application_name();
strncpy(dynamorio_library_filepath, dr_path,
BUFFER_SIZE_ELEMENTS(dynamorio_library_filepath));
NULL_TERMINATE_BUFFER(dynamorio_library_filepath);
const char *slash = strrchr(dr_path, '/');
ASSERT(slash != NULL);
/* Include the slash in the library path */
size_t copy_chars = 1 + slash - dr_path;
ASSERT(copy_chars < BUFFER_SIZE_ELEMENTS(dynamorio_library_path));
strncpy(dynamorio_library_path, dr_path, copy_chars);
dynamorio_library_path[copy_chars] = '\0';
}
# endif
if (do_memquery) {
        /* No linker vars, so we need to find the bounds using an internal PC */
check_start = (app_pc)&get_dynamo_library_bounds;
}
#else /* !STATIC_LIBRARY */
# ifdef LINUX
/* PR 361594: we get our bounds from linker-provided symbols.
* Note that referencing the value of these symbols will crash:
* always use the address only.
*/
extern int dynamorio_so_start, dynamorio_so_end;
dynamo_dll_start = (app_pc)&dynamorio_so_start;
dynamo_dll_end = (app_pc)ALIGN_FORWARD(&dynamorio_so_end, PAGE_SIZE);
# elif defined(MACOS)
dynamo_dll_start = module_dynamorio_lib_base();
# endif
check_start = dynamo_dll_start;
#endif /* STATIC_LIBRARY */
if (do_memquery) {
static char dynamorio_libname_buf[MAXIMUM_PATH];
res = memquery_library_bounds(
NULL, &check_start, &check_end, dynamorio_library_path,
BUFFER_SIZE_ELEMENTS(dynamorio_library_path), dynamorio_libname_buf,
BUFFER_SIZE_ELEMENTS(dynamorio_libname_buf));
ASSERT(res > 0);
#ifndef STATIC_LIBRARY
dynamorio_libname = IF_UNIT_TEST_ELSE(UNIT_TEST_EXE_NAME, dynamorio_libname_buf);
#endif /* STATIC_LIBRARY */
snprintf(dynamorio_library_filepath,
BUFFER_SIZE_ELEMENTS(dynamorio_library_filepath), "%s%s",
dynamorio_library_path, dynamorio_libname);
NULL_TERMINATE_BUFFER(dynamorio_library_filepath);
#if !defined(STATIC_LIBRARY) && defined(LINUX)
ASSERT(check_start == dynamo_dll_start && check_end == dynamo_dll_end);
#elif defined(MACOS)
ASSERT(check_start == dynamo_dll_start);
dynamo_dll_end = check_end;
#else
dynamo_dll_start = check_start;
dynamo_dll_end = check_end;
#endif
}
LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME " library path: %s\n",
dynamorio_library_path);
LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME " library file path: %s\n",
dynamorio_library_filepath);
LOG(GLOBAL, LOG_VMAREAS, 1, "DR library bounds: " PFX " to " PFX "\n",
dynamo_dll_start, dynamo_dll_end);
/* Issue 20: we need the path to the alt arch */
strncpy(dynamorio_alt_arch_path, dynamorio_library_path,
BUFFER_SIZE_ELEMENTS(dynamorio_alt_arch_path));
/* Assumption: libdir name is not repeated elsewhere in path */
libdir = strstr(dynamorio_alt_arch_path, IF_X64_ELSE(DR_LIBDIR_X64, DR_LIBDIR_X86));
if (libdir != NULL) {
const char *newdir = IF_X64_ELSE(DR_LIBDIR_X86, DR_LIBDIR_X64);
/* do NOT place the NULL */
strncpy(libdir, newdir, strlen(newdir));
} else {
SYSLOG_INTERNAL_WARNING("unable to determine lib path for cross-arch execve");
}
NULL_TERMINATE_BUFFER(dynamorio_alt_arch_path);
LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME " alt arch path: %s\n",
dynamorio_alt_arch_path);
snprintf(dynamorio_alt_arch_filepath,
BUFFER_SIZE_ELEMENTS(dynamorio_alt_arch_filepath), "%s%s",
dynamorio_alt_arch_path, dynamorio_libname);
NULL_TERMINATE_BUFFER(dynamorio_alt_arch_filepath);
if (dynamo_dll_start == NULL || dynamo_dll_end == NULL) {
REPORT_FATAL_ERROR_AND_EXIT(FAILED_TO_FIND_DR_BOUNDS, 2, get_application_name(),
get_application_pid());
}
}
/* get full path to our own library (cached); used for forking and message file name */
char *
get_dynamorio_library_path(void)
{
if (!dynamorio_library_filepath[0]) { /* not cached */
get_dynamo_library_bounds();
}
return dynamorio_library_filepath;
}
#ifdef LINUX
/* Get full path+name of executable file from /proc/self/exe. Returns an empty
* string on error.
* FIXME i#47: This will return DR's path when using early injection.
*/
static char *
read_proc_self_exe(bool ignore_cache)
{
static char exepath[MAXIMUM_PATH];
static bool tried = false;
# ifdef MACOS
ASSERT_NOT_IMPLEMENTED(false);
# endif
if (!tried || ignore_cache) {
tried = true;
/* assume we have /proc/self/exe symlink: could add HAVE_PROC_EXE
* but we have no alternative solution except assuming the first
* /proc/self/maps entry is the executable
*/
ssize_t res;
DEBUG_DECLARE(int len =)
snprintf(exepath, BUFFER_SIZE_ELEMENTS(exepath), "/proc/%d/exe",
get_process_id());
ASSERT(len > 0);
NULL_TERMINATE_BUFFER(exepath);
/* i#960: readlink does not null terminate, so we do it. */
# ifdef SYS_readlink
res = dynamorio_syscall(SYS_readlink, 3, exepath, exepath,
BUFFER_SIZE_ELEMENTS(exepath) - 1);
# else
res = dynamorio_syscall(SYS_readlinkat, 4, AT_FDCWD, exepath, exepath,
BUFFER_SIZE_ELEMENTS(exepath) - 1);
# endif
ASSERT(res < BUFFER_SIZE_ELEMENTS(exepath));
exepath[MAX(res, 0)] = '\0';
NULL_TERMINATE_BUFFER(exepath);
}
return exepath;
}
#endif /* LINUX */
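/* Returns the application image base, computed lazily: for static builds this
 * is DR's own start address; otherwise the executable is located by name via
 * a maps walk.
 */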
app_pc
get_application_base(void)
{
if (executable_start == NULL) {
#if defined(STATIC_LIBRARY)
/* When compiled statically, the app and the DR's "library" are the same. */
executable_start = get_dynamorio_dll_start();
executable_end = get_dynamorio_dll_end();
#elif defined(HAVE_MEMINFO)
/* Haven't done find_executable_vm_areas() yet so walk maps ourselves */
const char *name = get_application_name();
if (name != NULL && name[0] != '\0') {
DEBUG_DECLARE(int count =)
memquery_library_bounds(name, &executable_start, &executable_end, NULL, 0,
NULL, 0);
ASSERT(count > 0 && executable_start != NULL);
}
#else
/* We have to fail. Should we dl_iterate this early? */
#endif
}
return executable_start;
}
app_pc
get_application_end(void)
{
if (executable_end == NULL)
get_application_base();
return executable_end;
}
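/* Returns the application image's entry point, cached after a lazy module
 * list lookup.
 */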
app_pc
get_image_entry()
{
static app_pc image_entry_point = NULL;
if (image_entry_point == NULL && executable_start != NULL) {
module_area_t *ma;
os_get_module_info_lock();
ma = module_pc_lookup(executable_start);
ASSERT(ma != NULL);
if (ma != NULL) {
ASSERT(executable_start == ma->start);
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
image_entry_point = ma->entry_point;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
}
os_get_module_info_unlock();
}
return image_entry_point;
}
#ifdef DEBUG
void
mem_stats_snapshot()
{
/* FIXME: NYI */
}
#endif
bool
is_in_dynamo_dll(app_pc pc)
{
ASSERT(dynamo_dll_start != NULL);
#ifdef VMX86_SERVER
/* We want to consider vmklib as part of the DR lib for allowing
* execution (_init calls os_in_vmkernel_classic()) and for
* reporting crashes as our fault
*/
if (vmk_in_vmklib(pc))
return true;
#endif
return (pc >= dynamo_dll_start && pc < dynamo_dll_end);
}
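/* Lazily computes DR's own library bounds if not yet known and returns the
 * start address.
 */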
app_pc
get_dynamorio_dll_start()
{
if (dynamo_dll_start == NULL)
get_dynamo_library_bounds();
ASSERT(dynamo_dll_start != NULL);
return dynamo_dll_start;
}
app_pc
get_dynamorio_dll_end()
{
if (dynamo_dll_end == NULL)
get_dynamo_library_bounds();
ASSERT(dynamo_dll_end != NULL);
return dynamo_dll_end;
}
app_pc
get_dynamorio_dll_preferred_base()
{
/* on Linux there is no preferred base if we're PIC,
     * therefore it is always equal to dynamo_dll_start */
return get_dynamorio_dll_start();
}
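/* Records the vdso and vsyscall page bounds for the maps entry in iter; on
 * x64 this also fixes up kernels that report the region as unreadable.
 */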
static void
found_vsyscall_page(memquery_iter_t *iter _IF_DEBUG(OUT const char **map_type))
{
#ifndef X64
/* We assume no vsyscall page for x64; thus, checking the
* hardcoded address shouldn't have any false positives.
*/
ASSERT(iter->vm_end - iter->vm_start == PAGE_SIZE ||
/* i#1583: recent kernels have 2-page vdso */
iter->vm_end - iter->vm_start == 2 * PAGE_SIZE);
ASSERT(!dynamo_initialized); /* .data should be +w */
    /* we're not considering this as an "image" even if part of ld.so (xref i#89) and
* thus we aren't adjusting our code origins policies to remove the
* vsyscall page exemption.
*/
DODEBUG({ *map_type = "VDSO"; });
/* On re-attach, the vdso can be split into two entries (from DR's hook),
* so take just the first one as the start (xref i#2157).
*/
if (vdso_page_start == NULL) {
vdso_page_start = iter->vm_start;
vdso_size = iter->vm_end - iter->vm_start;
}
/* The vsyscall page can be on the 2nd page inside the vdso, but until we
* see a syscall we don't know and we point it at the vdso start.
*/
if (vsyscall_page_start == NULL)
vsyscall_page_start = iter->vm_start;
LOG(GLOBAL, LOG_VMAREAS, 1, "found vdso/vsyscall pages @ " PFX " %s\n",
vsyscall_page_start, iter->comment);
#else
/* i#172
     * fix bugs for OSes where the vdso page is set unreadable, as below:
     * ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vdso]
     * but it is in fact readable.
*/
/* i#430
     * fix bugs for OSes where the vsyscall region is set unreadable, as below:
     * ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vsyscall]
     * but it is in fact readable.
*/
if (!TESTALL((PROT_READ | PROT_EXEC), iter->prot))
iter->prot |= (PROT_READ | PROT_EXEC);
/* i#1908: vdso and vsyscall pages are now split */
if (strncmp(iter->comment, VSYSCALL_PAGE_MAPS_NAME,
strlen(VSYSCALL_PAGE_MAPS_NAME)) == 0)
vdso_page_start = iter->vm_start;
else if (strncmp(iter->comment, VSYSCALL_REGION_MAPS_NAME,
strlen(VSYSCALL_REGION_MAPS_NAME)) == 0)
vsyscall_page_start = iter->vm_start;
#endif
}
#ifndef HAVE_MEMINFO_QUERY
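/* iterate_vmm_regions() callback: adds the given vmm region to the memcache
 * as a single no-access data area, hiding vmheap internals.
 */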
static void
add_to_memcache(byte *region_start, byte *region_end, void *user_data)
{
memcache_update_locked(region_start, region_end, MEMPROT_NONE, DR_MEMTYPE_DATA,
false /*!exists*/);
}
#endif
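/* Walks the address space using iter (or probing when no maps interface is
 * available).  When add_modules is true, identifies the vdso, already-mapped
 * modules, and the executable and adds them to the module list.  Returns the
 * number of areas added via app_memory_allocation().
 */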
int
os_walk_address_space(memquery_iter_t *iter, bool add_modules)
{
int count = 0;
#ifdef MACOS
app_pc shared_start, shared_end;
bool have_shared = module_dyld_shared_region(&shared_start, &shared_end);
#endif
#ifdef RETURN_AFTER_CALL
dcontext_t *dcontext = get_thread_private_dcontext();
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
#endif
#ifndef HAVE_MEMINFO_QUERY
/* We avoid tracking the innards of vmheap for all_memory_areas by
* adding a single no-access region for the whole vmheap.
* Queries from heap routines use _from_os.
* Queries in check_thread_vm_area are fine getting "noaccess": wants
* any DR memory not on exec areas list to be noaccess.
* Queries from clients: should be ok to hide innards. Marking noaccess
* should be safer than marking free, as unruly client might try to mmap
* something in the free space: better to have it think it's reserved but
* not yet used memory. FIXME: we're not marking beyond-vmheap DR regions
* as noaccess!
*/
iterate_vmm_regions(add_to_memcache, NULL);
#endif
#ifndef HAVE_MEMINFO
count = find_vm_areas_via_probe();
#else
while (memquery_iterator_next(iter)) {
bool image = false;
size_t size = iter->vm_end - iter->vm_start;
/* i#479, hide private module and match Windows's behavior */
bool skip = dynamo_vm_area_overlap(iter->vm_start, iter->vm_end) &&
!is_in_dynamo_dll(iter->vm_start) /* our own text section is ok */
/* client lib text section is ok (xref i#487) */
&& !is_in_client_lib(iter->vm_start);
DEBUG_DECLARE(const char *map_type = "Private");
/* we can't really tell what's a stack and what's not, but we rely on
* our passing NULL preventing rwx regions from being added to executable
* or future list, even w/ -executable_if_alloc
*/
LOG(GLOBAL, LOG_VMAREAS, 2, "start=" PFX " end=" PFX " prot=%x comment=%s\n",
iter->vm_start, iter->vm_end, iter->prot, iter->comment);
/* Issue 89: the vdso might be loaded inside ld.so as below,
         * which causes the ASSERT_CURIOSITY in mmap_check_for_module_overlap to fail.
* b7fa3000-b7fbd000 r-xp 00000000 08:01 108679 /lib/ld-2.8.90.so
* b7fbd000-b7fbe000 r-xp b7fbd000 00:00 0 [vdso]
* b7fbe000-b7fbf000 r--p 0001a000 08:01 108679 /lib/ld-2.8.90.so
* b7fbf000-b7fc0000 rw-p 0001b000 08:01 108679 /lib/ld-2.8.90.so
* So we always first check if it is a vdso page before calling
* mmap_check_for_module_overlap.
* Update: with i#160/PR 562667 handling non-contiguous modules like
* ld.so we now gracefully handle other objects like vdso in gaps in
* module, but it's simpler to leave this ordering here.
*/
if (skip) {
/* i#479, hide private module and match Windows's behavior */
LOG(GLOBAL, LOG_VMAREAS, 2, PFX "-" PFX " skipping: internal DR region\n",
iter->vm_start, iter->vm_end);
# ifdef MACOS
} else if (have_shared && iter->vm_start >= shared_start &&
iter->vm_start < shared_end) {
/* Skip modules we happen to find inside the dyld shared cache,
* as we'll fail to identify the library. We add them
* in module_walk_dyld_list instead.
*/
image = true;
# endif
} else if (strncmp(iter->comment, VSYSCALL_PAGE_MAPS_NAME,
strlen(VSYSCALL_PAGE_MAPS_NAME)) == 0 ||
IF_X64_ELSE(strncmp(iter->comment, VSYSCALL_REGION_MAPS_NAME,
strlen(VSYSCALL_REGION_MAPS_NAME)) == 0,
/* Older kernels do not label it as "[vdso]", but it is
* hardcoded there.
*/
/* 32-bit */
iter->vm_start == VSYSCALL_PAGE_START_HARDCODED)) {
if (add_modules) {
found_vsyscall_page(iter _IF_DEBUG(&map_type));
/* We'd like to add vsyscall to the module list too but when it's
* separate from vdso it has no ELF header which is too complex
* to force into the module list.
*/
if (module_is_header(iter->vm_start, iter->vm_end - iter->vm_start)) {
module_list_add(iter->vm_start, iter->vm_end - iter->vm_start, false,
iter->comment, iter->inode);
}
}
} else if (add_modules &&
mmap_check_for_module_overlap(iter->vm_start, size,
TEST(MEMPROT_READ, iter->prot),
iter->inode, false)) {
/* we already added the whole image region when we hit the first map for it */
image = true;
DODEBUG({ map_type = "ELF SO"; });
} else if (TEST(MEMPROT_READ, iter->prot) &&
module_is_header(iter->vm_start, size)) {
size_t image_size = size;
app_pc mod_base, mod_first_end, mod_max_end;
char *exec_match;
bool found_exec = false;
image = true;
DODEBUG({ map_type = "ELF SO"; });
LOG(GLOBAL, LOG_VMAREAS, 2,
"Found already mapped module first segment :\n"
"\t" PFX "-" PFX "%s inode=" UINT64_FORMAT_STRING " name=%s\n",
iter->vm_start, iter->vm_end, TEST(MEMPROT_EXEC, iter->prot) ? " +x" : "",
iter->inode, iter->comment);
# ifdef LINUX
/* Mapped images should have inodes, except for cases where an anon
* map is placed on top (i#2566)
*/
ASSERT_CURIOSITY(iter->inode != 0 || iter->comment[0] == '\0');
# endif
ASSERT_CURIOSITY(iter->offset == 0); /* first map shouldn't have offset */
/* Get size by walking the program headers. This includes .bss. */
if (module_walk_program_headers(iter->vm_start, size, false,
true, /* i#1589: ld.so relocated .dynamic */
&mod_base, &mod_first_end, &mod_max_end, NULL,
NULL)) {
image_size = mod_max_end - mod_base;
} else {
ASSERT_NOT_REACHED();
}
LOG(GLOBAL, LOG_VMAREAS, 2,
"Found already mapped module total module :\n"
"\t" PFX "-" PFX " inode=" UINT64_FORMAT_STRING " name=%s\n",
iter->vm_start, iter->vm_start + image_size, iter->inode, iter->comment);
if (add_modules) {
const char *modpath = iter->comment;
/* look for executable */
# ifdef LINUX
exec_match = get_application_name();
if (exec_match != NULL && exec_match[0] != '\0')
found_exec = (strcmp(iter->comment, exec_match) == 0);
/* Handle an anon region for the header (i#2566) */
if (!found_exec && executable_start != NULL &&
executable_start == iter->vm_start) {
found_exec = true;
/* The maps file's first entry may not have the path, in the
* presence of mremapping for hugepages (i#2566; i#3387) (this
* could happen for libraries too, but we don't have alternatives
* there). Or, it may have an incorrect path. Prefer the path
* we recorded in early injection or obtained from
* /proc/self/exe.
*/
modpath = get_application_name();
}
# else
/* We don't have a nice normalized name: it can have ./ or ../ inside
* it. But, we can distinguish an exe from a lib here, even for PIE,
* so we go with that plus a basename comparison.
*/
exec_match = (char *)get_application_short_name();
if (module_is_executable(iter->vm_start) && exec_match != NULL &&
exec_match[0] != '\0') {
const char *iter_basename = strrchr(iter->comment, '/');
if (iter_basename == NULL)
iter_basename = iter->comment;
else
iter_basename++;
found_exec = (strcmp(iter_basename, exec_match) == 0);
}
# endif
if (found_exec) {
if (executable_start == NULL)
executable_start = iter->vm_start;
else
ASSERT(iter->vm_start == executable_start);
LOG(GLOBAL, LOG_VMAREAS, 2,
"Found executable %s @" PFX "-" PFX " %s\n",
get_application_name(), iter->vm_start,
iter->vm_start + image_size, iter->comment);
}
/* We don't yet know whether contiguous so we have to settle for the
* first segment's size. We'll update it in module_list_add().
*/
module_list_add(iter->vm_start, mod_first_end - mod_base, false, modpath,
iter->inode);
# ifdef MACOS
/* look for dyld */
if (strcmp(iter->comment, "/usr/lib/dyld") == 0)
module_walk_dyld_list(iter->vm_start);
# endif
}
} else if (iter->inode != 0) {
DODEBUG({ map_type = "Mapped File"; });
}
/* add all regions (incl. dynamo_areas and stack) to all_memory_areas */
# ifndef HAVE_MEMINFO_QUERY
/* Don't add if we're using one single vmheap entry. */
if (!is_vmm_reserved_address(iter->vm_start, iter->vm_end - iter->vm_start, NULL,
NULL)) {
LOG(GLOBAL, LOG_VMAREAS, 4,
"os_walk_address_space: adding: " PFX "-" PFX " prot=%d\n",
iter->vm_start, iter->vm_end, iter->prot);
memcache_update_locked(iter->vm_start, iter->vm_end, iter->prot,
image ? DR_MEMTYPE_IMAGE : DR_MEMTYPE_DATA,
false /*!exists*/);
}
# endif
/* FIXME: best if we could pass every region to vmareas, but
* it has no way of determining if this is a stack b/c we don't have
* a dcontext at this point -- so we just don't pass the stack
*/
if (!skip /* i#479, hide private module and match Windows's behavior */ &&
add_modules &&
app_memory_allocation(NULL, iter->vm_start, (iter->vm_end - iter->vm_start),
iter->prot, image _IF_DEBUG(map_type))) {
count++;
}
}
#endif /* !HAVE_MEMINFO */
#ifndef HAVE_MEMINFO_QUERY
DOLOG(4, LOG_VMAREAS, memcache_print(GLOBAL, "init: all memory areas:\n"););
#endif
#ifdef RETURN_AFTER_CALL
/* Find the bottom of the stack of the initial (native) entry */
ostd->stack_bottom_pc = find_stack_bottom();
LOG(THREAD, LOG_ALL, 1, "Stack bottom pc = " PFX "\n", ostd->stack_bottom_pc);
#endif
/* now that we've walked memory print all modules */
LOG(GLOBAL, LOG_VMAREAS, 2, "Module list after memory walk\n");
DOLOG(1, LOG_VMAREAS, {
if (add_modules)
print_modules(GLOBAL, DUMP_NOT_XML);
});
return count;
}
/* assumed to be called after find_dynamo_library_vm_areas() */
int
find_executable_vm_areas(void)
{
int count;
memquery_iter_t iter;
memquery_iterator_start(&iter, NULL, true /*may alloc*/);
count = os_walk_address_space(&iter, true);
memquery_iterator_stop(&iter);
STATS_ADD(num_app_code_modules, count);
/* now that we have the modules set up, query libc */
get_libc_errno_location(true /*force init*/);
return count;
}
/* initializes dynamorio library bounds.
* does not use any heap.
* assumed to be called prior to find_executable_vm_areas.
*/
int
find_dynamo_library_vm_areas(void)
{
#ifndef STATIC_LIBRARY
/* We didn't add inside get_dynamo_library_bounds b/c it was called pre-alloc.
* We don't bother to break down the sub-regions.
* Assumption: we don't need to have the protection flags for DR sub-regions.
* For static library builds, DR's code is in the exe and isn't considered
* to be a DR area.
*/
add_dynamo_vm_area(get_dynamorio_dll_start(), get_dynamorio_dll_end(),
MEMPROT_READ | MEMPROT_WRITE | MEMPROT_EXEC,
true /* from image */ _IF_DEBUG(dynamorio_library_filepath));
#endif
#ifdef VMX86_SERVER
if (os_in_vmkernel_userworld())
vmk_add_vmklib_to_dynamo_areas();
#endif
return 1;
}
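/* Returns the bounds of the app stack for dcontext's thread, querying and
 * caching them on first use.  Returns false if the query fails (e.g., with
 * dr_prepopulate_cache() before the app starts running).
 */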
bool
get_stack_bounds(dcontext_t *dcontext, byte **base, byte **top)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
if (ostd->stack_base == NULL) {
        /* initialize on-demand since we don't have app esp handy in os_thread_init()
         * FIXME: the comment here -- ignoring it for now; if we hit cases confirming
         * it, the right thing will be to merge adjacent rwx regions and assume
* their union is the stack -- otherwise have to have special stack init
* routine called from x86.asm new_thread_dynamo_start and internal_dynamo_start,
* and the latter is not a do-once...
*/
size_t size = 0;
bool ok;
/* store stack info at thread startup, since stack can get fragmented in
* /proc/self/maps w/ later mprotects and it can be hard to piece together later
*/
if (IF_MEMQUERY_ELSE(false, DYNAMO_OPTION(use_all_memory_areas))) {
ok = get_memory_info((app_pc)get_mcontext(dcontext)->xsp, &ostd->stack_base,
&size, NULL);
} else {
ok = get_memory_info_from_os((app_pc)get_mcontext(dcontext)->xsp,
&ostd->stack_base, &size, NULL);
}
if (!ok) {
/* This can happen with dr_prepopulate_cache() before we start running
* the app.
*/
ASSERT(!dynamo_started);
return false;
}
ostd->stack_top = ostd->stack_base + size;
LOG(THREAD, LOG_THREADS, 1, "App stack is " PFX "-" PFX "\n", ostd->stack_base,
ostd->stack_top);
}
if (base != NULL)
*base = ostd->stack_base;
if (top != NULL)
*top = ostd->stack_top;
return true;
}
#ifdef RETURN_AFTER_CALL
initial_call_stack_status_t
at_initial_stack_bottom(dcontext_t *dcontext, app_pc target_pc)
{
/* We can't rely exclusively on finding the true stack bottom
* b/c we can't always walk the call stack (PR 608990) so we
* use the image entry as our primary trigger
*/
if (executable_start != NULL /*defensive*/ && reached_image_entry_yet()) {
return INITIAL_STACK_EMPTY;
} else {
/* If our stack walk ends early we could have false positives, but
* that's better than false negatives if we miss the image entry
* or we were unable to find the executable_start
*/
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
if (target_pc == ostd->stack_bottom_pc) {
return INITIAL_STACK_BOTTOM_REACHED;
} else {
return INITIAL_STACK_BOTTOM_NOT_REACHED;
}
}
}
#endif /* RETURN_AFTER_CALL */
/* Uses our cached data structures (if in use, else raw query) to retrieve memory info */
bool
query_memory_ex(const byte *pc, OUT dr_mem_info_t *out_info)
{
#ifdef HAVE_MEMINFO_QUERY
return query_memory_ex_from_os(pc, out_info);
#else
return memcache_query_memory(pc, out_info);
#endif
}
bool
query_memory_cur_base(const byte *pc, OUT dr_mem_info_t *info)
{
return query_memory_ex(pc, info);
}
/* Use our cached data structures (if in use, else raw query) to retrieve memory info */
bool
get_memory_info(const byte *pc, byte **base_pc, size_t *size,
uint *prot /* OUT optional, returns MEMPROT_* value */)
{
dr_mem_info_t info;
if (is_vmm_reserved_address((byte *)pc, 1, NULL, NULL)) {
if (!query_memory_ex_from_os(pc, &info) || info.type == DR_MEMTYPE_FREE)
return false;
} else {
if (!query_memory_ex(pc, &info) || info.type == DR_MEMTYPE_FREE)
return false;
}
if (base_pc != NULL)
*base_pc = info.base_pc;
if (size != NULL)
*size = info.size;
if (prot != NULL)
*prot = info.prot;
return true;
}
/* We assume that this routine might be called instead of query_memory_ex()
* b/c the caller is in a fragile location and cannot acquire locks, so
* we try to do the same here.
*/
bool
query_memory_ex_from_os(const byte *pc, OUT dr_mem_info_t *info)
{
bool have_type = false;
bool res = memquery_from_os(pc, info, &have_type);
if (!res) {
/* No other failure types for now */
info->type = DR_MEMTYPE_ERROR;
} else if (res && !have_type) {
/* We pass 0 instead of info->size b/c even if marked as +r we can still
* get SIGBUS if beyond end of mmapped file: not uncommon if querying
* in middle of library load before .bss fully set up (PR 528744).
* However, if there is no fault handler, is_elf_so_header's safe_read will
* recurse to here, so in that case we use info->size but we assume
* it's only at init or exit and so not in the middle of a load
* and less likely to be querying a random mmapped file.
* The cleaner fix is to allow safe_read to work w/o a dcontext or
* fault handling: i#350/PR 529066.
*/
if (TEST(MEMPROT_READ, info->prot) &&
module_is_header(info->base_pc, fault_handling_initialized ? 0 : info->size))
info->type = DR_MEMTYPE_IMAGE;
else {
/* FIXME: won't quite match find_executable_vm_areas marking as
* image: can be doubly-mapped so; don't want to count vdso; etc.
*/
info->type = DR_MEMTYPE_DATA;
}
}
return res;
}
bool
get_memory_info_from_os(const byte *pc, byte **base_pc, size_t *size,
uint *prot /* OUT optional, returns MEMPROT_* value */)
{
dr_mem_info_t info;
if (!query_memory_ex_from_os(pc, &info) || info.type == DR_MEMTYPE_FREE)
return false;
if (base_pc != NULL)
*base_pc = info.base_pc;
if (size != NULL)
*size = info.size;
if (prot != NULL)
*prot = info.prot;
return true;
}
/* in utils.c, exported only for our hack! */
extern void
deadlock_avoidance_unlock(mutex_t *lock, bool ownable);
void
mutex_wait_contended_lock(mutex_t *lock, priv_mcontext_t *mc)
{
dcontext_t *dcontext = get_thread_private_dcontext();
bool set_client_safe_for_synch =
((dcontext != NULL) && IS_CLIENT_THREAD(dcontext) &&
((mutex_t *)dcontext->client_data->client_grab_mutex == lock));
if (mc != NULL) {
ASSERT(dcontext != NULL);
/* set_safe_for_sync can't be true at the same time as passing
* an mcontext to return into: nothing would be able to reset the
* client_thread_safe_for_sync flag.
*/
ASSERT(!set_client_safe_for_synch);
*get_mcontext(dcontext) = *mc;
}
/* i#96/PR 295561: use futex(2) if available */
if (ksynch_kernel_support()) {
/* Try to get the lock. If already held, it's fine to store any value
* > LOCK_SET_STATE (we don't rely on paired incs/decs) so that
* the next unlocker will call mutex_notify_released_lock().
*/
ptr_int_t res;
#ifndef LINUX /* we actually don't use this for Linux: see below */
KSYNCH_TYPE *event = mutex_get_contended_event(lock);
ASSERT(event != NULL && ksynch_var_initialized(event));
#endif
while (atomic_exchange_int(&lock->lock_requests, LOCK_CONTENDED_STATE) !=
LOCK_FREE_STATE) {
if (set_client_safe_for_synch)
dcontext->client_data->client_thread_safe_for_synch = true;
if (mc != NULL)
set_synch_state(dcontext, THREAD_SYNCH_VALID_MCONTEXT);
/* Unfortunately the synch semantics are different for Linux vs Mac.
* We have to use lock_requests as the futex to avoid waiting if
* lock_requests changes, while on Mac the underlying synch prevents
* a wait there.
*/
#ifdef LINUX
/* We'll abort the wait if lock_requests has changed at all.
* We can't have a series of changes that result in no apparent
* change w/o someone acquiring the lock, b/c
* mutex_notify_released_lock() sets lock_requests to LOCK_FREE_STATE.
*/
res = ksynch_wait(&lock->lock_requests, LOCK_CONTENDED_STATE, 0);
#else
res = ksynch_wait(event, 0, 0);
#endif
if (res != 0 && res != -EWOULDBLOCK)
os_thread_yield();
if (set_client_safe_for_synch)
dcontext->client_data->client_thread_safe_for_synch = false;
if (mc != NULL)
set_synch_state(dcontext, THREAD_SYNCH_NONE);
/* we don't care whether properly woken (res==0), var mismatch
* (res==-EWOULDBLOCK), or error: regardless, someone else
* could have acquired the lock, so we try again
*/
}
} else {
/* we now have to undo our earlier request */
atomic_dec_and_test(&lock->lock_requests);
while (!d_r_mutex_trylock(lock)) {
if (set_client_safe_for_synch)
dcontext->client_data->client_thread_safe_for_synch = true;
if (mc != NULL)
set_synch_state(dcontext, THREAD_SYNCH_VALID_MCONTEXT);
os_thread_yield();
if (set_client_safe_for_synch)
dcontext->client_data->client_thread_safe_for_synch = false;
if (mc != NULL)
set_synch_state(dcontext, THREAD_SYNCH_NONE);
}
#ifdef DEADLOCK_AVOIDANCE
/* HACK: trylock's success causes it to do DEADLOCK_AVOIDANCE_LOCK, so to
* avoid two in a row (causes assertion on owner) we unlock here
* In the future we will remove the trylock here and this will go away.
*/
deadlock_avoidance_unlock(lock, true);
#endif
}
return;
}
void
mutex_notify_released_lock(mutex_t *lock)
{
/* i#96/PR 295561: use futex(2) if available. */
if (ksynch_kernel_support()) {
/* Set to LOCK_FREE_STATE to avoid concurrent lock attempts from
* resulting in a futex_wait value match w/o anyone owning the lock
*/
lock->lock_requests = LOCK_FREE_STATE;
/* No reason to wake multiple threads: just one */
#ifdef LINUX
ksynch_wake(&lock->lock_requests);
#else
ksynch_wake(&lock->contended_event);
#endif
} /* else nothing to do */
}
/* read_write_lock_t implementation doesn't expect the contention path
helpers to guarantee the lock is held (unlike mutexes) so simple
yields are still acceptable.
*/
void
rwlock_wait_contended_writer(read_write_lock_t *rwlock)
{
os_thread_yield();
}
void
rwlock_notify_writer(read_write_lock_t *rwlock)
{
/* nothing to do here */
}
void
rwlock_wait_contended_reader(read_write_lock_t *rwlock)
{
os_thread_yield();
}
void
rwlock_notify_readers(read_write_lock_t *rwlock)
{
/* nothing to do here */
}
/***************************************************************************/
/* events are un-signaled when successfully waited upon. */
typedef struct linux_event_t {
/* Any function that sets this flag must also notify possibly waiting
* thread(s). See i#96/PR 295561.
*/
KSYNCH_TYPE signaled;
mutex_t lock;
bool broadcast;
} linux_event_t;
/* FIXME: this routine will need to have a macro wrapper to let us
* assign different ranks to all events for DEADLOCK_AVOIDANCE.
* Currently a single rank seems to work.
*/
event_t
create_event(void)
{
event_t e = (event_t)global_heap_alloc(sizeof(linux_event_t) HEAPACCT(ACCT_OTHER));
ksynch_init_var(&e->signaled);
ASSIGN_INIT_LOCK_FREE(e->lock, event_lock); /* FIXME: pass the event name here */
e->broadcast = false;
return e;
}
event_t
create_broadcast_event(void)
{
event_t e = create_event();
e->broadcast = true;
return e;
}
void
destroy_event(event_t e)
{
DELETE_LOCK(e->lock);
ksynch_free_var(&e->signaled);
global_heap_free(e, sizeof(linux_event_t) HEAPACCT(ACCT_OTHER));
}
void
signal_event(event_t e)
{
d_r_mutex_lock(&e->lock);
ksynch_set_value(&e->signaled, 1);
if (e->broadcast)
ksynch_wake_all(&e->signaled);
else
ksynch_wake(&e->signaled);
LOG(THREAD_GET, LOG_THREADS, 3, "thread " TIDFMT " signalling event " PFX "\n",
d_r_get_thread_id(), e);
d_r_mutex_unlock(&e->lock);
}
void
reset_event(event_t e)
{
d_r_mutex_lock(&e->lock);
ksynch_set_value(&e->signaled, 0);
LOG(THREAD_GET, LOG_THREADS, 3, "thread " TIDFMT " resetting event " PFX "\n",
d_r_get_thread_id(), e);
d_r_mutex_unlock(&e->lock);
}
bool
wait_for_event(event_t e, int timeout_ms)
{
#ifdef DEBUG
dcontext_t *dcontext = get_thread_private_dcontext();
#endif
uint64 start_time, cur_time;
if (timeout_ms > 0)
start_time = query_time_millis();
/* Use a user-space event on Linux, a kernel event on Windows. */
LOG(THREAD, LOG_THREADS, 3, "thread " TIDFMT " waiting for event " PFX "\n",
d_r_get_thread_id(), e);
do {
if (ksynch_get_value(&e->signaled) == 1) {
d_r_mutex_lock(&e->lock);
if (ksynch_get_value(&e->signaled) == 0) {
/* some other thread beat us to it */
LOG(THREAD, LOG_THREADS, 3,
"thread " TIDFMT " was beaten to event " PFX "\n",
d_r_get_thread_id(), e);
d_r_mutex_unlock(&e->lock);
} else {
if (!e->broadcast) {
/* reset the event */
ksynch_set_value(&e->signaled, 0);
}
d_r_mutex_unlock(&e->lock);
LOG(THREAD, LOG_THREADS, 3,
"thread " TIDFMT " finished waiting for event " PFX "\n",
d_r_get_thread_id(), e);
return true;
}
} else {
/* Waits only if the signaled flag is not set as 1. Return value
* doesn't matter because the flag will be re-checked.
*/
ksynch_wait(&e->signaled, 0, timeout_ms);
}
if (ksynch_get_value(&e->signaled) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
if (timeout_ms > 0)
cur_time = query_time_millis();
} while (timeout_ms <= 0 || cur_time - start_time < timeout_ms);
return false;
}
/***************************************************************************
* DIRECTORY ITERATOR
*/
/* These structs are written to the buf that we pass to getdents. We can
* iterate them by adding d_reclen to the current buffer offset and interpreting
* that as the next entry.
*/
struct linux_dirent {
#ifdef SYS_getdents
/* Adapted from struct old_linux_dirent in linux/fs/readdir.c: */
unsigned long d_ino;
unsigned long d_off;
unsigned short d_reclen;
char d_name[];
#else
/* Adapted from struct linux_dirent64 in linux/include/linux/dirent.h: */
uint64 d_ino;
int64 d_off;
unsigned short d_reclen;
unsigned char d_type;
char d_name[];
#endif
};
#define CURRENT_DIRENT(iter) ((struct linux_dirent *)(&iter->buf[iter->off]))
static void
os_dir_iterator_start(dir_iterator_t *iter, file_t fd)
{
iter->fd = fd;
iter->off = 0;
iter->end = 0;
}
static bool
os_dir_iterator_next(dir_iterator_t *iter)
{
#ifdef MACOS
/* We can use SYS_getdirentries, but do we even need a dir iterator?
* On Linux it's only used to enumerate /proc/pid/task.
*/
ASSERT_NOT_IMPLEMENTED(false);
return false;
#else
if (iter->off < iter->end) {
/* Have existing dents, get the next offset. */
iter->off += CURRENT_DIRENT(iter)->d_reclen;
ASSERT(iter->off <= iter->end);
}
if (iter->off == iter->end) {
/* Do a getdents syscall. Unlike when reading a file, the kernel will
* not read a partial linux_dirent struct, so we don't need to shift the
* left over bytes to the buffer start. See the getdents manpage for
* the example code that this is based on.
*/
iter->off = 0;
# ifdef SYS_getdents
iter->end =
dynamorio_syscall(SYS_getdents, 3, iter->fd, iter->buf, sizeof(iter->buf));
# else
iter->end =
dynamorio_syscall(SYS_getdents64, 3, iter->fd, iter->buf, sizeof(iter->buf));
# endif
ASSERT(iter->end <= sizeof(iter->buf));
if (iter->end <= 0) { /* No more dents, or error. */
iter->name = NULL;
if (iter->end < 0) {
LOG(GLOBAL, LOG_SYSCALLS, 1, "getdents syscall failed with errno %d\n",
-iter->end);
}
return false;
}
}
iter->name = CURRENT_DIRENT(iter)->d_name;
return true;
#endif
}
/***************************************************************************
* THREAD TAKEOVER
*/
/* Record used to synchronize thread takeover. */
typedef struct _takeover_record_t {
thread_id_t tid;
event_t event;
} takeover_record_t;
/* When attempting thread takeover, we store an array of thread id and event
* pairs here. Each thread we signal is supposed to enter DR control and signal
* this event after it has added itself to all_threads.
*
* XXX: What we really want is to be able to use SYS_rt_tgsigqueueinfo (Linux >=
* 2.6.31) to pass the event_t to each thread directly, rather than using this
* side data structure.
*/
static takeover_record_t *thread_takeover_records;
static uint num_thread_takeover_records;
/* This is the dcontext of the thread that initiated the takeover. We read the
* owning_thread and signal_field threads from it in the signaled threads to
* set up siginfo sharing.
*/
static dcontext_t *takeover_dcontext;
/* Lists active threads in the process.
* XXX: The /proc man page says /proc/pid/task is only available if the main
* thread is still alive, but experiments on 2.6.38 show otherwise.
*/
static thread_id_t *
os_list_threads(dcontext_t *dcontext, uint *num_threads_out)
{
dir_iterator_t iter;
file_t task_dir;
uint tids_alloced = 10;
uint num_threads = 0;
thread_id_t *new_tids;
thread_id_t *tids;
ASSERT(num_threads_out != NULL);
#ifdef MACOS
/* XXX i#58: NYI.
* We may want SYS_proc_info with PROC_INFO_PID_INFO and PROC_PIDLISTTHREADS,
* or is that just BSD threads and instead we want process_set_tasks()
* and task_info() as in 7.3.1.3 in Singh's OSX book?
*/
*num_threads_out = 0;
return NULL;
#endif
tids =
HEAP_ARRAY_ALLOC(dcontext, thread_id_t, tids_alloced, ACCT_THREAD_MGT, PROTECTED);
task_dir = os_open_directory("/proc/self/task", OS_OPEN_READ);
ASSERT(task_dir != INVALID_FILE);
os_dir_iterator_start(&iter, task_dir);
while (os_dir_iterator_next(&iter)) {
thread_id_t tid;
DEBUG_DECLARE(int r;)
if (strcmp(iter.name, ".") == 0 || strcmp(iter.name, "..") == 0)
continue;
IF_DEBUG(r =)
sscanf(iter.name, "%u", &tid);
ASSERT_MESSAGE(CHKLVL_ASSERTS, "failed to parse /proc/pid/task entry", r == 1);
if (tid <= 0)
continue;
if (num_threads == tids_alloced) {
/* realloc, essentially. Less expensive than counting first. */
new_tids = HEAP_ARRAY_ALLOC(dcontext, thread_id_t, tids_alloced * 2,
ACCT_THREAD_MGT, PROTECTED);
memcpy(new_tids, tids, sizeof(thread_id_t) * tids_alloced);
HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, tids_alloced, ACCT_THREAD_MGT,
PROTECTED);
tids = new_tids;
tids_alloced *= 2;
}
tids[num_threads++] = tid;
}
ASSERT(iter.end == 0); /* No reading errors. */
os_close(task_dir);
/* realloc back down to num_threads for caller simplicity. */
new_tids =
HEAP_ARRAY_ALLOC(dcontext, thread_id_t, num_threads, ACCT_THREAD_MGT, PROTECTED);
memcpy(new_tids, tids, sizeof(thread_id_t) * num_threads);
HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, tids_alloced, ACCT_THREAD_MGT,
PROTECTED);
tids = new_tids;
*num_threads_out = num_threads;
return tids;
}
/* List the /proc/self/task directory and add all unknown thread ids to the
* all_threads hashtable in dynamo.c. Returns true if we found any unknown
* threads and false otherwise. We assume that since we don't know about them
* they are not under DR and have no dcontexts.
*/
bool
os_take_over_all_unknown_threads(dcontext_t *dcontext)
{
uint i;
uint num_threads;
thread_id_t *tids;
uint threads_to_signal = 0, threads_timed_out = 0;
/* We do not want to re-takeover a thread that's in between notifying us on
* the last call to this routine and getting onto the all_threads list as
* we'll self-interpret our own code leading to a lot of problems.
* XXX: should we use an event to avoid this inefficient loop? We expect
* this to only happen in rare cases during attach when threads are in flux.
*/
while (uninit_thread_count > 0) /* relying on volatile */
os_thread_yield();
/* This can only happen if we had already taken over a thread, because there is
* full synchronization at detach. The same thread may now already be on its way
* to exit, and its thread record might be gone already and make it look like a
* new native thread below. If we rely on the thread to self-detect that it was
* interrupted at a DR address we may run into a deadlock (i#2694). In order to
* avoid this, we wait here. This is expected to be uncommon, and can only happen
* with very short-lived threads.
* XXX: if this loop turns out to be too inefficient, we could support detecting
* the lock function's address bounds along w/ is_dynamo_address.
*/
while (exiting_thread_count > 0)
os_thread_yield();
d_r_mutex_lock(&thread_initexit_lock);
CLIENT_ASSERT(thread_takeover_records == NULL,
"Only one thread should attempt app take over!");
#ifdef LINUX
/* Check this thread for rseq in between setup and start. */
if (rseq_is_registered_for_current_thread())
rseq_locate_rseq_regions();
#endif
/* Find tids for which we have no thread record, meaning they are not under
* our control. Shift them to the beginning of the tids array.
*/
tids = os_list_threads(dcontext, &num_threads);
if (tids == NULL) {
d_r_mutex_unlock(&thread_initexit_lock);
return false; /* have to assume no unknown */
}
for (i = 0; i < num_threads; i++) {
thread_record_t *tr = thread_lookup(tids[i]);
if (tr == NULL ||
/* Re-takeover known threads that are currently native as well.
* XXX i#95: we need a synchall-style loop for known threads as
* they can be in DR for syscall hook handling.
* Update: we now remove the hook for start/stop: but native_exec
* or other individual threads going native could still hit this.
*/
(is_thread_currently_native(tr) && !IS_CLIENT_THREAD(tr->dcontext)))
tids[threads_to_signal++] = tids[i];
}
LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: %d threads to take over\n", threads_to_signal);
if (threads_to_signal > 0) {
takeover_record_t *records;
/* Assuming pthreads, prepare signal_field for sharing. */
handle_clone(dcontext, PTHREAD_CLONE_FLAGS);
/* Create records with events for all the threads we want to signal. */
LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: publishing takeover records\n");
records = HEAP_ARRAY_ALLOC(dcontext, takeover_record_t, threads_to_signal,
ACCT_THREAD_MGT, PROTECTED);
for (i = 0; i < threads_to_signal; i++) {
LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: will signal thread " TIDFMT "\n",
tids[i]);
records[i].tid = tids[i];
records[i].event = create_event();
}
/* Publish the records and the initial take over dcontext. */
thread_takeover_records = records;
num_thread_takeover_records = threads_to_signal;
takeover_dcontext = dcontext;
/* Signal the other threads. */
for (i = 0; i < threads_to_signal; i++) {
thread_signal(get_process_id(), records[i].tid, SUSPEND_SIGNAL);
}
d_r_mutex_unlock(&thread_initexit_lock);
/* Wait for all the threads we signaled. */
ASSERT_OWN_NO_LOCKS();
for (i = 0; i < threads_to_signal; i++) {
static const int progress_period = 50;
if (i % progress_period == 0) {
char buf[16];
/* +1 to include the attach request thread to match the final msg. */
snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%d/%d", i + 1,
threads_to_signal + 1);
NULL_TERMINATE_BUFFER(buf);
SYSLOG(SYSLOG_VERBOSE, INFO_ATTACHED, 3, buf, get_application_name(),
get_application_pid());
}
/* We split the wait up so that we'll break early on an exited thread. */
static const int wait_ms = 25;
int max_attempts =
/* Integer division rounding down is fine since we always wait 25ms. */
DYNAMO_OPTION(takeover_timeout_ms) / wait_ms;
int attempts = 0;
while (!wait_for_event(records[i].event, wait_ms)) {
/* The thread may have exited (i#2601). We assume no tid re-use. */
char task[64];
snprintf(task, BUFFER_SIZE_ELEMENTS(task), "/proc/self/task/%d", tids[i]);
NULL_TERMINATE_BUFFER(task);
if (!os_file_exists(task, false /*!is dir*/)) {
SYSLOG_INTERNAL_WARNING_ONCE("thread exited while attaching");
break;
}
if (++attempts > max_attempts) {
if (DYNAMO_OPTION(unsafe_ignore_takeover_timeout)) {
SYSLOG(
SYSLOG_VERBOSE, THREAD_TAKEOVER_TIMED_OUT, 3,
get_application_name(), get_application_pid(),
"Continuing since -unsafe_ignore_takeover_timeout is set.");
++threads_timed_out;
} else {
SYSLOG(
SYSLOG_VERBOSE, THREAD_TAKEOVER_TIMED_OUT, 3,
get_application_name(), get_application_pid(),
"Aborting. Use -unsafe_ignore_takeover_timeout to ignore.");
REPORT_FATAL_ERROR_AND_EXIT(FAILED_TO_TAKE_OVER_THREADS, 2,
get_application_name(),
get_application_pid());
}
break;
}
/* Else try again. */
}
}
/* Now that we've taken over the other threads, we can safely free the
* records and reset the shared globals.
*/
d_r_mutex_lock(&thread_initexit_lock);
LOG(GLOBAL, LOG_THREADS, 1,
"TAKEOVER: takeover complete, unpublishing records\n");
thread_takeover_records = NULL;
num_thread_takeover_records = 0;
takeover_dcontext = NULL;
for (i = 0; i < threads_to_signal; i++) {
destroy_event(records[i].event);
}
HEAP_ARRAY_FREE(dcontext, records, takeover_record_t, threads_to_signal,
ACCT_THREAD_MGT, PROTECTED);
}
d_r_mutex_unlock(&thread_initexit_lock);
HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, num_threads, ACCT_THREAD_MGT, PROTECTED);
ASSERT(threads_to_signal >= threads_timed_out);
return (threads_to_signal - threads_timed_out) > 0;
}
bool
os_thread_re_take_over(void)
{
#ifdef X86
/* i#2089: is_thread_initialized() will fail for a currently-native app.
* We bypass the magic field checks here of is_thread_tls_initialized().
* XXX: should this be inside is_thread_initialized()? But that may mislead
 * other callers: the caller has to restore the TLS. Some old code also
* used get_thread_private_dcontext() being NULL to indicate an unknown thread:
* that should also call here.
*/
if (!is_thread_initialized() && is_thread_tls_allocated()) {
/* It's safe to call thread_lookup() for ourself. */
thread_record_t *tr = thread_lookup(get_sys_thread_id());
if (tr != NULL) {
ASSERT(is_thread_currently_native(tr));
LOG(GLOBAL, LOG_THREADS, 1, "\tretakeover for cur-native thread " TIDFMT "\n",
get_sys_thread_id());
LOG(tr->dcontext->logfile, LOG_THREADS, 1,
"\nretakeover for cur-native thread " TIDFMT "\n", get_sys_thread_id());
os_swap_dr_tls(tr->dcontext, false /*to dr*/);
ASSERT(is_thread_initialized());
return true;
}
}
#endif
return false;
}
static void
os_thread_signal_taken_over(void)
{
thread_id_t mytid;
event_t event = NULL;
uint i;
/* Wake up the thread that initiated the take over. */
mytid = d_r_get_thread_id();
ASSERT(thread_takeover_records != NULL);
for (i = 0; i < num_thread_takeover_records; i++) {
if (thread_takeover_records[i].tid == mytid) {
event = thread_takeover_records[i].event;
break;
}
}
ASSERT_MESSAGE(CHKLVL_ASSERTS, "mytid not present in takeover records!",
event != NULL);
signal_event(event);
}
/* Takes over the current thread from the signal handler. We notify the thread
* that signaled us by signalling our event in thread_takeover_records.
* If it returns, it returns false, and the thread should be let go.
*/
bool
os_thread_take_over(priv_mcontext_t *mc, kernel_sigset_t *sigset)
{
dcontext_t *dcontext;
priv_mcontext_t *dc_mc;
LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: received signal in thread " TIDFMT "\n",
get_sys_thread_id());
/* Do standard DR thread initialization. Mirrors code in
* create_clone_record and new_thread_setup, except we're not putting a
* clone record on the dstack.
*/
os_thread_re_take_over();
if (!is_thread_initialized()) {
/* If this is a thread on its way to init, don't self-interp (i#2688). */
if (is_dynamo_address(mc->pc)) {
os_thread_signal_taken_over();
return false;
}
dcontext = init_thread_with_shared_siginfo(mc, takeover_dcontext);
ASSERT(dcontext != NULL);
} else {
/* Re-takeover a thread that we let go native */
dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
}
signal_set_mask(dcontext, sigset);
signal_swap_mask(dcontext, true /*to app*/);
dynamo_thread_under_dynamo(dcontext);
dc_mc = get_mcontext(dcontext);
*dc_mc = *mc;
dcontext->whereami = DR_WHERE_APP;
dcontext->next_tag = mc->pc;
os_thread_signal_taken_over();
DOLOG(2, LOG_TOP, {
byte *cur_esp;
GET_STACK_PTR(cur_esp);
LOG(THREAD, LOG_TOP, 2,
"%s: next_tag=" PFX ", cur xsp=" PFX ", mc->xsp=" PFX "\n", __FUNCTION__,
dcontext->next_tag, cur_esp, mc->xsp);
});
#ifdef LINUX
/* See whether we should initiate lazy rseq handling, and avoid treating
* regions as rseq when the rseq syscall is never set up.
*/
if (rseq_is_registered_for_current_thread()) {
rseq_locate_rseq_regions();
rseq_thread_attach(dcontext);
}
#endif
/* Start interpreting from the signal context. */
call_switch_stack(dcontext, dcontext->dstack, (void (*)(void *))d_r_dispatch,
NULL /*not on d_r_initstack*/, false /*shouldn't return*/);
ASSERT_NOT_REACHED();
return true; /* make compiler happy */
}
bool
os_thread_take_over_suspended_native(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
if (!is_thread_currently_native(dcontext->thread_record) ||
ksynch_get_value(&ostd->suspended) < 0)
return false;
/* Thread is sitting in suspend signal loop so we just set a flag
* for when it resumes:
*/
/* XXX: there's no event for a client to trigger this on so not yet
* tested. i#721 may help.
*/
ASSERT_NOT_TESTED();
ostd->retakeover = true;
return true;
}
/* Called for os-specific takeover of a secondary thread from the one
* that called dr_app_setup().
*/
dcontext_t *
os_thread_take_over_secondary(priv_mcontext_t *mc)
{
thread_record_t **list;
int num_threads;
int i;
dcontext_t *dcontext;
/* We want to share with the thread that called dr_app_setup. */
d_r_mutex_lock(&thread_initexit_lock);
get_list_of_threads(&list, &num_threads);
ASSERT(num_threads >= 1);
for (i = 0; i < num_threads; i++) {
/* Find a thread that's already set up */
if (is_thread_signal_info_initialized(list[i]->dcontext))
break;
}
ASSERT(i < num_threads);
ASSERT(list[i]->id != get_sys_thread_id());
/* Assuming pthreads, prepare signal_field for sharing. */
handle_clone(list[i]->dcontext, PTHREAD_CLONE_FLAGS);
dcontext = init_thread_with_shared_siginfo(mc, list[i]->dcontext);
d_r_mutex_unlock(&thread_initexit_lock);
global_heap_free(list,
num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
return dcontext;
}
/***************************************************************************/
uint
os_random_seed(void)
{
uint seed;
/* reading from /dev/urandom for a non-blocking random */
int urand = os_open("/dev/urandom", OS_OPEN_READ);
DEBUG_DECLARE(int read =) os_read(urand, &seed, sizeof(seed));
ASSERT(read == sizeof(seed));
os_close(urand);
return seed;
}
#ifdef RCT_IND_BRANCH
/* Analyze a range in a possibly new module
* return false if not a code section in a module
* otherwise returns true and adds all valid targets for rct_ind_branch_check
*/
bool
rct_analyze_module_at_violation(dcontext_t *dcontext, app_pc target_pc)
{
/* FIXME: note that this will NOT find the data section corresponding to the given PC
* we don't yet have a corresponding get_allocation_size or an ELF header walk routine
* on linux
*/
app_pc code_start;
size_t code_size;
uint prot;
if (!get_memory_info(target_pc, &code_start, &code_size, &prot))
return false;
/* TODO: in almost all cases expect the region at module_base+module_size to be
* the corresponding data section.
* Writable yet initialized data indeed needs to be processed.
*/
if (code_size > 0) {
app_pc code_end = code_start + code_size;
app_pc data_start;
size_t data_size;
ASSERT(TESTALL(MEMPROT_READ | MEMPROT_EXEC, prot)); /* code */
if (!get_memory_info(code_end, &data_start, &data_size, &prot))
return false;
ASSERT(data_start == code_end);
ASSERT(TESTALL(MEMPROT_READ | MEMPROT_WRITE, prot)); /* data */
app_pc text_start = code_start;
app_pc text_end = data_start + data_size;
/* TODO: performance: do this only in case relocation info is not present */
DEBUG_DECLARE(uint found =)
find_address_references(dcontext, text_start, text_end, code_start, code_end);
LOG(GLOBAL, LOG_RCT, 2, PFX "-" PFX " : %d ind targets of %d code size",
text_start, text_end, found, code_size);
return true;
}
return false;
}
# ifdef X64
bool
rct_add_rip_rel_addr(dcontext_t *dcontext, app_pc tgt _IF_DEBUG(app_pc src))
{
/* FIXME PR 276762: not implemented */
return false;
}
# endif
#endif /* RCT_IND_BRANCH */
#ifdef HOT_PATCHING_INTERFACE
void *
get_drmarker_hotp_policy_status_table()
{
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
void
set_drmarker_hotp_policy_status_table(void *new_table)
{
ASSERT_NOT_IMPLEMENTED(false);
}
byte *
hook_text(byte *hook_code_buf, const app_pc image_addr, intercept_function_t hook_func,
const void *callee_arg, const after_intercept_action_t action_after,
const bool abort_if_hooked, const bool ignore_cti, byte **app_code_copy_p,
byte **alt_exit_tgt_p)
{
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
void
unhook_text(byte *hook_code_buf, app_pc image_addr)
{
ASSERT_NOT_IMPLEMENTED(false);
}
void
insert_jmp_at_tramp_entry(dcontext_t *dcontext, byte *trampoline, byte *target)
{
ASSERT_NOT_IMPLEMENTED(false);
}
#endif /* HOT_PATCHING_INTERFACE */
bool
aslr_is_possible_attack(app_pc target)
{
/* FIXME: ASLR not implemented */
return false;
}
app_pc
aslr_possible_preferred_address(app_pc target_addr)
{
/* FIXME: ASLR not implemented */
return NULL;
}
void
take_over_primary_thread()
{
/* nothing to do here */
}
bool
os_current_user_directory(char *directory_prefix /* INOUT */, uint directory_len,
bool create)
{
/* XXX: could share some of this code w/ corresponding windows routine */
uid_t uid = dynamorio_syscall(SYS_getuid, 0);
char *directory = directory_prefix;
char *dirend = directory_prefix + strlen(directory_prefix);
snprintf(dirend, directory_len - (dirend - directory_prefix), "%cdpc-%d", DIRSEP,
uid);
directory_prefix[directory_len - 1] = '\0';
if (!os_file_exists(directory, true /*is dir*/) && create) {
/* XXX: we should ensure we do not follow symlinks */
/* XXX: should add support for CREATE_DIR_FORCE_OWNER */
if (!os_create_dir(directory, CREATE_DIR_REQUIRE_NEW)) {
LOG(GLOBAL, LOG_CACHE, 2, "\terror creating per-user dir %s\n", directory);
return false;
} else {
LOG(GLOBAL, LOG_CACHE, 2, "\tcreated per-user dir %s\n", directory);
}
}
return true;
}
bool
os_validate_user_owned(file_t file_or_directory_handle)
{
/* note on Linux this scheme should never be used */
ASSERT(false && "chown Alice evilfile");
return false;
}
bool
os_check_option_compatibility(void)
{
/* no options are Linux OS version dependent */
return false;
}
#ifdef X86_32
/* Emulate uint64 modulo and division by uint32 on ia32.
* XXX: Does *not* handle 64-bit divisors!
*/
static uint64
uint64_divmod(uint64 dividend, uint64 divisor64, uint32 *remainder)
{
/* Assumes little endian, which x86 is. */
union {
uint64 v64;
struct {
uint32 lo;
uint32 hi;
};
} res;
uint32 upper;
uint32 divisor = (uint32)divisor64;
/* Our uses don't use large divisors. */
ASSERT(divisor64 <= UINT_MAX && "divisor is larger than uint32 can hold");
/* Divide out the high bits first. */
res.v64 = dividend;
upper = res.hi;
res.hi = upper / divisor;
upper %= divisor;
/* Use the unsigned div instruction, which uses EDX:EAX to form a 64-bit
* dividend. We only get a 32-bit quotient out, which is why we divide out
* the high bits first. The quotient will fit in EAX.
*
* DIV r/m32 F7 /6 Unsigned divide EDX:EAX by r/m32, with result stored
* in EAX <- Quotient, EDX <- Remainder.
* inputs:
* EAX = res.lo
* EDX = upper
* rm = divisor
* outputs:
* res.lo = EAX
* *remainder = EDX
* The outputs precede the inputs in gcc inline asm syntax, and so to put
* inputs in EAX and EDX we use "0" and "1".
*/
asm("divl %2"
: "=a"(res.lo), "=d"(*remainder)
: "rm"(divisor), "0"(res.lo), "1"(upper));
return res.v64;
}
/* Match libgcc's prototype. */
uint64
__udivdi3(uint64 dividend, uint64 divisor)
{
uint32 remainder;
return uint64_divmod(dividend, divisor, &remainder);
}
/* Match libgcc's prototype. */
uint64
__umoddi3(uint64 dividend, uint64 divisor)
{
uint32 remainder;
uint64_divmod(dividend, divisor, &remainder);
return (uint64)remainder;
}
/* Same thing for signed. */
static int64
int64_divmod(int64 dividend, int64 divisor64, int *remainder)
{
union {
int64 v64;
struct {
int lo;
int hi;
};
} res;
int upper;
int divisor = (int)divisor64;
/* Our uses don't use large divisors. */
ASSERT(divisor64 <= INT_MAX && divisor64 >= INT_MIN && "divisor too large for int");
/* Divide out the high bits first. */
res.v64 = dividend;
upper = res.hi;
res.hi = upper / divisor;
upper %= divisor;
/* Like above but with the signed div instruction, which does a signed divide
* on edx:eax by r/m32 => quotient in eax, remainder in edx.
*/
asm("idivl %2"
: "=a"(res.lo), "=d"(*remainder)
: "rm"(divisor), "0"(res.lo), "1"(upper));
return res.v64;
}
/* Match libgcc's prototype. */
int64
__divdi3(int64 dividend, int64 divisor)
{
int remainder;
return int64_divmod(dividend, divisor, &remainder);
}
/* __moddi3 is coming from third_party/libgcc for x86 as well as arm. */
#elif defined(ARM)
/* i#1566: for ARM, __aeabi versions are used instead of udivdi3 and umoddi3.
* We link with __aeabi routines from libgcc via third_party/libgcc.
*/
#endif /* X86_32 */
/****************************************************************************
* Tests
*/
#if defined(STANDALONE_UNIT_TEST)
void
test_uint64_divmod(void)
{
# ifdef X86_32
uint64 quotient;
uint32 remainder;
/* Simple division below 2^32. */
quotient = uint64_divmod(9, 3, &remainder);
EXPECT(quotient == 3, true);
EXPECT(remainder == 0, true);
quotient = uint64_divmod(10, 3, &remainder);
EXPECT(quotient == 3, true);
EXPECT(remainder == 1, true);
/* Division when upper bits are less than the divisor. */
quotient = uint64_divmod(45ULL << 31, 1U << 31, &remainder);
EXPECT(quotient == 45, true);
EXPECT(remainder == 0, true);
/* Division when upper bits are greater than the divisor. */
quotient = uint64_divmod(45ULL << 32, 15, &remainder);
EXPECT(quotient == 3ULL << 32, true);
EXPECT(remainder == 0, true);
quotient = uint64_divmod((45ULL << 32) + 13, 15, &remainder);
EXPECT(quotient == 3ULL << 32, true);
EXPECT(remainder == 13, true);
/* Try calling the intrinsics. Don't divide by powers of two, gcc will
* lower that to a shift.
*/
quotient = (45ULL << 32);
quotient /= 15;
EXPECT(quotient == (3ULL << 32), true);
quotient = (45ULL << 32) + 13;
remainder = quotient % 15;
EXPECT(remainder == 13, true);
# endif /* X86_32 */
}
void
unit_test_os(void)
{
test_uint64_divmod();
}
#endif /* STANDALONE_UNIT_TEST */
| 1 | 25,720 | I would avoid the term "errno" as it implies the glibc variable. Use error_code or status or something. | DynamoRIO-dynamorio | c |
@@ -69,7 +69,7 @@ func (i *inbound) Channel() *tchannel.Channel {
return i.ch
}
-func (i *inbound) Start(h transport.Handler) error {
+func (i *inbound) Start(h transport.Handler, d transport.Dependencies) error {
sc := i.ch.GetSubChannel(i.ch.ServiceName())
existing := sc.GetHandlers()
sc.SetHandler(handler{existing, h}) | 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package tchannel
import (
"net"
"github.com/yarpc/yarpc-go/transport"
"github.com/uber/tchannel-go"
)
// Inbound is a TChannel Inbound.
type Inbound interface {
transport.Inbound
// Returns the underlying Channel for this Inbound.
Channel() *tchannel.Channel
}
// InboundOption configures Inbound.
type InboundOption func(*inbound)
// ListenAddr changes the address on which the TChannel server will listen for
// connections. By default, the server listens on an OS-assigned port.
//
// This option has no effect if the Chanel provided to NewInbound is already
// listening for connections when Start() is called.
func ListenAddr(addr string) InboundOption {
return func(i *inbound) { i.addr = addr }
}
// NewInbound builds a new TChannel inbound from the given Channel. Existing
// methods registered on the channel remain registered and are preferred when
// a call is received.
func NewInbound(ch *tchannel.Channel, opts ...InboundOption) Inbound {
i := &inbound{ch: ch}
for _, opt := range opts {
opt(i)
}
return i
}
type inbound struct {
ch *tchannel.Channel
addr string
listener net.Listener
}
func (i *inbound) Channel() *tchannel.Channel {
return i.ch
}
func (i *inbound) Start(h transport.Handler) error {
sc := i.ch.GetSubChannel(i.ch.ServiceName())
existing := sc.GetHandlers()
sc.SetHandler(handler{existing, h})
if i.ch.State() == tchannel.ChannelListening {
// Channel.Start() was called before RPC.Start(). We still want to
// update the Handler and what i.addr means, but nothing else.
i.addr = i.ch.PeerInfo().HostPort
return nil
}
// Default to ListenIP if addr wasn't given.
addr := i.addr
if addr == "" {
listenIP, err := tchannel.ListenIP()
if err != nil {
return err
}
addr = listenIP.String() + ":0"
// TODO(abg): Find a way to export this to users
}
// TODO(abg): If addr was just the port (":4040"), we want to use
// ListenIP() + ":4040" rather than just ":4040".
var err error
i.listener, err = net.Listen("tcp", addr)
if err != nil {
return err
}
i.addr = i.listener.Addr().String() // in case it changed
if err := i.ch.Serve(i.listener); err != nil {
return err
}
return nil
}
func (i *inbound) Stop() error {
i.ch.Close()
return nil
}
| 1 | 10,408 | @abhinav @kriskowal what do you think about putting the handler on `d`? | yarpc-yarpc-go | go |
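The reviewer's question above, folding the handler into the `d` (transport.Dependencies) argument instead of passing it separately, could look roughly like the sketch below. This is only an illustration of the idea: the Dependencies struct with a Handler field and the minimal Handler interface are assumptions made for the sketch, not the actual yarpc-go API.

// Hypothetical sketch, not the real yarpc-go transport package.
package transport

import "context"

// Handler is a placeholder for whatever handles inbound requests;
// the real yarpc-go interface has a different signature.
type Handler interface {
	Handle(ctx context.Context) error
}

// Dependencies carries everything an inbound needs to start. Putting the
// handler here (the reviewer's suggestion) collapses Start to one parameter.
type Dependencies struct {
	Handler Handler
}

// Inbound shows the resulting shape of the interface.
type Inbound interface {
	Start(d Dependencies) error
	Stop() error
}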
@@ -91,7 +91,7 @@ type BackupOptions struct {
ExcludeLargerThan string
Stdin bool
StdinFilename string
- Tags []string
+ Tags restic.TagList
Host string
FilesFrom []string
TimeStamp string | 1 | package main
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/spf13/cobra"
tomb "gopkg.in/tomb.v2"
"github.com/restic/restic/internal/archiver"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/textfile"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/json"
"github.com/restic/restic/internal/ui/termstatus"
)
var cmdBackup = &cobra.Command{
Use: "backup [flags] FILE/DIR [FILE/DIR] ...",
Short: "Create a new backup of files and/or directories",
Long: `
The "backup" command creates a new snapshot and saves the files and directories
given as the arguments.
EXIT STATUS
===========
Exit status is 0 if the command was successful.
Exit status is 1 if there was a fatal error (no snapshot created).
Exit status is 3 if some source data could not be read (incomplete snapshot created).
`,
PreRun: func(cmd *cobra.Command, args []string) {
if backupOptions.Host == "" {
hostname, err := os.Hostname()
if err != nil {
debug.Log("os.Hostname() returned err: %v", err)
return
}
backupOptions.Host = hostname
}
},
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
if backupOptions.Stdin {
for _, filename := range backupOptions.FilesFrom {
if filename == "-" {
return errors.Fatal("cannot use both `--stdin` and `--files-from -`")
}
}
}
var t tomb.Tomb
term := termstatus.New(globalOptions.stdout, globalOptions.stderr, globalOptions.Quiet)
t.Go(func() error { term.Run(t.Context(globalOptions.ctx)); return nil })
err := runBackup(backupOptions, globalOptions, term, args)
if err != nil {
return err
}
t.Kill(nil)
return t.Wait()
},
}
// BackupOptions bundles all options for the backup command.
type BackupOptions struct {
Parent string
Force bool
Excludes []string
InsensitiveExcludes []string
ExcludeFiles []string
InsensitiveExcludeFiles []string
ExcludeOtherFS bool
ExcludeIfPresent []string
ExcludeCaches bool
ExcludeLargerThan string
Stdin bool
StdinFilename string
Tags []string
Host string
FilesFrom []string
TimeStamp string
WithAtime bool
IgnoreInode bool
UseFsSnapshot bool
}
var backupOptions BackupOptions
// ErrInvalidSourceData is used to report an incomplete backup
var ErrInvalidSourceData = errors.New("failed to read all source data during backup")
func init() {
cmdRoot.AddCommand(cmdBackup)
f := cmdBackup.Flags()
f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: last snapshot in the repo that has the same target files/directories)")
f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
f.StringArrayVarP(&backupOptions.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
f.StringArrayVar(&backupOptions.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames")
f.StringArrayVar(&backupOptions.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)")
f.StringArrayVar(&backupOptions.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns")
f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems")
f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes `filename[:header]`, exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)")
f.BoolVar(&backupOptions.ExcludeCaches, "exclude-caches", false, `excludes cache directories that are marked with a CACHEDIR.TAG file. See https://bford.info/cachedir/ for the Cache Directory Tagging Standard`)
f.StringVar(&backupOptions.ExcludeLargerThan, "exclude-larger-than", "", "max `size` of the files to be backed up (allowed suffixes: k/K, m/M, g/G, t/T)")
f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin")
f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "`filename` to use when reading from stdin")
f.StringArrayVar(&backupOptions.Tags, "tag", nil, "add a `tag` for the new snapshot (can be specified multiple times)")
f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag")
f.StringVar(&backupOptions.Host, "hostname", "", "set the `hostname` for the snapshot manually")
f.MarkDeprecated("hostname", "use --host")
f.StringArrayVar(&backupOptions.FilesFrom, "files-from", nil, "read the files to backup from `file` (can be combined with file args/can be specified multiple times)")
f.StringVar(&backupOptions.TimeStamp, "time", "", "`time` of the backup (ex. '2012-11-01 22:08:41') (default: now)")
f.BoolVar(&backupOptions.WithAtime, "with-atime", false, "store the atime for all files and directories")
f.BoolVar(&backupOptions.IgnoreInode, "ignore-inode", false, "ignore inode number changes when checking for modified files")
if runtime.GOOS == "windows" {
f.BoolVar(&backupOptions.UseFsSnapshot, "use-fs-snapshot", false, "use filesystem snapshot where possible (currently only Windows VSS)")
}
}
// filterExisting returns a slice of all existing items, or an error if no
// items exist at all.
func filterExisting(items []string) (result []string, err error) {
for _, item := range items {
_, err := fs.Lstat(item)
if err != nil && os.IsNotExist(errors.Cause(err)) {
Warnf("%v does not exist, skipping\n", item)
continue
}
result = append(result, item)
}
if len(result) == 0 {
return nil, errors.Fatal("all target directories/files do not exist")
}
return
}
// readLinesFromFile will read all lines from the given filename and return them as
// a string array; if filename is empty, readLinesFromFile returns an empty string
// array. If filename is a dash (-), readLinesFromFile will read the lines from the
// standard input.
func readLinesFromFile(filename string) ([]string, error) {
if filename == "" {
return nil, nil
}
var (
data []byte
err error
)
if filename == "-" {
data, err = ioutil.ReadAll(os.Stdin)
} else {
data, err = textfile.Read(filename)
}
if err != nil {
return nil, err
}
var lines []string
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
// ignore empty lines
if line == "" {
continue
}
// strip comments
if strings.HasPrefix(line, "#") {
continue
}
lines = append(lines, line)
}
if err := scanner.Err(); err != nil {
return nil, err
}
return lines, nil
}
// Check returns an error when an invalid combination of options was set.
func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error {
if gopts.password == "" {
for _, filename := range opts.FilesFrom {
if filename == "-" {
return errors.Fatal("unable to read password from stdin when data is to be read from stdin, use --password-file or $RESTIC_PASSWORD")
}
}
}
if opts.Stdin {
if len(opts.FilesFrom) > 0 {
return errors.Fatal("--stdin and --files-from cannot be used together")
}
if len(args) > 0 {
return errors.Fatal("--stdin was specified and files/dirs were listed as arguments")
}
}
return nil
}
// collectRejectByNameFuncs returns a list of all functions which may reject data
// from being saved in a snapshot based on path only
func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectByNameFunc, err error) {
// exclude restic cache
if repo.Cache != nil {
f, err := rejectResticCache(repo)
if err != nil {
return nil, err
}
fs = append(fs, f)
}
// add patterns from file
if len(opts.ExcludeFiles) > 0 {
excludes, err := readExcludePatternsFromFiles(opts.ExcludeFiles)
if err != nil {
return nil, err
}
opts.Excludes = append(opts.Excludes, excludes...)
}
if len(opts.InsensitiveExcludeFiles) > 0 {
excludes, err := readExcludePatternsFromFiles(opts.InsensitiveExcludeFiles)
if err != nil {
return nil, err
}
opts.InsensitiveExcludes = append(opts.InsensitiveExcludes, excludes...)
}
if len(opts.InsensitiveExcludes) > 0 {
fs = append(fs, rejectByInsensitivePattern(opts.InsensitiveExcludes))
}
if len(opts.Excludes) > 0 {
fs = append(fs, rejectByPattern(opts.Excludes))
}
if opts.ExcludeCaches {
opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55")
}
for _, spec := range opts.ExcludeIfPresent {
f, err := rejectIfPresent(spec)
if err != nil {
return nil, err
}
fs = append(fs, f)
}
return fs, nil
}
// collectRejectFuncs returns a list of all functions which may reject data
// from being saved in a snapshot based on path and file info
func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectFunc, err error) {
// allowed devices
if opts.ExcludeOtherFS && !opts.Stdin {
f, err := rejectByDevice(targets)
if err != nil {
return nil, err
}
fs = append(fs, f)
}
if len(opts.ExcludeLargerThan) != 0 && !opts.Stdin {
f, err := rejectBySize(opts.ExcludeLargerThan)
if err != nil {
return nil, err
}
fs = append(fs, f)
}
return fs, nil
}
// readExcludePatternsFromFiles reads all exclude files and returns the list of
// exclude patterns. For each line, leading and trailing white space is removed
// and comment lines are ignored. For each remaining pattern, environment
// variables are resolved. For adding a literal dollar sign ($), write $$ to
// the file.
func readExcludePatternsFromFiles(excludeFiles []string) ([]string, error) {
getenvOrDollar := func(s string) string {
if s == "$" {
return "$"
}
return os.Getenv(s)
}
var excludes []string
for _, filename := range excludeFiles {
err := func() (err error) {
data, err := textfile.Read(filename)
if err != nil {
return err
}
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
// ignore empty lines
if line == "" {
continue
}
// strip comments
if strings.HasPrefix(line, "#") {
continue
}
line = os.Expand(line, getenvOrDollar)
excludes = append(excludes, line)
}
return scanner.Err()
}()
if err != nil {
return nil, err
}
}
return excludes, nil
}
// collectTargets returns a list of target files/dirs from several sources.
func collectTargets(opts BackupOptions, args []string) (targets []string, err error) {
if opts.Stdin {
return nil, nil
}
var lines []string
for _, file := range opts.FilesFrom {
fromfile, err := readLinesFromFile(file)
if err != nil {
return nil, err
}
// expand wildcards
for _, line := range fromfile {
var expanded []string
expanded, err := filepath.Glob(line)
if err != nil {
return nil, errors.WithMessage(err, fmt.Sprintf("pattern: %s", line))
}
if len(expanded) == 0 {
Warnf("pattern %q does not match any files, skipping\n", line)
}
lines = append(lines, expanded...)
}
}
// merge files from files-from into normal args so we can reuse the normal
// args checks and have the ability to use both files-from and args at the
// same time
args = append(args, lines...)
if len(args) == 0 && !opts.Stdin {
return nil, errors.Fatal("nothing to backup, please specify target files/dirs")
}
targets = args
targets, err = filterExisting(targets)
if err != nil {
return nil, err
}
return targets, nil
}
// findParentSnapshot returns the ID of the parent snapshot. If there is none, nil is
// returned.
func findParentSnapshot(ctx context.Context, repo restic.Repository, opts BackupOptions, targets []string) (parentID *restic.ID, err error) {
// Force using a parent
if !opts.Force && opts.Parent != "" {
id, err := restic.FindSnapshot(ctx, repo, opts.Parent)
if err != nil {
return nil, errors.Fatalf("invalid id %q: %v", opts.Parent, err)
}
parentID = &id
}
// Find last snapshot to set it as parent, if not already set
if !opts.Force && parentID == nil {
id, err := restic.FindLatestSnapshot(ctx, repo, targets, []restic.TagList{}, []string{opts.Host})
if err == nil {
parentID = &id
} else if err != restic.ErrNoSnapshotFound {
return nil, err
}
}
return parentID, nil
}
func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error {
err := opts.Check(gopts, args)
if err != nil {
return err
}
targets, err := collectTargets(opts, args)
if err != nil {
return err
}
timeStamp := time.Now()
if opts.TimeStamp != "" {
timeStamp, err = time.ParseInLocation(TimeFormat, opts.TimeStamp, time.Local)
if err != nil {
return errors.Fatalf("error in time option: %v\n", err)
}
}
var t tomb.Tomb
if gopts.verbosity >= 2 && !gopts.JSON {
Verbosef("open repository\n")
}
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
type ArchiveProgressReporter interface {
CompleteItem(item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration)
StartFile(filename string)
CompleteBlob(filename string, bytes uint64)
ScannerError(item string, fi os.FileInfo, err error) error
ReportTotal(item string, s archiver.ScanStats)
SetMinUpdatePause(d time.Duration)
Run(ctx context.Context) error
Error(item string, fi os.FileInfo, err error) error
Finish(snapshotID restic.ID)
// ui.StdioWrapper
Stdout() io.WriteCloser
Stderr() io.WriteCloser
// ui.Message
E(msg string, args ...interface{})
P(msg string, args ...interface{})
V(msg string, args ...interface{})
VV(msg string, args ...interface{})
}
var p ArchiveProgressReporter
if gopts.JSON {
p = json.NewBackup(term, gopts.verbosity)
} else {
p = ui.NewBackup(term, gopts.verbosity)
}
// use the terminal for stdout/stderr
prevStdout, prevStderr := gopts.stdout, gopts.stderr
defer func() {
gopts.stdout, gopts.stderr = prevStdout, prevStderr
}()
gopts.stdout, gopts.stderr = p.Stdout(), p.Stderr()
if s, ok := os.LookupEnv("RESTIC_PROGRESS_FPS"); ok {
fps, err := strconv.Atoi(s)
if err == nil && fps >= 1 {
if fps > 60 {
fps = 60
}
p.SetMinUpdatePause(time.Second / time.Duration(fps))
}
}
t.Go(func() error { return p.Run(t.Context(gopts.ctx)) })
if !gopts.JSON {
p.V("lock repository")
}
lock, err := lockRepo(gopts.ctx, repo)
defer unlockRepo(lock)
if err != nil {
return err
}
// rejectByNameFuncs collect functions that can reject items from the backup based on path only
rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo, targets)
if err != nil {
return err
}
// rejectFuncs collect functions that can reject items from the backup based on path and file info
rejectFuncs, err := collectRejectFuncs(opts, repo, targets)
if err != nil {
return err
}
if !gopts.JSON {
p.V("load index files")
}
err = repo.LoadIndex(gopts.ctx)
if err != nil {
return err
}
parentSnapshotID, err := findParentSnapshot(gopts.ctx, repo, opts, targets)
if err != nil {
return err
}
if !gopts.JSON {
if parentSnapshotID != nil {
p.P("using parent snapshot %v\n", parentSnapshotID.Str())
} else {
p.P("no parent snapshot found, will read all files\n")
}
}
selectByNameFilter := func(item string) bool {
for _, reject := range rejectByNameFuncs {
if reject(item) {
return false
}
}
return true
}
selectFilter := func(item string, fi os.FileInfo) bool {
for _, reject := range rejectFuncs {
if reject(item, fi) {
return false
}
}
return true
}
var targetFS fs.FS = fs.Local{}
if runtime.GOOS == "windows" && opts.UseFsSnapshot {
if err = fs.HasSufficientPrivilegesForVSS(); err != nil {
return err
}
errorHandler := func(item string, err error) error {
return p.Error(item, nil, err)
}
messageHandler := func(msg string, args ...interface{}) {
if !gopts.JSON {
p.P(msg, args...)
}
}
localVss := fs.NewLocalVss(errorHandler, messageHandler)
defer localVss.DeleteSnapshots()
targetFS = localVss
}
if opts.Stdin {
if !gopts.JSON {
p.V("read data from stdin")
}
filename := path.Join("/", opts.StdinFilename)
targetFS = &fs.Reader{
ModTime: timeStamp,
Name: filename,
Mode: 0644,
ReadCloser: os.Stdin,
}
targets = []string{filename}
}
sc := archiver.NewScanner(targetFS)
sc.SelectByName = selectByNameFilter
sc.Select = selectFilter
sc.Error = p.ScannerError
sc.Result = p.ReportTotal
if !gopts.JSON {
p.V("start scan on %v", targets)
}
t.Go(func() error { return sc.Scan(t.Context(gopts.ctx), targets) })
arch := archiver.New(repo, targetFS, archiver.Options{})
arch.SelectByName = selectByNameFilter
arch.Select = selectFilter
arch.WithAtime = opts.WithAtime
success := true
arch.Error = func(item string, fi os.FileInfo, err error) error {
success = false
return p.Error(item, fi, err)
}
arch.CompleteItem = p.CompleteItem
arch.StartFile = p.StartFile
arch.CompleteBlob = p.CompleteBlob
arch.IgnoreInode = opts.IgnoreInode
if parentSnapshotID == nil {
parentSnapshotID = &restic.ID{}
}
snapshotOpts := archiver.SnapshotOptions{
Excludes: opts.Excludes,
Tags: opts.Tags,
Time: timeStamp,
Hostname: opts.Host,
ParentSnapshot: *parentSnapshotID,
}
if !gopts.JSON {
p.V("start backup on %v", targets)
}
_, id, err := arch.Snapshot(gopts.ctx, targets, snapshotOpts)
if err != nil {
return errors.Fatalf("unable to save snapshot: %v", err)
}
// cleanly shutdown all running goroutines
t.Kill(nil)
// let's see if one returned an error
err = t.Wait()
// Report finished execution
p.Finish(id)
if !gopts.JSON {
p.P("snapshot %s saved\n", id.Str())
}
if !success {
return ErrInvalidSourceData
}
// Return error if any
return err
}
| 1 | 12,335 | Why `restic.TagLists` and not `restic.TagList`? Because I'm using `restic.TagList` here, and switching to `restic.TagLists` would be a lot of changes. | restic-restic | go |
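For context on the TagList versus TagLists question in the comment above, here is a minimal sketch of how the two types usually differ and how the plural form can back a repeatable --tag flag. The Set/String/Type method set mirrors the spf13/pflag Value interface, but the concrete wiring shown here is an assumption for illustration, not restic's actual implementation.

// Hypothetical sketch of the TagList/TagLists distinction.
package main

import (
	"fmt"
	"strings"
)

// TagList is one group of tags, e.g. a single "--tag foo,bar" occurrence.
type TagList []string

// TagLists is several such groups; snapshot filters typically OR the groups.
type TagLists []TagList

// Set appends one comma-separated flag value, so a *TagLists can back a
// repeatable flag via something like pflag's Var.
func (l *TagLists) Set(s string) error {
	*l = append(*l, TagList(strings.Split(s, ",")))
	return nil
}

func (l TagLists) String() string { return fmt.Sprintf("%v", []TagList(l)) }
func (l TagLists) Type() string   { return "taglists" }

func main() {
	var lists TagLists
	_ = lists.Set("foo,bar") // first --tag occurrence
	_ = lists.Set("baz")     // second --tag occurrence
	fmt.Println(lists)       // [[foo bar] [baz]]
}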
@@ -50,6 +50,8 @@ func TestWait(t *testing.T) {
cancel()
return ctx
}(),
+ // Ensure the timer and context do not end simultaneously.
+ delay: 1 * time.Hour,
expected: context.Canceled,
},
} | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package retry
import (
"context"
"errors"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestWait(t *testing.T) {
tests := []struct {
ctx context.Context
delay time.Duration
expected error
}{
{
ctx: context.Background(),
delay: time.Duration(0),
expected: nil,
},
{
ctx: context.Background(),
delay: time.Duration(1),
expected: nil,
},
{
ctx: context.Background(),
delay: time.Duration(-1),
expected: nil,
},
{
ctx: func() context.Context {
ctx, cancel := context.WithCancel(context.Background())
cancel()
return ctx
}(),
expected: context.Canceled,
},
}
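// A zero or negative delay is expected to return immediately without error, while an already-cancelled context should yield context.Canceled.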
for _, test := range tests {
assert.Equal(t, test.expected, wait(test.ctx, test.delay))
}
}
func TestNonRetryableError(t *testing.T) {
ev := func(error) (bool, time.Duration) { return false, 0 }
reqFunc := Config{
Enabled: true,
InitialInterval: 1 * time.Nanosecond,
MaxInterval: 1 * time.Nanosecond,
// Never stop retrying.
MaxElapsedTime: 0,
}.RequestFunc(ev)
ctx := context.Background()
assert.NoError(t, reqFunc(ctx, func(context.Context) error {
return nil
}))
assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
return assert.AnError
}), assert.AnError)
}
func TestThrottledRetry(t *testing.T) {
// Ensure the throttle delay is used by making it longer than the backoff delay.
throttleDelay, backoffDelay := time.Second, time.Nanosecond
ev := func(error) (bool, time.Duration) {
// Retry everything with a throttle delay.
return true, throttleDelay
}
reqFunc := Config{
Enabled: true,
InitialInterval: backoffDelay,
MaxInterval: backoffDelay,
// Never stop retrying.
MaxElapsedTime: 0,
}.RequestFunc(ev)
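// Stub the package-level waitFunc so the test can assert on the delay passed to it; the second invocation returns an error to break out of the retry loop.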
origWait := waitFunc
var done bool
waitFunc = func(_ context.Context, delay time.Duration) error {
assert.Equal(t, throttleDelay, delay, "retry not throttled")
// Try twice to ensure call is attempted again after delay.
if done {
return assert.AnError
}
done = true
return nil
}
defer func() { waitFunc = origWait }()
ctx := context.Background()
assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
return errors.New("not this error")
}), assert.AnError)
}
func TestBackoffRetry(t *testing.T) {
ev := func(error) (bool, time.Duration) { return true, 0 }
delay := time.Nanosecond
reqFunc := Config{
Enabled: true,
InitialInterval: delay,
MaxInterval: delay,
// Never stop retrying.
MaxElapsedTime: 0,
}.RequestFunc(ev)
origWait := waitFunc
var done bool
waitFunc = func(_ context.Context, d time.Duration) error {
assert.Equal(t, delay, d, "retry not backoffed")
// Try twice to ensure call is attempted again after delay.
if done {
return assert.AnError
}
done = true
return nil
}
defer func() { waitFunc = origWait }()
ctx := context.Background()
assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
return errors.New("not this error")
}), assert.AnError)
}
func TestThrottledRetryGreaterThanMaxElapsedTime(t *testing.T) {
// Ensure the throttle delay is used by making it longer than the backoff delay.
tDelay, bDelay := time.Hour, time.Nanosecond
ev := func(error) (bool, time.Duration) { return true, tDelay }
reqFunc := Config{
Enabled: true,
InitialInterval: bDelay,
MaxInterval: bDelay,
MaxElapsedTime: tDelay - (time.Nanosecond),
}.RequestFunc(ev)
ctx := context.Background()
assert.Contains(t, reqFunc(ctx, func(context.Context) error {
return assert.AnError
}).Error(), "max retry time would elapse: ")
}
func TestMaxElapsedTime(t *testing.T) {
ev := func(error) (bool, time.Duration) { return true, 0 }
delay := time.Nanosecond
reqFunc := Config{
Enabled: true,
// InitialInterval > MaxElapsedTime means immediate return.
InitialInterval: 2 * delay,
MaxElapsedTime: delay,
}.RequestFunc(ev)
ctx := context.Background()
assert.Contains(t, reqFunc(ctx, func(context.Context) error {
return assert.AnError
}).Error(), "max retry time elapsed: ")
}
func TestRetryNotEnabled(t *testing.T) {
ev := func(error) (bool, time.Duration) {
t.Error("evaluated retry when not enabled")
return false, 0
}
reqFunc := Config{}.RequestFunc(ev)
ctx := context.Background()
assert.NoError(t, reqFunc(ctx, func(context.Context) error {
return nil
}))
assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
return assert.AnError
}), assert.AnError)
}
| 1 | 16,722 | An hour seems a significant amount of time to wait for within a test, does it need to be that long? | open-telemetry-opentelemetry-go | go |
@@ -204,7 +204,7 @@ func (d *RPCFactory) CreateFrontendGRPCConnection(hostName string) *grpc.ClientC
}
}
- return d.dial(hostName, tlsClientConfig)
+ return d.dial(hostName, tlsClientConfig, false)
}
// CreateInternodeGRPCConnection creates connection for gRPC calls | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package rpc
import (
"crypto/tls"
"fmt"
"net"
"sync"
"github.com/uber/tchannel-go"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"go.temporal.io/server/common/config"
"go.temporal.io/server/common/convert"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/rpc/encryption"
)
// RPCFactory is an implementation of service.RPCFactory interface
type RPCFactory struct {
config *config.RPC
serviceName string
logger log.Logger
sync.Mutex
grpcListener net.Listener
ringpopChannel *tchannel.Channel
tlsFactory encryption.TLSConfigProvider
}
// NewFactory builds a new RPCFactory
// conforming to the underlying configuration
func NewFactory(cfg *config.RPC, sName string, logger log.Logger, tlsProvider encryption.TLSConfigProvider) *RPCFactory {
return newFactory(cfg, sName, logger, tlsProvider)
}
func newFactory(cfg *config.RPC, sName string, logger log.Logger, tlsProvider encryption.TLSConfigProvider) *RPCFactory {
factory := &RPCFactory{config: cfg, serviceName: sName, logger: logger, tlsFactory: tlsProvider}
return factory
}
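// GetFrontendGRPCServerOptions returns gRPC server options for the frontend service, adding TLS credentials when a TLS config provider is configured.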
func (d *RPCFactory) GetFrontendGRPCServerOptions() ([]grpc.ServerOption, error) {
var opts []grpc.ServerOption
if d.tlsFactory != nil {
serverConfig, err := d.tlsFactory.GetFrontendServerConfig()
if err != nil {
return nil, err
}
if serverConfig == nil {
return opts, nil
}
opts = append(opts, grpc.Creds(credentials.NewTLS(serverConfig)))
}
return opts, nil
}
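// GetFrontendClientTlsConfig returns the TLS config used by clients connecting to the frontend, or nil when TLS is not configured.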
func (d *RPCFactory) GetFrontendClientTlsConfig() (*tls.Config, error) {
if d.tlsFactory != nil {
return d.tlsFactory.GetFrontendClientConfig()
}
return nil, nil
}
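// GetInternodeGRPCServerOptions returns gRPC server options for internode communication, adding TLS credentials when a TLS config provider is configured.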
func (d *RPCFactory) GetInternodeGRPCServerOptions() ([]grpc.ServerOption, error) {
var opts []grpc.ServerOption
if d.tlsFactory != nil {
serverConfig, err := d.tlsFactory.GetInternodeServerConfig()
if err != nil {
return nil, err
}
if serverConfig == nil {
return opts, nil
}
opts = append(opts, grpc.Creds(credentials.NewTLS(serverConfig)))
}
return opts, nil
}
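// GetInternodeClientTlsConfig returns the TLS config used for internode clients, or nil when TLS is not configured.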
func (d *RPCFactory) GetInternodeClientTlsConfig() (*tls.Config, error) {
if d.tlsFactory != nil {
return d.tlsFactory.GetInternodeClientConfig()
}
return nil, nil
}
// GetGRPCListener returns cached dispatcher for gRPC inbound or creates one
func (d *RPCFactory) GetGRPCListener() net.Listener {
if d.grpcListener != nil {
return d.grpcListener
}
d.Lock()
defer d.Unlock()
if d.grpcListener == nil {
hostAddress := net.JoinHostPort(getListenIP(d.config, d.logger).String(), convert.IntToString(d.config.GRPCPort))
var err error
d.grpcListener, err = net.Listen("tcp", hostAddress)
if err != nil {
d.logger.Fatal("Failed to start gRPC listener", tag.Error(err), tag.Service(d.serviceName), tag.Address(hostAddress))
}
d.logger.Info("Created gRPC listener", tag.Service(d.serviceName), tag.Address(hostAddress))
}
return d.grpcListener
}
// GetRingpopChannel return a cached ringpop dispatcher
func (d *RPCFactory) GetRingpopChannel() *tchannel.Channel {
if d.ringpopChannel != nil {
return d.ringpopChannel
}
d.Lock()
defer d.Unlock()
if d.ringpopChannel == nil {
ringpopServiceName := fmt.Sprintf("%v-ringpop", d.serviceName)
ringpopHostAddress := net.JoinHostPort(getListenIP(d.config, d.logger).String(), convert.IntToString(d.config.MembershipPort))
var err error
d.ringpopChannel, err = tchannel.NewChannel(ringpopServiceName, nil)
if err != nil {
d.logger.Fatal("Failed to create ringpop TChannel", tag.Error(err))
}
err = d.ringpopChannel.ListenAndServe(ringpopHostAddress)
if err != nil {
d.logger.Fatal("Failed to start ringpop listener", tag.Error(err), tag.Address(ringpopHostAddress))
}
}
return d.ringpopChannel
}
func (d *RPCFactory) getTLSFactory() encryption.TLSConfigProvider {
return d.tlsFactory
}
func getListenIP(cfg *config.RPC, logger log.Logger) net.IP {
if cfg.BindOnLocalHost && len(cfg.BindOnIP) > 0 {
logger.Fatal("ListenIP failed, bindOnLocalHost and bindOnIP are mutually exclusive")
}
if cfg.BindOnLocalHost {
return net.IPv4(127, 0, 0, 1)
}
if len(cfg.BindOnIP) > 0 {
ip := net.ParseIP(cfg.BindOnIP)
if ip != nil {
return ip
}
logger.Fatal("ListenIP failed, unable to parse bindOnIP value", tag.Address(cfg.BindOnIP))
}
ip, err := config.ListenIP()
if err != nil {
logger.Fatal("ListenIP failed", tag.Error(err))
}
return ip
}
// CreateFrontendGRPCConnection creates connection for gRPC calls
func (d *RPCFactory) CreateFrontendGRPCConnection(hostName string) *grpc.ClientConn {
var tlsClientConfig *tls.Config
var err error
if d.tlsFactory != nil {
tlsClientConfig, err = d.tlsFactory.GetFrontendClientConfig()
if err != nil {
d.logger.Fatal("Failed to create tls config for grpc connection", tag.Error(err))
}
}
return d.dial(hostName, tlsClientConfig)
}
// CreateInternodeGRPCConnection creates connection for gRPC calls
func (d *RPCFactory) CreateInternodeGRPCConnection(hostName string) *grpc.ClientConn {
var tlsClientConfig *tls.Config
var err error
if d.tlsFactory != nil {
tlsClientConfig, err = d.tlsFactory.GetInternodeClientConfig()
if err != nil {
d.logger.Fatal("Failed to create tls config for grpc connection", tag.Error(err))
}
}
return d.dial(hostName, tlsClientConfig)
}
func (d *RPCFactory) dial(hostName string, tlsClientConfig *tls.Config) *grpc.ClientConn {
connection, err := Dial(hostName, tlsClientConfig, d.logger)
if err != nil {
d.logger.Fatal("Failed to create gRPC connection", tag.Error(err))
}
return connection
}
func getBroadcastAddressFromConfig(serverCfg *config.Global, cfg *config.RPC, logger log.Logger) string {
if serverCfg.Membership.BroadcastAddress != "" {
return serverCfg.Membership.BroadcastAddress
} else {
return getListenIP(cfg, logger).String()
}
}
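// GetTLSConfigProvider returns the TLS config provider backing this factory.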
func (d *RPCFactory) GetTLSConfigProvider() encryption.TLSConfigProvider {
return d.tlsFactory
}
| 1 | 11,946 | Nit: (newish to golang, don't hesitate to tell me I'm wrong here, but ...) I'd have written this code, just for readability: keepAlive := false return d.dial(hostName, tlsClientConfig, keepAlive) ... or even return d.dial(hostName, tlsClientConfig, false) // keepAlive=false Personally I think Golang should have included named args; I think it's one of the things python got right. | temporalio-temporal | go |
@@ -13,6 +13,7 @@ namespace Datadog.Trace.Configuration
AspNetMvc,
AspNetWebApi2,
GraphQL,
+ Msmq,
MongoDb,
XUnit,
NUnit, | 1 | // ReSharper disable InconsistentNaming - Name is used for integration names
namespace Datadog.Trace.Configuration
{
internal enum IntegrationIds
{
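// Values are assigned sequentially from declaration order, so inserting or reordering entries changes the numeric IDs of later integrations.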
HttpMessageHandler,
HttpSocketsHandler,
WinHttpHandler,
CurlHandler,
AspNetCore,
AdoNet,
AspNet,
AspNetMvc,
AspNetWebApi2,
GraphQL,
MongoDb,
XUnit,
NUnit,
MsTestV2,
Wcf,
WebRequest,
ElasticsearchNet5,
ElasticsearchNet, // NOTE: keep this name without the 6 to avoid breaking changes
ServiceStackRedis,
StackExchangeRedis,
ServiceRemoting,
RabbitMQ,
}
}
| 1 | 20,392 | I don't think it would cause any version conflict, but I'd feel safer if you declared it last, so that the numerical id of the existing integrations don't change | DataDog-dd-trace-dotnet | .cs |
@@ -135,6 +135,9 @@ func (c *certificateRequestManager) updateCertificateStatus(ctx context.Context,
ready = cmmeta.ConditionTrue
reason = "Ready"
message = "Certificate is up to date and has not expired"
+ case apiutil.CertificateRequestHasInvalidRequest(req):
+ reason = "InvalidRequest"
+ message = fmt.Sprintf("The CertificateRequest %q is an invalid request", req.Name)
case req != nil:
reason = "InProgress"
message = fmt.Sprintf("Waiting for CertificateRequest %q to complete", req.Name) | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificates
import (
"context"
"crypto/ecdsa"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"reflect"
"strings"
"time"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
apiutil "github.com/jetstack/cert-manager/pkg/api/util"
cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
cmlisters "github.com/jetstack/cert-manager/pkg/client/listers/certmanager/v1alpha2"
logf "github.com/jetstack/cert-manager/pkg/logs"
"github.com/jetstack/cert-manager/pkg/metrics"
"github.com/jetstack/cert-manager/pkg/util/errors"
"github.com/jetstack/cert-manager/pkg/util/kube"
"github.com/jetstack/cert-manager/pkg/util/pki"
)
func (c *certificateRequestManager) ProcessItem(ctx context.Context, key string) error {
log := logf.FromContext(ctx)
crt, err := getCertificateForKey(ctx, key, c.certificateLister)
if apierrors.IsNotFound(err) {
log.Error(err, "certificate resource not found for key", "key", key)
return nil
}
if crt == nil {
log.Info("certificate resource not found for key", "key", key)
return nil
}
if err != nil {
return err
}
log = logf.WithResource(log, crt)
ctx = logf.NewContext(ctx, log)
updatedCert := crt.DeepCopy()
defer metrics.Default.UpdateCertificateExpiry(updatedCert, c.secretLister)
defer metrics.Default.UpdateCertificateStatus(updatedCert)
err = c.processCertificate(ctx, updatedCert)
log.V(logf.DebugLevel).Info("check if certificate status update is required")
updateStatusErr := c.updateCertificateStatus(ctx, crt, updatedCert)
return utilerrors.NewAggregate([]error{err, updateStatusErr})
}
func (c *certificateRequestManager) updateCertificateStatus(ctx context.Context, old, crt *cmapi.Certificate) error {
log := logf.FromContext(ctx)
secretExists := true
certs, key, err := kube.SecretTLSKeyPair(ctx, c.secretLister, crt.Namespace, crt.Spec.SecretName)
if err != nil {
if !apierrors.IsNotFound(err) && !errors.IsInvalidData(err) {
return err
}
if apierrors.IsNotFound(err) {
secretExists = false
}
}
reqs, err := findCertificateRequestsForCertificate(log, crt, c.certificateRequestLister)
if err != nil {
return err
}
var req *cmapi.CertificateRequest
if len(reqs) == 1 {
req = reqs[0]
}
var cert *x509.Certificate
var certExpired bool
if len(certs) > 0 {
cert = certs[0]
certExpired = cert.NotAfter.Before(c.clock.Now())
}
var matches bool
var matchErrs []string
if key != nil && cert != nil {
secret, err := c.secretLister.Secrets(crt.Namespace).Get(crt.Spec.SecretName)
if err != nil {
return err
}
matches, matchErrs = certificateMatchesSpec(crt, key, cert, secret)
}
isTempCert := isTemporaryCertificate(cert)
// begin setting certificate status fields
if !matches || isTempCert {
crt.Status.NotAfter = nil
} else {
metaNotAfter := metav1.NewTime(cert.NotAfter)
crt.Status.NotAfter = &metaNotAfter
}
// Derive & set 'Ready' condition on Certificate resource
ready := cmmeta.ConditionFalse
reason := ""
message := ""
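// Cases are evaluated top to bottom; the first match determines the reported Ready reason and message.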
switch {
case !secretExists || key == nil:
reason = "NotFound"
message = "Certificate does not exist"
case matches && !isTempCert && !certExpired:
ready = cmmeta.ConditionTrue
reason = "Ready"
message = "Certificate is up to date and has not expired"
case req != nil:
reason = "InProgress"
message = fmt.Sprintf("Waiting for CertificateRequest %q to complete", req.Name)
case cert == nil:
reason = "Pending"
message = "Certificate pending issuance"
case !matches:
reason = "DoesNotMatch"
message = strings.Join(matchErrs, ", ")
case certExpired:
reason = "Expired"
message = fmt.Sprintf("Certificate has expired on %s", cert.NotAfter.Format(time.RFC822))
case isTempCert:
reason = "TemporaryCertificate"
message = "Certificate issuance in progress. Temporary certificate issued."
default:
// theoretically, it should not be possible to reach this state.
// practically, we may have missed some edge cases above.
// print a dump of the current state as a log message so that users can
// discover, share and attempt to resolve bugs in this area of code easily.
log.Info("unknown certificate state",
"secret_exists", secretExists,
"matches", matches,
"is_temp_cert", isTempCert,
"cert_expired", certExpired,
"key_is_nil", key == nil,
"req_is_nil", req == nil,
"cert_is_nil", cert == nil,
)
ready = cmmeta.ConditionFalse
reason = "Unknown"
message = "Unknown certificate status. Please open an issue and share your controller logs."
}
apiutil.SetCertificateCondition(crt, cmapi.CertificateConditionReady, ready, reason, message)
_, err = updateCertificateStatus(ctx, c.cmClient, old, crt)
if err != nil {
return err
}
return nil
}
// processCertificate is the core method that is called in the manager.
// It accepts a Certificate resource, and checks to see if the certificate
// requires re-issuance.
func (c *certificateRequestManager) processCertificate(ctx context.Context, crt *cmapi.Certificate) error {
log := logf.FromContext(ctx)
dbg := log.V(logf.DebugLevel)
// The certificate request name is a product of the certificate's spec,
// which makes it unique and predictable.
// First we compute what we expect it to be.
expectedReqName, err := apiutil.ComputeCertificateRequestName(crt)
if err != nil {
return fmt.Errorf("internal error hashing certificate spec: %v", err)
}
// Clean up any 'owned' CertificateRequest resources that do not have the
// expected name computed above
err = c.cleanupExistingCertificateRequests(log, crt, expectedReqName)
if err != nil {
return err
}
// Fetch a copy of the existing Secret resource
existingSecret, err := c.secretLister.Secrets(crt.Namespace).Get(crt.Spec.SecretName)
if apierrors.IsNotFound(err) {
// If the secret does not exist, generate a new private key and store it.
dbg.Info("existing secret not found, generating and storing private key")
return c.generateAndStorePrivateKey(ctx, crt, nil)
}
if err != nil {
return err
}
log = logf.WithRelatedResource(log, existingSecret)
ctx = logf.NewContext(ctx, log)
// If the Secret does not contain a private key, generate one and update
// the Secret resource
existingKey := existingSecret.Data[corev1.TLSPrivateKeyKey]
if len(existingKey) == 0 {
log.Info("existing private key not found in Secret, generate a new private key")
return c.generateAndStorePrivateKey(ctx, crt, existingSecret)
}
// Ensure the private key has the correct key algorithm and key size.
dbg.Info("validating private key has correct keyAlgorithm/keySize")
validKey, err := validatePrivateKeyUpToDate(log, existingKey, crt)
// If tls.key contains invalid data, we regenerate a new private key
if errors.IsInvalidData(err) {
log.Info("existing private key data is invalid, generating a new private key")
return c.generateAndStorePrivateKey(ctx, crt, existingSecret)
}
if err != nil {
return err
}
// If the private key is not 'up to date', we generate a new private key
if !validKey {
log.Info("existing private key does not match requirements specified on Certificate resource, generating new private key")
return c.generateAndStorePrivateKey(ctx, crt, existingSecret)
}
// Attempt to fetch the CertificateRequest with the expected name computed above.
dbg.Info("checking for existing CertificateRequest for Certificate")
existingReq, err := c.certificateRequestLister.CertificateRequests(crt.Namespace).Get(expectedReqName)
// Allow IsNotFound errors, later on we check if existingReq == nil and if
// it is, we create a new CertificateRequest resource.
if err != nil && !apierrors.IsNotFound(err) {
return err
}
if existingReq != nil {
dbg.Info("found existing certificate request for Certificate", "request_name", existingReq.Name)
log = logf.WithRelatedResource(log, existingReq)
}
needsIssue := true
// Parse the existing certificate
existingCert := existingSecret.Data[corev1.TLSCertKey]
if len(existingCert) > 0 {
// Here we check to see if the existing certificate 'matches' the spec
// of the Certificate resource.
// This includes checking if dnsNames, commonName, organization etc.
// are up to date, as well as validating that the stored private key is
// a valid partner to the stored certificate.
var matchErrs []string
dbg.Info("checking if existing certificate stored in Secret resource is not expiring soon and matches certificate spec")
needsIssue, matchErrs, err = c.certificateRequiresIssuance(ctx, crt, existingKey, existingCert, existingSecret)
if err != nil && !errors.IsInvalidData(err) {
return err
}
// If the certificate data is invalid, we require a re-issuance.
// The private key should never be invalid at this point as we already
// check it above.
if errors.IsInvalidData(err) {
dbg.Info("existing secret contains invalid certificate data")
needsIssue = true
}
if !needsIssue {
dbg.Info("existing certificate does not need re-issuance")
} else {
dbg.Info("will attempt to issue certificate", "reason", matchErrs)
}
}
// Exit early if the certificate doesn't need issuing to save extra work
if !needsIssue {
if existingReq != nil {
dbg.Info("skipping issuing certificate data into Secret resource as existing issued certificate is still valid")
}
// Before exiting, ensure that the Secret resource's metadata is up to
// date. If it isn't, it will be updated.
updated, err := c.ensureSecretMetadataUpToDate(ctx, existingSecret, crt)
if err != nil {
return err
}
if updated {
log.Info("updated Secret resource metadata as it was out of date")
}
// As the Certificate has been validated as Ready, schedule a renewal
// for near the expiry date.
scheduleRenewal(ctx, c.secretLister, c.calculateDurationUntilRenew, c.scheduledWorkQueue.Add, crt)
log.Info("certificate does not require re-issuance. certificate renewal scheduled near expiry time.")
return nil
}
// Attempt to decode the private key.
// This shouldn't fail as we already validate the private key is valid above.
dbg.Info("decoding existing private key")
privateKey, err := pki.DecodePrivateKeyBytes(existingKey)
if err != nil {
return err
}
// Attempt to decode the existing certificate.
// We tolerate invalid data errors as we will issue a certificate if the
// data is invalid.
dbg.Info("attempting to decode existing certificate")
existingX509Cert, err := pki.DecodeX509CertificateBytes(existingCert)
if err != nil && !errors.IsInvalidData(err) {
return err
}
if errors.IsInvalidData(err) {
dbg.Info("existing certificate data is invalid, continuing...")
}
// Handling for 'temporary certificates'
if certificateHasTemporaryCertificateAnnotation(crt) {
// Issue a temporary certificate if the current certificate is empty or the
// private key is not valid for the current certificate.
if existingX509Cert == nil {
log.Info("no existing certificate data found in secret, issuing temporary certificate")
return c.issueTemporaryCertificate(ctx, existingSecret, crt, existingKey)
}
matches, err := pki.PublicKeyMatchesCertificate(privateKey.Public(), existingX509Cert)
if err != nil || !matches {
log.Info("private key for certificate does not match, issuing temporary certificate")
return c.issueTemporaryCertificate(ctx, existingSecret, crt, existingKey)
}
log.Info("not issuing temporary certificate as existing certificate is sufficient")
// Ensure the secret metadata is up to date
updated, err := c.ensureSecretMetadataUpToDate(ctx, existingSecret, crt)
if err != nil {
return err
}
// Only return early if an update actually occurred, otherwise continue.
if updated {
log.Info("updated Secret resource metadata as it was out of date")
return nil
}
}
if existingReq == nil {
// If no existing CertificateRequest resource exists, we must create one
log.Info("no existing CertificateRequest resource exists, creating new request...")
req, err := c.buildCertificateRequest(log, crt, expectedReqName, existingKey)
if err != nil {
return err
}
req, err = c.cmClient.CertmanagerV1alpha2().CertificateRequests(crt.Namespace).Create(req)
if err != nil {
return err
}
c.recorder.Eventf(crt, corev1.EventTypeNormal, "Requested", "Created new CertificateRequest resource %q", req.Name)
log.Info("created certificate request", "request_name", req.Name)
return nil
}
// Validate the CertificateRequest's CSR is valid
log.Info("validating existing CSR data")
x509CSR, err := pki.DecodeX509CertificateRequestBytes(existingReq.Spec.CSRPEM)
// TODO: handle InvalidData
if err != nil {
return err
}
// Ensure the stored private key is a 'pair' to the CSR
publicKeyMatches, err := pki.PublicKeyMatchesCSR(privateKey.Public(), x509CSR)
if err != nil {
return err
}
// if the stored private key does not pair with the CSR on the
// CertificateRequest resource, delete the resource as we won't be able to
// do anything with the certificate if it is issued
if !publicKeyMatches {
log.Info("stored private key is not valid for CSR stored on existing CertificateRequest, recreating CertificateRequest resource")
err := c.cmClient.CertmanagerV1alpha2().CertificateRequests(existingReq.Namespace).Delete(existingReq.Name, nil)
if err != nil {
return err
}
c.recorder.Eventf(crt, corev1.EventTypeNormal, "PrivateKeyLost", "Lost private key for CertificateRequest %q, deleting old resource", existingReq.Name)
log.Info("deleted existing CertificateRequest as the stored private key does not match the CSR")
return nil
}
reason := apiutil.CertificateRequestReadyReason(existingReq)
// Determine the status reason of the CertificateRequest and process accordingly
switch reason {
// If the CertificateRequest exists but has failed then we check the failure
// time. If the failure time doesn't exist or is over an hour in the past
// then delete the request so it can be re-created on the next sync. If the
// failure time is less than an hour in the past then schedule this owning
// Certificate for a re-sync in an hour.
case cmapi.CertificateRequestReasonFailed:
if existingReq.Status.FailureTime == nil || c.clock.Since(existingReq.Status.FailureTime.Time) > time.Hour {
log.Info("deleting failed certificate request")
err := c.cmClient.CertmanagerV1alpha2().CertificateRequests(existingReq.Namespace).Delete(existingReq.Name, nil)
if err != nil {
return err
}
c.recorder.Eventf(crt, corev1.EventTypeNormal, "CertificateRequestRetry", "The failed CertificateRequest %q will be retried now", existingReq.Name)
return nil
}
log.Info("the failed existing certificate request failed less than an hour ago, will be scheduled for reprocessing in an hour")
key, err := keyFunc(crt)
if err != nil {
log.Error(err, "error getting key for certificate resource")
return nil
}
// We don't fire an event here as this could be called multiple times in quick succession
c.scheduledWorkQueue.Add(key, time.Hour)
return nil
// If the CertificateRequest is in a Ready state then we can decode,
// verify, and check whether it needs renewal
case cmapi.CertificateRequestReasonIssued:
log.Info("CertificateRequest is in a Ready state, issuing certificate...")
// Decode the certificate bytes so we can ensure the certificate is valid
log.Info("decoding certificate data")
x509Cert, err := pki.DecodeX509CertificateBytes(existingReq.Status.Certificate)
if err != nil {
return err
}
// Check if the Certificate requires renewal according to the renewBefore
// specified on the Certificate resource.
log.Info("checking if certificate stored on CertificateRequest is up to date")
if c.certificateNeedsRenew(ctx, x509Cert, crt) {
log.Info("certificate stored on CertificateRequest needs renewal, so deleting the old CertificateRequest resource")
err := c.cmClient.CertmanagerV1alpha2().CertificateRequests(existingReq.Namespace).Delete(existingReq.Name, nil)
if err != nil {
return err
}
return nil
}
// If certificate stored on CertificateRequest is not expiring soon, copy
// across the status.certificate field into the Secret resource.
log.Info("CertificateRequest contains a valid certificate for issuance. Issuing certificate...")
_, err = c.updateSecretData(ctx, crt, existingSecret, secretData{pk: existingKey, cert: existingReq.Status.Certificate, ca: existingReq.Status.CA})
if err != nil {
return err
}
c.recorder.Eventf(crt, corev1.EventTypeNormal, "Issued", "Certificate issued successfully")
return nil
// If it is not Ready _OR_ Failed then we return and wait for informer
// updates to re-trigger processing.
default:
log.Info("CertificateRequest is not in a final state, waiting until CertificateRequest is complete", "state", reason)
return nil
}
}
// updateSecretData will ensure the Secret resource contains the given secret
// data as well as appropriate metadata.
// If the given 'existingSecret' is nil, a new Secret resource will be created.
// Otherwise, the existing resource will be updated.
// The first return argument will be true if the resource was updated/created
// without error.
// updateSecretData will also update deprecated annotations if they exist.
func (c *certificateRequestManager) updateSecretData(ctx context.Context, crt *cmapi.Certificate, existingSecret *corev1.Secret, data secretData) (bool, error) {
s := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: crt.Spec.SecretName,
Namespace: crt.Namespace,
},
Type: corev1.SecretTypeTLS,
}
// s will be overwritten by 'existingSecret' if existingSecret is non-nil
if c.enableSecretOwnerReferences {
s.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(crt, certificateGvk)}
}
if existingSecret != nil {
s = existingSecret
}
newSecret := s.DeepCopy()
err := setSecretValues(ctx, crt, newSecret, secretData{pk: data.pk, cert: data.cert, ca: data.ca})
if err != nil {
return false, err
}
if reflect.DeepEqual(s, newSecret) {
return false, nil
}
if existingSecret == nil {
_, err = c.kubeClient.CoreV1().Secrets(newSecret.Namespace).Create(newSecret)
if err != nil {
return false, err
}
return true, nil
}
_, err = c.kubeClient.CoreV1().Secrets(newSecret.Namespace).Update(newSecret)
if err != nil {
return false, err
}
return true, nil
}
func (c *certificateRequestManager) ensureSecretMetadataUpToDate(ctx context.Context, s *corev1.Secret, crt *cmapi.Certificate) (bool, error) {
pk := s.Data[corev1.TLSPrivateKeyKey]
cert := s.Data[corev1.TLSCertKey]
ca := s.Data[cmmeta.TLSCAKey]
updated, err := c.updateSecretData(ctx, crt, s, secretData{pk: pk, cert: cert, ca: ca})
if err != nil || !updated {
return updated, err
}
c.recorder.Eventf(crt, corev1.EventTypeNormal, "UpdateMeta", "Updated metadata on Secret resource")
return true, nil
}
func (c *certificateRequestManager) issueTemporaryCertificate(ctx context.Context, secret *corev1.Secret, crt *cmapi.Certificate, key []byte) error {
tempCertData, err := c.localTemporarySigner(crt, key)
if err != nil {
return err
}
newSecret := secret.DeepCopy()
err = setSecretValues(ctx, crt, newSecret, secretData{pk: key, cert: tempCertData})
if err != nil {
return err
}
newSecret, err = c.kubeClient.CoreV1().Secrets(newSecret.Namespace).Update(newSecret)
if err != nil {
return err
}
c.recorder.Eventf(crt, corev1.EventTypeNormal, "TempCert", "Issued temporary certificate")
return nil
}
func (c *certificateRequestManager) certificateRequiresIssuance(ctx context.Context, crt *cmapi.Certificate, keyBytes, certBytes []byte, secret *corev1.Secret) (bool, []string, error) {
key, err := pki.DecodePrivateKeyBytes(keyBytes)
if err != nil {
return false, nil, err
}
cert, err := pki.DecodeX509CertificateBytes(certBytes)
if err != nil {
return false, nil, err
}
if isTemporaryCertificate(cert) {
return true, nil, nil
}
matches, matchErrs := certificateMatchesSpec(crt, key, cert, secret)
if !matches {
return true, matchErrs, nil
}
needsRenew := c.certificateNeedsRenew(ctx, cert, crt)
return needsRenew, []string{"Certificate is expiring soon"}, nil
}
type generateCSRFn func(*cmapi.Certificate, []byte) ([]byte, error)
func generateCSRImpl(crt *cmapi.Certificate, pk []byte) ([]byte, error) {
csr, err := pki.GenerateCSR(crt)
if err != nil {
return nil, err
}
signer, err := pki.DecodePrivateKeyBytes(pk)
if err != nil {
return nil, err
}
csrDER, err := pki.EncodeCSR(csr, signer)
if err != nil {
return nil, err
}
csrPEM := pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE REQUEST", Bytes: csrDER,
})
return csrPEM, nil
}
func (c *certificateRequestManager) buildCertificateRequest(log logr.Logger, crt *cmapi.Certificate, name string, pk []byte) (*cmapi.CertificateRequest, error) {
csrPEM, err := c.generateCSR(crt, pk)
if err != nil {
return nil, err
}
annotations := make(map[string]string, len(crt.Annotations)+2)
for k, v := range crt.Annotations {
annotations[k] = v
}
annotations[cmapi.CRPrivateKeyAnnotationKey] = crt.Spec.SecretName
annotations[cmapi.CertificateNameKey] = crt.Name
cr := &cmapi.CertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: crt.Namespace,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(crt, certificateGvk)},
Annotations: annotations,
Labels: crt.Labels,
},
Spec: cmapi.CertificateRequestSpec{
CSRPEM: csrPEM,
Duration: crt.Spec.Duration,
IssuerRef: crt.Spec.IssuerRef,
IsCA: crt.Spec.IsCA,
Usages: crt.Spec.Usages,
},
}
return cr, nil
}
func (c *certificateRequestManager) cleanupExistingCertificateRequests(log logr.Logger, crt *cmapi.Certificate, retain string) error {
reqs, err := findCertificateRequestsForCertificate(log, crt, c.certificateRequestLister)
if err != nil {
return err
}
for _, req := range reqs {
log := logf.WithRelatedResource(log, req)
if req.Name == retain {
log.V(logf.DebugLevel).Info("skipping deleting CertificateRequest as it is up to date for the certificate spec")
continue
}
err = c.cmClient.CertmanagerV1alpha2().CertificateRequests(req.Namespace).Delete(req.Name, nil)
if err != nil {
return err
}
log.Info("deleted no longer required CertificateRequest")
}
return nil
}
func findCertificateRequestsForCertificate(log logr.Logger, crt *cmapi.Certificate, lister cmlisters.CertificateRequestLister) ([]*cmapi.CertificateRequest, error) {
log.V(logf.DebugLevel).Info("finding existing CertificateRequest resources for Certificate")
reqs, err := lister.CertificateRequests(crt.Namespace).List(labels.Everything())
if err != nil {
return nil, err
}
var candidates []*cmapi.CertificateRequest
for _, req := range reqs {
log := logf.WithRelatedResource(log, req)
if metav1.IsControlledBy(req, crt) {
log.V(logf.DebugLevel).Info("found CertificateRequest resource for Certificate")
candidates = append(candidates, req)
}
}
return candidates, nil
}
// validatePrivateKeyUpToDate will evaluate the private key data in pk and
// ensure it is 'up to date' and matches the specification of the key as
// required by the given Certificate resource.
// It returns false if the private key isn't up to date, e.g. the Certificate
// resource specifies a different keyEncoding, keyAlgorithm or keySize.
func validatePrivateKeyUpToDate(log logr.Logger, pk []byte, crt *cmapi.Certificate) (bool, error) {
signer, err := pki.DecodePrivateKeyBytes(pk)
if err != nil {
return false, err
}
// TODO: check keyEncoding
wantedAlgorithm := crt.Spec.KeyAlgorithm
if wantedAlgorithm == "" {
// in-memory defaulting of the key algorithm to RSA
// TODO: remove this in favour of actual defaulting in a mutating webhook
wantedAlgorithm = cmapi.RSAKeyAlgorithm
}
switch wantedAlgorithm {
case cmapi.RSAKeyAlgorithm:
_, ok := signer.(*rsa.PrivateKey)
if !ok {
log.Info("expected private key's algorithm to be RSA but it is not")
return false, nil
}
// TODO: check keySize
case cmapi.ECDSAKeyAlgorithm:
_, ok := signer.(*ecdsa.PrivateKey)
if !ok {
log.Info("expected private key's algorithm to be ECDSA but it is not")
return false, nil
}
// TODO: check keySize
}
return true, nil
}
func (c *certificateRequestManager) generateAndStorePrivateKey(ctx context.Context, crt *cmapi.Certificate, s *corev1.Secret) error {
keyData, err := c.generatePrivateKeyBytes(ctx, crt)
if err != nil {
// TODO: handle permanent failures caused by invalid spec
return err
}
updated, err := c.updateSecretData(ctx, crt, s, secretData{pk: keyData})
if err != nil {
return err
}
if !updated {
return nil
}
c.recorder.Eventf(crt, corev1.EventTypeNormal, "GeneratedKey", "Generated a new private key")
return nil
}
type generatePrivateKeyBytesFn func(context.Context, *cmapi.Certificate) ([]byte, error)
func generatePrivateKeyBytesImpl(ctx context.Context, crt *cmapi.Certificate) ([]byte, error) {
signer, err := pki.GeneratePrivateKeyForCertificate(crt)
if err != nil {
return nil, err
}
keyData, err := pki.EncodePrivateKey(signer, crt.Spec.KeyEncoding)
if err != nil {
return nil, err
}
return keyData, nil
}
// secretData is a structure wrapping private key, certificate and CA data
type secretData struct {
pk, cert, ca []byte
}
// setSecretValues will update the Secret resource 's' with the data contained
// in the given secretData.
// It will update labels and annotations on the Secret resource appropriately.
// The Secret resource 's' must be non-nil, although it may be a resource that does
// not yet exist in the Kubernetes apiserver.
// setSecretValues will NOT actually update the resource in the apiserver.
// If updating an existing Secret resource returned by an api client 'lister',
// make sure to DeepCopy the object first to avoid modifying data in-cache.
// It will also update deprecated issuer name and kind annotations if they exist.
func setSecretValues(ctx context.Context, crt *cmapi.Certificate, s *corev1.Secret, data secretData) error {
// initialize the `Data` field if it is nil
if s.Data == nil {
s.Data = make(map[string][]byte)
}
s.Data[corev1.TLSPrivateKeyKey] = data.pk
s.Data[corev1.TLSCertKey] = data.cert
s.Data[cmmeta.TLSCAKey] = data.ca
if s.Annotations == nil {
s.Annotations = make(map[string]string)
}
s.Annotations[cmapi.CertificateNameKey] = crt.Name
s.Annotations[cmapi.IssuerNameAnnotationKey] = crt.Spec.IssuerRef.Name
s.Annotations[cmapi.IssuerKindAnnotationKey] = apiutil.IssuerKind(crt.Spec.IssuerRef)
// If deprecated annotations exist with any value, then they too shall be
// updated
if _, ok := s.Annotations[cmapi.DeprecatedIssuerNameAnnotationKey]; ok {
s.Annotations[cmapi.DeprecatedIssuerNameAnnotationKey] = crt.Spec.IssuerRef.Name
}
if _, ok := s.Annotations[cmapi.DeprecatedIssuerKindAnnotationKey]; ok {
s.Annotations[cmapi.DeprecatedIssuerKindAnnotationKey] = apiutil.IssuerKind(crt.Spec.IssuerRef)
}
// if the certificate data is empty, clear the subject related annotations
if len(data.cert) == 0 {
delete(s.Annotations, cmapi.CommonNameAnnotationKey)
delete(s.Annotations, cmapi.AltNamesAnnotationKey)
delete(s.Annotations, cmapi.IPSANAnnotationKey)
delete(s.Annotations, cmapi.URISANAnnotationKey)
} else {
x509Cert, err := pki.DecodeX509CertificateBytes(data.cert)
// TODO: handle InvalidData here?
if err != nil {
return err
}
s.Annotations[cmapi.CommonNameAnnotationKey] = x509Cert.Subject.CommonName
s.Annotations[cmapi.AltNamesAnnotationKey] = strings.Join(x509Cert.DNSNames, ",")
s.Annotations[cmapi.IPSANAnnotationKey] = strings.Join(pki.IPAddressesToString(x509Cert.IPAddresses), ",")
s.Annotations[cmapi.URISANAnnotationKey] = strings.Join(pki.URLsToString(x509Cert.URIs), ",")
}
return nil
}
| 1 | 20,128 | Can we provide some more useful actionable info here? `fmt.Sprintf("The certificate request could not be completed due to invalid request options: %s", req.Status.Conditions[InvalidRequestCondition].Message)` | jetstack-cert-manager | go |
@@ -155,6 +155,7 @@ var (
API: API{
UseRDS: false,
Port: 14014,
+ Web3Port: 15014, // TODO: port 15014 is used for testing
TpsWindow: 10,
GasStation: GasStation{
SuggestBlockWindow: 20, | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package config
import (
"crypto/ecdsa"
"math/big"
"os"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/iotexproject/go-pkgs/crypto"
"github.com/iotexproject/iotex-election/committee"
"github.com/pkg/errors"
uconfig "go.uber.org/config"
"go.uber.org/zap"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/dispatcher"
"github.com/iotexproject/iotex-core/p2p"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/tracer"
"github.com/iotexproject/iotex-core/pkg/unit"
)
// IMPORTANT: to define a config, add a field or a new config type to the existing config types. In addition, provide
// the default value in Default var.
var (
_evmNetworkID uint32
loadChainID sync.Once
)
const (
// RollDPoSScheme means randomized delegated proof of stake
RollDPoSScheme = "ROLLDPOS"
// StandaloneScheme means that the node creates a block periodically regardless of others (if there is any)
StandaloneScheme = "STANDALONE"
// NOOPScheme means that the node does not create only block
NOOPScheme = "NOOP"
)
const (
// GatewayPlugin is the plugin of accepting user API requests and serving blockchain data to users
GatewayPlugin = iota
)
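// strs provides String and Set methods so a *strs satisfies the flag.Value interface and can collect a repeated string flag.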
type strs []string
func (ss *strs) String() string {
return strings.Join(*ss, ",")
}
func (ss *strs) Set(str string) error {
*ss = append(*ss, str)
return nil
}
// Supported signature schemes for the producer private key
const (
SigP256k1 = "secp256k1"
SigP256sm2 = "p256sm2"
)
var (
// Default is the default config
Default = Config{
Plugins: make(map[int]interface{}),
SubLogs: make(map[string]log.GlobalConfig),
Network: p2p.DefaultConfig,
Chain: Chain{
ChainDBPath: "/var/data/chain.db",
TrieDBPatchFile: "/var/data/trie.db.patch",
TrieDBPath: "/var/data/trie.db",
IndexDBPath: "/var/data/index.db",
BloomfilterIndexDBPath: "/var/data/bloomfilter.index.db",
CandidateIndexDBPath: "/var/data/candidate.index.db",
StakingIndexDBPath: "/var/data/staking.index.db",
ID: 1,
EVMNetworkID: 4689,
Address: "",
ProducerPrivKey: generateRandomKey(SigP256k1),
SignatureScheme: []string{SigP256k1},
EmptyGenesis: false,
GravityChainDB: db.Config{DbPath: "/var/data/poll.db", NumRetries: 10},
Committee: committee.Config{
GravityChainAPIs: []string{},
},
EnableTrielessStateDB: true,
EnableStateDBCaching: false,
EnableArchiveMode: false,
EnableAsyncIndexWrite: true,
EnableSystemLogIndexer: false,
EnableStakingProtocol: true,
EnableStakingIndexer: false,
CompressBlock: false,
AllowedBlockGasResidue: 10000,
MaxCacheSize: 0,
PollInitialCandidatesInterval: 10 * time.Second,
StateDBCacheSize: 1000,
WorkingSetCacheSize: 20,
},
ActPool: ActPool{
MaxNumActsPerPool: 32000,
MaxGasLimitPerPool: 320000000,
MaxNumActsPerAcct: 2000,
ActionExpiry: 10 * time.Minute,
MinGasPriceStr: big.NewInt(unit.Qev).String(),
BlackList: []string{},
},
Consensus: Consensus{
Scheme: StandaloneScheme,
RollDPoS: RollDPoS{
FSM: ConsensusTiming{
UnmatchedEventTTL: 3 * time.Second,
UnmatchedEventInterval: 100 * time.Millisecond,
AcceptBlockTTL: 4 * time.Second,
AcceptProposalEndorsementTTL: 2 * time.Second,
AcceptLockEndorsementTTL: 2 * time.Second,
CommitTTL: 2 * time.Second,
EventChanSize: 10000,
},
ToleratedOvertime: 2 * time.Second,
Delay: 5 * time.Second,
ConsensusDBPath: "/var/data/consensus.db",
},
},
DardanellesUpgrade: DardanellesUpgrade{
UnmatchedEventTTL: 2 * time.Second,
UnmatchedEventInterval: 100 * time.Millisecond,
AcceptBlockTTL: 2 * time.Second,
AcceptProposalEndorsementTTL: time.Second,
AcceptLockEndorsementTTL: time.Second,
CommitTTL: time.Second,
BlockInterval: 5 * time.Second,
Delay: 2 * time.Second,
},
BlockSync: BlockSync{
Interval: 30 * time.Second,
ProcessSyncRequestTTL: 10 * time.Second,
BufferSize: 200,
IntervalSize: 20,
MaxRepeat: 3,
RepeatDecayStep: 1,
},
Dispatcher: dispatcher.DefaultConfig,
API: API{
UseRDS: false,
Port: 14014,
TpsWindow: 10,
GasStation: GasStation{
SuggestBlockWindow: 20,
DefaultGas: uint64(unit.Qev),
Percentile: 60,
},
RangeQueryLimit: 1000,
},
System: System{
Active: true,
HeartbeatInterval: 10 * time.Second,
HTTPStatsPort: 8080,
HTTPAdminPort: 9009,
StartSubChainInterval: 10 * time.Second,
SystemLogDBPath: "/var/data/systemlog.db",
},
DB: db.Config{
NumRetries: 3,
MaxCacheSize: 64,
BlockStoreBatchSize: 16,
V2BlocksToSplitDB: 1000000,
Compressor: "Snappy",
CompressLegacy: false,
SplitDBSizeMB: 0,
SplitDBHeight: 900000,
HistoryStateRetention: 2000,
},
Indexer: Indexer{
RangeBloomFilterNumElements: 100000,
RangeBloomFilterSize: 1200000,
RangeBloomFilterNumHash: 8,
},
Genesis: genesis.Default,
}
// ErrInvalidCfg indicates the invalid config value
ErrInvalidCfg = errors.New("invalid config value")
// Validates is the collection of config validation functions
Validates = []Validate{
ValidateRollDPoS,
ValidateArchiveMode,
ValidateDispatcher,
ValidateAPI,
ValidateActPool,
ValidateForkHeights,
}
)
// Network is the config struct for network package
type (
// Chain is the config struct for blockchain package
Chain struct {
ChainDBPath string `yaml:"chainDBPath"`
TrieDBPatchFile string `yaml:"trieDBPatchFile"`
TrieDBPath string `yaml:"trieDBPath"`
IndexDBPath string `yaml:"indexDBPath"`
BloomfilterIndexDBPath string `yaml:"bloomfilterIndexDBPath"`
CandidateIndexDBPath string `yaml:"candidateIndexDBPath"`
StakingIndexDBPath string `yaml:"stakingIndexDBPath"`
ID uint32 `yaml:"id"`
EVMNetworkID uint32 `yaml:"evmNetworkID"`
Address string `yaml:"address"`
ProducerPrivKey string `yaml:"producerPrivKey"`
SignatureScheme []string `yaml:"signatureScheme"`
EmptyGenesis bool `yaml:"emptyGenesis"`
GravityChainDB db.Config `yaml:"gravityChainDB"`
Committee committee.Config `yaml:"committee"`
EnableTrielessStateDB bool `yaml:"enableTrielessStateDB"`
// EnableStateDBCaching enables cachedStateDBOption
EnableStateDBCaching bool `yaml:"enableStateDBCaching"`
// EnableArchiveMode is only meaningful when EnableTrielessStateDB is false
EnableArchiveMode bool `yaml:"enableArchiveMode"`
// EnableAsyncIndexWrite enables writing the block actions' and receipts' index asynchronously
EnableAsyncIndexWrite bool `yaml:"enableAsyncIndexWrite"`
// deprecated
EnableSystemLogIndexer bool `yaml:"enableSystemLog"`
// EnableStakingProtocol enables staking protocol
EnableStakingProtocol bool `yaml:"enableStakingProtocol"`
// EnableStakingIndexer enables staking indexer
EnableStakingIndexer bool `yaml:"enableStakingIndexer"`
// deprecated by DB.CompressBlock
CompressBlock bool `yaml:"compressBlock"`
// AllowedBlockGasResidue is the amount of gas remained when block producer could stop processing more actions
AllowedBlockGasResidue uint64 `yaml:"allowedBlockGasResidue"`
// MaxCacheSize is the max number of blocks that will be put into an LRU cache. 0 means disabled
MaxCacheSize int `yaml:"maxCacheSize"`
// PollInitialCandidatesInterval is the config for committee init db
PollInitialCandidatesInterval time.Duration `yaml:"pollInitialCandidatesInterval"`
// StateDBCacheSize is the max size of statedb LRU cache
StateDBCacheSize int `yaml:"stateDBCacheSize"`
// WorkingSetCacheSize is the max size of workingset cache in state factory
WorkingSetCacheSize uint64 `yaml:"workingSetCacheSize"`
}
// Consensus is the config struct for consensus package
Consensus struct {
// There are three schemes that are supported
Scheme string `yaml:"scheme"`
RollDPoS RollDPoS `yaml:"rollDPoS"`
}
// BlockSync is the config struct for the BlockSync
BlockSync struct {
Interval time.Duration `yaml:"interval"` // update duration
ProcessSyncRequestTTL time.Duration `yaml:"processSyncRequestTTL"`
BufferSize uint64 `yaml:"bufferSize"`
IntervalSize uint64 `yaml:"intervalSize"`
// MaxRepeat is the maximal number of repeat of a block sync request
MaxRepeat int `yaml:"maxRepeat"`
// RepeatDecayStep is the step for repeat number decreasing by 1
RepeatDecayStep int `yaml:"repeatDecayStep"`
}
// DardanellesUpgrade is the config for dardanelles upgrade
DardanellesUpgrade struct {
UnmatchedEventTTL time.Duration `yaml:"unmatchedEventTTL"`
UnmatchedEventInterval time.Duration `yaml:"unmatchedEventInterval"`
AcceptBlockTTL time.Duration `yaml:"acceptBlockTTL"`
AcceptProposalEndorsementTTL time.Duration `yaml:"acceptProposalEndorsementTTL"`
AcceptLockEndorsementTTL time.Duration `yaml:"acceptLockEndorsementTTL"`
CommitTTL time.Duration `yaml:"commitTTL"`
BlockInterval time.Duration `yaml:"blockInterval"`
Delay time.Duration `yaml:"delay"`
}
// RollDPoS is the config struct for RollDPoS consensus package
RollDPoS struct {
FSM ConsensusTiming `yaml:"fsm"`
ToleratedOvertime time.Duration `yaml:"toleratedOvertime"`
Delay time.Duration `yaml:"delay"`
ConsensusDBPath string `yaml:"consensusDBPath"`
}
// ConsensusTiming defines a set of time durations used in fsm and event queue size
ConsensusTiming struct {
EventChanSize uint `yaml:"eventChanSize"`
UnmatchedEventTTL time.Duration `yaml:"unmatchedEventTTL"`
UnmatchedEventInterval time.Duration `yaml:"unmatchedEventInterval"`
AcceptBlockTTL time.Duration `yaml:"acceptBlockTTL"`
AcceptProposalEndorsementTTL time.Duration `yaml:"acceptProposalEndorsementTTL"`
AcceptLockEndorsementTTL time.Duration `yaml:"acceptLockEndorsementTTL"`
CommitTTL time.Duration `yaml:"commitTTL"`
}
// API is the api service config
API struct {
UseRDS bool `yaml:"useRDS"`
Port int `yaml:"port"`
TpsWindow int `yaml:"tpsWindow"`
GasStation GasStation `yaml:"gasStation"`
RangeQueryLimit uint64 `yaml:"rangeQueryLimit"`
Tracer tracer.Config `yaml:"tracer"`
}
// GasStation is the gas station config
GasStation struct {
SuggestBlockWindow int `yaml:"suggestBlockWindow"`
DefaultGas uint64 `yaml:"defaultGas"`
Percentile int `yaml:"Percentile"`
}
// System is the system config
System struct {
// Active is the status of the node. True means active and false means stand-by
Active bool `yaml:"active"`
HeartbeatInterval time.Duration `yaml:"heartbeatInterval"`
// HTTPAdminPort is the port number to access the admin endpoints of a blockchain node, including golang performance profiling data. It is
// 0 by default, meaning performance profiling has been disabled
HTTPAdminPort int `yaml:"httpAdminPort"`
HTTPStatsPort int `yaml:"httpStatsPort"`
StartSubChainInterval time.Duration `yaml:"startSubChainInterval"`
SystemLogDBPath string `yaml:"systemLogDBPath"`
}
// ActPool is the actpool config
ActPool struct {
// MaxNumActsPerPool indicates maximum number of actions the whole actpool can hold
MaxNumActsPerPool uint64 `yaml:"maxNumActsPerPool"`
// MaxGasLimitPerPool indicates maximum gas limit the whole actpool can hold
MaxGasLimitPerPool uint64 `yaml:"maxGasLimitPerPool"`
// MaxNumActsPerAcct indicates maximum number of actions an account queue can hold
MaxNumActsPerAcct uint64 `yaml:"maxNumActsPerAcct"`
// ActionExpiry defines how long an action will be kept in action pool.
ActionExpiry time.Duration `yaml:"actionExpiry"`
// MinGasPriceStr defines the minimal gas price the delegate will accept for an action
MinGasPriceStr string `yaml:"minGasPrice"`
// BlackList lists the account address that are banned from initiating actions
BlackList []string `yaml:"blackList"`
}
// Indexer is the config for indexer
Indexer struct {
// RangeBloomFilterNumElements is the number of elements each rangeBloomfilter will store in bloomfilterIndexer
RangeBloomFilterNumElements uint64 `yaml:"rangeBloomFilterNumElements"`
// RangeBloomFilterSize is the size (in bits) of rangeBloomfilter
RangeBloomFilterSize uint64 `yaml:"rangeBloomFilterSize"`
// RangeBloomFilterNumHash is the number of hash functions of rangeBloomfilter
RangeBloomFilterNumHash uint64 `yaml:"rangeBloomFilterNumHash"`
}
// Config is the root config struct, each package's config should be put as its sub struct
Config struct {
Plugins map[int]interface{} `yaml:"plugins"`
Network p2p.Network `yaml:"network"`
Chain Chain `yaml:"chain"`
ActPool ActPool `yaml:"actPool"`
Consensus Consensus `yaml:"consensus"`
DardanellesUpgrade DardanellesUpgrade `yaml:"dardanellesUpgrade"`
BlockSync BlockSync `yaml:"blockSync"`
Dispatcher dispatcher.Config `yaml:"dispatcher"`
API API `yaml:"api"`
System System `yaml:"system"`
DB db.Config `yaml:"db"`
Indexer Indexer `yaml:"indexer"`
Log log.GlobalConfig `yaml:"log"`
SubLogs map[string]log.GlobalConfig `yaml:"subLogs"`
Genesis genesis.Genesis `yaml:"genesis"`
}
// Validate is the interface of validating the config
Validate func(Config) error
)
// New creates a config instance. It first loads the default configs. If the config path is not empty, it will read from
// the file and override the default configs. By default, it will apply all validation functions. To bypass validation,
// use DoNotValidate instead.
func New(configPaths []string, _plugins []string, validates ...Validate) (Config, error) {
opts := make([]uconfig.YAMLOption, 0)
opts = append(opts, uconfig.Static(Default))
opts = append(opts, uconfig.Expand(os.LookupEnv))
for _, path := range configPaths {
if path != "" {
opts = append(opts, uconfig.File(path))
}
}
yaml, err := uconfig.NewYAML(opts...)
if err != nil {
return Config{}, errors.Wrap(err, "failed to init config")
}
var cfg Config
if err := yaml.Get(uconfig.Root).Populate(&cfg); err != nil {
return Config{}, errors.Wrap(err, "failed to unmarshal YAML config to struct")
}
// set network master key to private key
if cfg.Network.MasterKey == "" {
cfg.Network.MasterKey = cfg.Chain.ProducerPrivKey
}
// set plugins
for _, plugin := range _plugins {
switch strings.ToLower(plugin) {
case "gateway":
cfg.Plugins[GatewayPlugin] = nil
default:
return Config{}, errors.Errorf("Plugin %s is not supported", plugin)
}
}
// By default, the config needs to pass all the validation
if len(validates) == 0 {
validates = Validates
}
for _, validate := range validates {
if err := validate(cfg); err != nil {
return Config{}, errors.Wrap(err, "failed to validate config")
}
}
return cfg, nil
}
// NewSub create config for sub chain.
func NewSub(configPaths []string, validates ...Validate) (Config, error) {
opts := make([]uconfig.YAMLOption, 0)
opts = append(opts, uconfig.Static(Default))
opts = append(opts, uconfig.Expand(os.LookupEnv))
for _, path := range configPaths {
if path != "" {
opts = append(opts, uconfig.File(path))
}
}
yaml, err := uconfig.NewYAML(opts...)
if err != nil {
return Config{}, errors.Wrap(err, "failed to init config")
}
var cfg Config
if err := yaml.Get(uconfig.Root).Populate(&cfg); err != nil {
return Config{}, errors.Wrap(err, "failed to unmarshal YAML config to struct")
}
// By default, the config needs to pass all the validation
if len(validates) == 0 {
validates = Validates
}
for _, validate := range validates {
if err := validate(cfg); err != nil {
return Config{}, errors.Wrap(err, "failed to validate config")
}
}
return cfg, nil
}
// SetEVMNetworkID sets the extern chain ID
func SetEVMNetworkID(id uint32) {
loadChainID.Do(func() {
_evmNetworkID = id
})
}
// EVMNetworkID returns the extern chain ID
func EVMNetworkID() uint32 {
return atomic.LoadUint32(&_evmNetworkID)
}
// ProducerAddress returns the configured producer address derived from key
func (cfg Config) ProducerAddress() address.Address {
sk := cfg.ProducerPrivateKey()
addr := sk.PublicKey().Address()
if addr == nil {
log.L().Panic(
"Error when constructing producer address",
zap.Error(errors.New("failed to get address")),
)
}
return addr
}
// ProducerPrivateKey returns the configured private key
func (cfg Config) ProducerPrivateKey() crypto.PrivateKey {
sk, err := crypto.HexStringToPrivateKey(cfg.Chain.ProducerPrivKey)
if err != nil {
log.L().Panic(
"Error when decoding private key",
zap.Error(err),
)
}
if !cfg.whitelistSignatureScheme(sk) {
log.L().Panic("The private key's signature scheme is not whitelisted")
}
return sk
}
func (cfg Config) whitelistSignatureScheme(sk crypto.PrivateKey) bool {
var sigScheme string
switch sk.EcdsaPrivateKey().(type) {
case *ecdsa.PrivateKey:
sigScheme = SigP256k1
case *crypto.P256sm2PrvKey:
sigScheme = SigP256sm2
}
if sigScheme == "" {
return false
}
for _, e := range cfg.Chain.SignatureScheme {
if sigScheme == e {
// signature scheme is whitelisted
return true
}
}
return false
}
func generateRandomKey(scheme string) string {
// generate a random key
switch scheme {
case SigP256k1:
sk, _ := crypto.GenerateKey()
return sk.HexString()
case SigP256sm2:
sk, _ := crypto.GenerateKeySm2()
return sk.HexString()
}
return ""
}
// MinGasPrice returns the minimal gas price threshold
func (ap ActPool) MinGasPrice() *big.Int {
mgp, ok := big.NewInt(0).SetString(ap.MinGasPriceStr, 10)
if !ok {
log.S().Panicf("Error when parsing minimal gas price string: %s", ap.MinGasPriceStr)
}
return mgp
}
// ValidateDispatcher validates the dispatcher configs
func ValidateDispatcher(cfg Config) error {
if cfg.Dispatcher.ActionChanSize <= 0 || cfg.Dispatcher.BlockChanSize <= 0 || cfg.Dispatcher.BlockSyncChanSize <= 0 {
return errors.Wrap(ErrInvalidCfg, "dispatcher chan size should be greater than 0")
}
if cfg.Dispatcher.ProcessSyncRequestInterval < 0 {
return errors.Wrap(ErrInvalidCfg, "dispatcher processSyncRequestInterval should not be less than 0")
}
return nil
}
// ValidateRollDPoS validates the roll-DPoS configs
func ValidateRollDPoS(cfg Config) error {
if cfg.Consensus.Scheme != RollDPoSScheme {
return nil
}
rollDPoS := cfg.Consensus.RollDPoS
fsm := rollDPoS.FSM
if fsm.EventChanSize <= 0 {
return errors.Wrap(ErrInvalidCfg, "roll-DPoS event chan size should be greater than 0")
}
return nil
}
// ValidateArchiveMode validates the state factory setting
func ValidateArchiveMode(cfg Config) error {
if !cfg.Chain.EnableArchiveMode || !cfg.Chain.EnableTrielessStateDB {
return nil
}
return errors.Wrap(ErrInvalidCfg, "Archive mode is incompatible with trieless state DB")
}
// ValidateAPI validates the api configs
func ValidateAPI(cfg Config) error {
if cfg.API.TpsWindow <= 0 {
return errors.Wrap(ErrInvalidCfg, "tps window is not a positive integer when the api is enabled")
}
return nil
}
// ValidateActPool validates the given config
func ValidateActPool(cfg Config) error {
maxNumActPerPool := cfg.ActPool.MaxNumActsPerPool
maxNumActPerAcct := cfg.ActPool.MaxNumActsPerAcct
if maxNumActPerPool <= 0 || maxNumActPerAcct <= 0 {
return errors.Wrap(
ErrInvalidCfg,
"maximum number of actions per pool or per account cannot be zero or negative",
)
}
if maxNumActPerPool < maxNumActPerAcct {
return errors.Wrap(
ErrInvalidCfg,
"maximum number of actions per pool cannot be less than maximum number of actions per account",
)
}
return nil
}
// ValidateForkHeights validates the forked heights
func ValidateForkHeights(cfg Config) error {
hu := cfg.Genesis
switch {
case hu.PacificBlockHeight > hu.AleutianBlockHeight:
return errors.Wrap(ErrInvalidCfg, "Pacific is higher than Aleutian")
case hu.AleutianBlockHeight > hu.BeringBlockHeight:
return errors.Wrap(ErrInvalidCfg, "Aleutian is higher than Bering")
case hu.BeringBlockHeight > hu.CookBlockHeight:
return errors.Wrap(ErrInvalidCfg, "Bering is higher than Cook")
case hu.CookBlockHeight > hu.DardanellesBlockHeight:
return errors.Wrap(ErrInvalidCfg, "Cook is higher than Dardanelles")
case hu.DardanellesBlockHeight > hu.DaytonaBlockHeight:
return errors.Wrap(ErrInvalidCfg, "Dardanelles is higher than Daytona")
case hu.DaytonaBlockHeight > hu.EasterBlockHeight:
return errors.Wrap(ErrInvalidCfg, "Daytona is higher than Easter")
case hu.EasterBlockHeight > hu.FbkMigrationBlockHeight:
return errors.Wrap(ErrInvalidCfg, "Easter is higher than FairbankMigration")
case hu.FbkMigrationBlockHeight > hu.FairbankBlockHeight:
return errors.Wrap(ErrInvalidCfg, "FairbankMigration is higher than Fairbank")
case hu.FairbankBlockHeight > hu.GreenlandBlockHeight:
return errors.Wrap(ErrInvalidCfg, "Fairbank is higher than Greenland")
case hu.GreenlandBlockHeight > hu.IcelandBlockHeight:
return errors.Wrap(ErrInvalidCfg, "Greenland is higher than Iceland")
case hu.IcelandBlockHeight > hu.JutlandBlockHeight:
return errors.Wrap(ErrInvalidCfg, "Iceland is higher than Jutland")
case hu.JutlandBlockHeight > hu.KamchatkaBlockHeight:
return errors.Wrap(ErrInvalidCfg, "Jutland is higher than Kamchatka")
case hu.KamchatkaBlockHeight > hu.LordHoweBlockHeight:
return errors.Wrap(ErrInvalidCfg, "Kamchatka is higher than LordHowe")
}
return nil
}
// DoNotValidate validates the given config
func DoNotValidate(cfg Config) error { return nil }
| 1 | 24,128 | yeah, can change to 8545 | iotexproject-iotex-core | go |
@@ -16,6 +16,11 @@ use Ergonode\BatchAction\Domain\Entity\BatchActionId;
use Ergonode\SharedKernel\Domain\AggregateId;
use Symfony\Component\Messenger\Stamp\HandledStamp;
use Ergonode\BatchAction\Domain\Repository\BatchActionRepositoryInterface;
+use Ergonode\Core\Application\Security\User\CachedUser;
+use Ergonode\Account\Domain\Repository\UserRepositoryInterface;
+use Ergonode\SharedKernel\Domain\Aggregate\UserId;
+use Ergonode\BatchAction\Domain\Event\BatchActionEndedEvent;
+use Ergonode\Core\Application\Messenger\Stamp\UserStamp;
class BatchActionTransport implements TransportInterface
{ | 1 | <?php
/**
* Copyright © Ergonode Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types=1);
namespace Ergonode\BatchAction\Application\Transport;
use Symfony\Component\Messenger\Transport\TransportInterface;
use Doctrine\DBAL\Connection;
use Symfony\Component\Messenger\Envelope;
use Ergonode\BatchAction\Domain\Command\ProcessBatchActionEntryCommand;
use Ergonode\BatchAction\Domain\Entity\BatchActionId;
use Ergonode\SharedKernel\Domain\AggregateId;
use Symfony\Component\Messenger\Stamp\HandledStamp;
use Ergonode\BatchAction\Domain\Repository\BatchActionRepositoryInterface;
class BatchActionTransport implements TransportInterface
{
private Connection $connection;
private BatchActionRepositoryInterface $repository;
public function __construct(Connection $connection, BatchActionRepositoryInterface $repository)
{
$this->connection = $connection;
$this->repository = $repository;
}
public function get(): iterable
{
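// Claim at most one pending entry with SELECT ... FOR UPDATE SKIP LOCKED so that
// concurrent consumers never receive the same row; the transaction opened below
// is intentionally left open and committed later in ack() or reject().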
$this->connection->beginTransaction();
$result = [];
$record = $this->connection->executeQuery(
'SELECT batch_action_id, resource_id
FROM batch_action_entry
WHERE processed_at is NULL LIMIT 1 FOR UPDATE SKIP LOCKED'
)->fetchAssociative();
if (!empty($record)) {
echo ($record['resource_id']).PHP_EOL;
$result[] = new Envelope(
new ProcessBatchActionEntryCommand(
new BatchActionId($record['batch_action_id']),
new AggregateId($record['resource_id'])
)
);
} else {
$this->connection->commit();
}
return $result;
}
public function ack(Envelope $envelope): void
{
/** @var ProcessBatchActionEntryCommand $message */
$message = $envelope->getMessage();
/** @var HandledStamp $stamp */
$stamp = $envelope->last(HandledStamp::class);
$this->repository->markEntry($message->getId(), $message->getResourceId(), $stamp->getResult());
$this->connection->commit();
}
public function reject(Envelope $envelope): void
{
$this->connection->commit();
}
public function send(Envelope $envelope): Envelope
{
return $envelope;
}
}
| 1 | 9,624 | As far as I can see you're not rollbacking transaction anywhere. This could potentially cause further issues(and very hard to debug ones) with not synchronized transactions between message handling. Is the transaction required in this place anyway? You seem only to be fetching data here. ack method is performing some updates so it makes sense | ergonode-backend | php |
@@ -16,9 +16,14 @@
package io.servicecomb.transport.rest.vertx;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.stereotype.Component;
+
import io.servicecomb.core.Const;
import io.servicecomb.core.Invocation;
import io.servicecomb.core.transport.AbstractTransport;
+import io.servicecomb.foundation.common.net.NetUtils;
import io.servicecomb.foundation.common.net.URIEndpointObject;
import io.servicecomb.foundation.vertx.SimpleJsonObject;
import io.servicecomb.foundation.vertx.VertxUtils; | 1 | /*
* Copyright 2017 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.transport.rest.vertx;
import io.servicecomb.core.Const;
import io.servicecomb.core.Invocation;
import io.servicecomb.core.transport.AbstractTransport;
import io.servicecomb.foundation.common.net.URIEndpointObject;
import io.servicecomb.foundation.vertx.SimpleJsonObject;
import io.servicecomb.foundation.vertx.VertxUtils;
import io.servicecomb.swagger.invocation.AsyncResponse;
import io.servicecomb.transport.rest.client.RestTransportClient;
import io.servicecomb.transport.rest.client.RestTransportClientManager;
import io.vertx.core.DeploymentOptions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
@Component
public class VertxRestTransport extends AbstractTransport {
private static final Logger log = LoggerFactory.getLogger(VertxRestTransport.class);
@Override
public String getName() {
return Const.RESTFUL;
}
@Override
public boolean init() throws Exception {
// deploy the transport server
DeploymentOptions options = new DeploymentOptions().setInstances(TransportConfig.getThreadCount());
setListenAddressWithoutSchema(TransportConfig.getAddress());
SimpleJsonObject json = new SimpleJsonObject();
json.put(ENDPOINT_KEY, getEndpoint());
options.setConfig(json);
return VertxUtils.blockDeploy(transportVertx, RestServerVerticle.class, options) && deployClient();
}
private boolean deployClient() {
return RestTransportClientManager.INSTANCE.getRestTransportClient(true) != null &&
RestTransportClientManager.INSTANCE.getRestTransportClient(false) != null;
}
@Override
public void send(Invocation invocation, AsyncResponse asyncResp) throws Exception {
URIEndpointObject endpoint = (URIEndpointObject) invocation.getEndpoint().getAddress();
RestTransportClient client =
RestTransportClientManager.INSTANCE.getRestTransportClient(endpoint.isSslEnabled());
log.debug("Sending request by rest to endpoint {}:{}", endpoint.getHostOrIp(), endpoint.getPort());
client.send(invocation, asyncResp);
}
}
| 1 | 6,714 | What's the meaning of null URIEndpointObject? | apache-servicecomb-java-chassis | java |
@@ -4,14 +4,17 @@ import os
import sys
import logging
import logging.config
-from six.moves import input, configparser
+
from kinto.core import scripts
from pyramid.scripts import pserve
from pyramid.paster import bootstrap
from kinto import __version__
from kinto.config import init
-CONFIG_FILE = 'config/kinto.ini'
+DEFAULT_CONFIG_FILE = 'config/kinto.ini'
+DEFAULT_PORT = 8888
+DEFAULT_LOG_LEVEL = logging.INFO
+DEFAULT_LOG_FORMAT = "%(levelname)-5.5s %(message)s"
def main(args=None): | 1 | from __future__ import print_function
import argparse
import os
import sys
import logging
import logging.config
from six.moves import input, configparser
from kinto.core import scripts
from pyramid.scripts import pserve
from pyramid.paster import bootstrap
from kinto import __version__
from kinto.config import init
CONFIG_FILE = 'config/kinto.ini'
def main(args=None):
"""The main routine."""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description="Kinto commands")
parser.add_argument('--ini',
help='Application configuration file',
dest='ini_file',
required=False,
default=CONFIG_FILE)
parser.add_argument('--backend',
help='Specify backend',
dest='backend',
required=False,
default=None)
parser.add_argument('-v', '--version',
action='version', version=__version__,
help='Print the Kinto version and exit.')
subparsers = parser.add_subparsers(title='subcommands',
description='valid subcommands',
dest='subcommand',
help='init/start/migrate')
subparsers.required = True
parser_init = subparsers.add_parser('init')
parser_init.set_defaults(which='init')
parser_migrate = subparsers.add_parser('migrate')
parser_migrate.add_argument('--dry-run',
action='store_true',
help='Simulate the migration operations '
'and show information',
dest='dry_run',
required=False,
default=False)
parser_migrate.set_defaults(which='migrate')
parser_start = subparsers.add_parser('start')
parser_start.add_argument('--reload',
action='store_true',
help='Restart when code or config changes',
required=False,
default=False)
parser_start.add_argument('--port',
type=int,
help='Listening port number',
required=False,
default=8888)
parser_start.set_defaults(which='start')
parsed_args = vars(parser.parse_args(args))
config_file = parsed_args['ini_file']
if parsed_args['which'] == 'init':
if os.path.exists(config_file):
print("%s already exists." % config_file, file=sys.stderr)
return 1
backend = parsed_args['backend']
if not backend:
while True:
prompt = ("Select the backend you would like to use: "
"(1 - postgresql, 2 - redis, default - memory) ")
answer = input(prompt).strip()
try:
backends = {"1": "postgresql", "2": "redis", "": "memory"}
backend = backends[answer]
break
except KeyError:
pass
init(config_file, backend)
# Install postgresql libraries if necessary
if backend == "postgresql":
try:
import psycopg2 # NOQA
except ImportError:
import pip
pip.main(['install', "kinto[postgresql]"])
elif parsed_args['which'] == 'migrate':
dry_run = parsed_args['dry_run']
try:
logging.config.fileConfig(config_file)
except configparser.NoSectionError as e: # pragma: no cover
print(e)
env = bootstrap(config_file)
scripts.migrate(env, dry_run=dry_run)
elif parsed_args['which'] == 'start':
pserve_argv = ['pserve', config_file]
if parsed_args['reload']:
pserve_argv.append('--reload')
pserve_argv.append('http_port=%s' % parsed_args['port'])
pserve.main(pserve_argv)
return 0
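# Usage sketch (illustrative): the subcommands wired above map to shell
# invocations such as:
#
# kinto init --ini config/kinto.ini --backend postgresql
# kinto migrate --ini config/kinto.ini --dry-run
# kinto start --ini config/kinto.ini --port 8888 --reload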
| 1 | 9,521 | Why did you remove the time? It can be useful if some actions are ran in jobs? | Kinto-kinto | py |
@@ -32,7 +32,7 @@ import (
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
- mega "github.com/t3rm1n4l/go-mega"
+ "github.com/t3rm1n4l/go-mega"
)
const ( | 1 | // Package mega provides an interface to the Mega
// object storage system.
package mega
/*
Open questions
* Does mega support a content hash - what exactly are the mega hashes?
* Can mega support setting modification times?
Improvements:
* Uploads could be done in parallel
* Downloads would be more efficient done in one go
* Uploads would be more efficient with bigger chunks
* Looks like mega can support server side copy, but it isn't implemented in go-mega
* Upload can set modtime... - set as int64_t - can set ctime and mtime?
*/
import (
"fmt"
"io"
"path"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
mega "github.com/t3rm1n4l/go-mega"
)
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
eventWaitTime = 500 * time.Millisecond
decayConstant = 2 // bigger for slower decay, exponential
)
var (
megaCacheMu sync.Mutex // mutex for the below
megaCache = map[string]*mega.Mega{} // cache logged in Mega's by user
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "mega",
Description: "Mega",
NewFs: NewFs,
Options: []fs.Option{{
Name: "user",
Help: "User name",
Required: true,
}, {
Name: "pass",
Help: "Password.",
Required: true,
IsPassword: true,
}, {
Name: "debug",
Help: `Output more debug from Mega.
If this flag is set (along with -vv) it will print further debugging
information from the mega backend.`,
Default: false,
Advanced: true,
}, {
Name: "hard_delete",
Help: `Delete files permanently rather than putting them into the trash.
Normally the mega backend will put all deletions into the trash rather
than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
Default: false,
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
}
// Fs represents a remote mega
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed config options
features *fs.Features // optional features
srv *mega.Mega // the connection to the server
pacer *pacer.Pacer // pacer for API calls
rootNodeMu sync.Mutex // mutex for _rootNode
_rootNode *mega.Node // root node - call findRoot to use this
mkdirMu sync.Mutex // used to serialize calls to mkdir / rmdir
}
// Object describes a mega object
//
// Will definitely have info but maybe not meta
//
// Normally rclone would just store an ID here but go-mega and mega.nz
// expect you to build an entire tree of all the objects in memory.
// In this case we just store a pointer to the object.
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
info *mega.Node // pointer to the mega node
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("mega root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses an mega 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
// Let the mega library handle the low level retries
return false, err
/*
switch errors.Cause(err) {
case mega.EAGAIN, mega.ERATELIMIT, mega.ETEMPUNAVAIL:
return true, err
}
return fserrors.ShouldRetry(err), err
*/
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
rootNode, err := f.findRoot(false)
if err != nil {
return nil, err
}
return f.findObject(rootNode, remote)
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.Pass != "" {
var err error
opt.Pass, err = obscure.Reveal(opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "couldn't decrypt password")
}
}
// cache *mega.Mega on username so we can re-use and share
// them between remotes. They are expensive to make as they
// contain all the objects and sharing the objects makes the
// move code easier as we don't have to worry about mixing
// them up between different remotes.
megaCacheMu.Lock()
defer megaCacheMu.Unlock()
srv := megaCache[opt.User]
if srv == nil {
srv = mega.New().SetClient(fshttp.NewClient(fs.Config))
srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries
srv.SetLogger(func(format string, v ...interface{}) {
fs.Infof("*go-mega*", format, v...)
})
if opt.Debug {
srv.SetDebugger(func(format string, v ...interface{}) {
fs.Debugf("*go-mega*", format, v...)
})
}
err := srv.Login(opt.User, opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "couldn't login")
}
megaCache[opt.User] = srv
}
root = parsePath(root)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: srv,
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
// Find the root node and check if it is a file or not
_, err = f.findRoot(false)
switch err {
case nil:
// root node found and is a directory
case fs.ErrorDirNotFound:
// root node not found, so can't be a file
case fs.ErrorIsFile:
// root node is a file so point to parent directory
root = path.Dir(root)
if root == "." {
root = ""
}
f.root = root
return f, err
}
return f, nil
}
// splitNodePath splits nodePath into / separated parts, returning nil if it
// should refer to the root
func splitNodePath(nodePath string) (parts []string) {
nodePath = path.Clean(nodePath)
parts = strings.Split(nodePath, "/")
if len(parts) == 1 && (parts[0] == "." || parts[0] == "/") {
return nil
}
return parts
}
// findNode looks up the node for the path of the name given from the root given
//
// It returns mega.ENOENT if it wasn't found
func (f *Fs) findNode(rootNode *mega.Node, nodePath string) (*mega.Node, error) {
parts := splitNodePath(nodePath)
if parts == nil {
return rootNode, nil
}
nodes, err := f.srv.FS.PathLookup(rootNode, parts)
if err != nil {
return nil, err
}
return nodes[len(nodes)-1], nil
}
// findDir finds the directory rooted from the node passed in
func (f *Fs) findDir(rootNode *mega.Node, dir string) (node *mega.Node, err error) {
node, err = f.findNode(rootNode, dir)
if err == mega.ENOENT {
return nil, fs.ErrorDirNotFound
} else if err == nil && node.GetType() == mega.FILE {
return nil, fs.ErrorIsFile
}
return node, err
}
// findObject looks up the node for the object of the name given
func (f *Fs) findObject(rootNode *mega.Node, file string) (node *mega.Node, err error) {
node, err = f.findNode(rootNode, file)
if err == mega.ENOENT {
return nil, fs.ErrorObjectNotFound
} else if err == nil && node.GetType() != mega.FILE {
return nil, fs.ErrorNotAFile
}
return node, err
}
// lookupDir looks up the node for the directory of the name given
//
// if create is true it tries to create the root directory if not found
func (f *Fs) lookupDir(dir string) (*mega.Node, error) {
rootNode, err := f.findRoot(false)
if err != nil {
return nil, err
}
return f.findDir(rootNode, dir)
}
// lookupParentDir finds the parent node for the remote passed in
func (f *Fs) lookupParentDir(remote string) (dirNode *mega.Node, leaf string, err error) {
parent, leaf := path.Split(remote)
dirNode, err = f.lookupDir(parent)
return dirNode, leaf, err
}
// mkdir makes the directory and any parent directories for the
// directory of the name given
func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error) {
f.mkdirMu.Lock()
defer f.mkdirMu.Unlock()
parts := splitNodePath(dir)
if parts == nil {
return rootNode, nil
}
var i int
// look up until we find a directory which exists
for i = 0; i <= len(parts); i++ {
var nodes []*mega.Node
nodes, err = f.srv.FS.PathLookup(rootNode, parts[:len(parts)-i])
if err == nil {
if len(nodes) == 0 {
node = rootNode
} else {
node = nodes[len(nodes)-1]
}
break
}
if err != mega.ENOENT {
return nil, errors.Wrap(err, "mkdir lookup failed")
}
}
if err != nil {
return nil, errors.Wrap(err, "internal error: mkdir called with non existent root node")
}
// i is number of directories to create (may be 0)
// node is directory to create them from
for _, name := range parts[len(parts)-i:] {
// create directory called name in node
err = f.pacer.Call(func() (bool, error) {
node, err = f.srv.CreateDir(name, node)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "mkdir create node failed")
}
}
return node, nil
}
// mkdirParent creates the parent directory of remote
func (f *Fs) mkdirParent(remote string) (dirNode *mega.Node, leaf string, err error) {
rootNode, err := f.findRoot(true)
if err != nil {
return nil, "", err
}
parent, leaf := path.Split(remote)
dirNode, err = f.mkdir(rootNode, parent)
return dirNode, leaf, err
}
// findRoot looks up the root directory node and returns it.
//
// if create is true it tries to create the root directory if not found
func (f *Fs) findRoot(create bool) (*mega.Node, error) {
f.rootNodeMu.Lock()
defer f.rootNodeMu.Unlock()
// Check if we haven't found it already
if f._rootNode != nil {
return f._rootNode, nil
}
// Check for pre-existing root
absRoot := f.srv.FS.GetRoot()
node, err := f.findDir(absRoot, f.root)
//log.Printf("findRoot findDir %p %v", node, err)
if err == nil {
f._rootNode = node
return node, nil
}
if !create || err != fs.ErrorDirNotFound {
return nil, err
}
//..not found so create the root directory
f._rootNode, err = f.mkdir(absRoot, f.root)
return f._rootNode, err
}
// clearRoot unsets the root directory
func (f *Fs) clearRoot() {
f.rootNodeMu.Lock()
f._rootNode = nil
f.rootNodeMu.Unlock()
//log.Printf("cleared root directory")
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
var err error
if info != nil {
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData() // reads info and meta, returning an error
}
if err != nil {
return nil, err
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listFn func(*mega.Node) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) list(dir *mega.Node, fn listFn) (found bool, err error) {
nodes, err := f.srv.FS.GetChildren(dir)
if err != nil {
return false, errors.Wrapf(err, "list failed")
}
for _, item := range nodes {
if fn(item) {
found = true
break
}
}
return
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
dirNode, err := f.lookupDir(dir)
if err != nil {
return nil, err
}
var iErr error
_, err = f.list(dirNode, func(info *mega.Node) bool {
remote := path.Join(dir, info.GetName())
switch info.GetType() {
case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH:
d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
entries = append(entries, d)
case mega.FILE:
o, err := f.newObjectWithInfo(remote, info)
if err != nil {
iErr = err
return true
}
entries = append(entries, o)
}
return false
})
if err != nil {
return nil, err
}
if iErr != nil {
return nil, iErr
}
return entries, nil
}
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the dirNode, object, leaf and error
//
// Used to create new objects
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
dirNode, leaf, err = f.mkdirParent(remote)
if err != nil {
return nil, nil, leaf, err
}
// Temporary Object under construction
o = &Object{
fs: f,
remote: remote,
}
return o, dirNode, leaf, nil
}
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
switch err {
case nil:
return existingObj, existingObj.Update(in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(in, src)
default:
return nil, err
}
}
// PutUnchecked the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
modTime := src.ModTime()
o, _, _, err := f.createObject(remote, modTime, size)
if err != nil {
return nil, err
}
return o, o.Update(in, src, options...)
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
rootNode, err := f.findRoot(true)
if err != nil {
return err
}
_, err = f.mkdir(rootNode, dir)
return errors.Wrap(err, "Mkdir failed")
}
// deleteNode removes a file or directory, observing useTrash
func (f *Fs) deleteNode(node *mega.Node) (err error) {
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Delete(node, f.opt.HardDelete)
return shouldRetry(err)
})
return err
}
// purgeCheck removes the directory dir, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(dir string, check bool) error {
f.mkdirMu.Lock()
defer f.mkdirMu.Unlock()
rootNode, err := f.findRoot(false)
if err != nil {
return err
}
dirNode, err := f.findDir(rootNode, dir)
if err != nil {
return err
}
if check {
children, err := f.srv.FS.GetChildren(dirNode)
if err != nil {
return errors.Wrap(err, "purgeCheck GetChildren failed")
}
if len(children) > 0 {
return fs.ErrorDirectoryNotEmpty
}
}
waitEvent := f.srv.WaitEventsStart()
err = f.deleteNode(dirNode)
if err != nil {
return errors.Wrap(err, "delete directory node failed")
}
// Remove the root node if we just deleted it
if dirNode == rootNode {
f.clearRoot()
}
f.srv.WaitEvents(waitEvent, eventWaitTime)
return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
return f.purgeCheck(dir, true)
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
return f.purgeCheck("", false)
}
// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)
//
// info will be updated
func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
var (
dstFs = f
srcDirNode, dstDirNode *mega.Node
srcParent, dstParent string
srcLeaf, dstLeaf string
)
if dstRemote != "" {
// lookup or create the destination parent directory
dstDirNode, dstLeaf, err = dstFs.mkdirParent(dstRemote)
} else {
// find or create the parent of the root directory
absRoot := dstFs.srv.FS.GetRoot()
dstParent, dstLeaf = path.Split(dstFs.root)
dstDirNode, err = dstFs.mkdir(absRoot, dstParent)
}
if err != nil {
return errors.Wrap(err, "server side move failed to make dst parent dir")
}
if srcRemote != "" {
// lookup the existing parent directory
srcDirNode, srcLeaf, err = srcFs.lookupParentDir(srcRemote)
} else {
// lookup the existing root parent
absRoot := srcFs.srv.FS.GetRoot()
srcParent, srcLeaf = path.Split(srcFs.root)
srcDirNode, err = f.findDir(absRoot, srcParent)
}
if err != nil {
return errors.Wrap(err, "server side move failed to lookup src parent dir")
}
// move the object into its new directory if required
if srcDirNode != dstDirNode && srcDirNode.GetHash() != dstDirNode.GetHash() {
//log.Printf("move src %p %q dst %p %q", srcDirNode, srcDirNode.GetName(), dstDirNode, dstDirNode.GetName())
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Move(info, dstDirNode)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "server side move failed")
}
}
waitEvent := f.srv.WaitEventsStart()
// rename the object if required
if srcLeaf != dstLeaf {
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Rename(info, dstLeaf)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "server side rename failed")
}
}
f.srv.WaitEvents(waitEvent, eventWaitTime)
return nil
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
dstFs := f
//log.Printf("Move %q -> %q", src.Remote(), remote)
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Do the move
err := f.move(remote, srcObj.fs, srcObj.remote, srcObj.info)
if err != nil {
return nil, err
}
// Create a destination object
dstObj := &Object{
fs: dstFs,
remote: remote,
info: srcObj.info,
}
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
dstFs := f
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
// find the source
info, err := srcFs.lookupDir(srcRemote)
if err != nil {
return err
}
// check the destination doesn't exist
_, err = dstFs.lookupDir(dstRemote)
if err == nil {
return fs.ErrorDirExists
} else if err != fs.ErrorDirNotFound {
return errors.Wrap(err, "DirMove error while checking dest directory")
}
// Do the move
err = f.move(dstRemote, srcFs, srcRemote, info)
if err != nil {
return err
}
// Clear src if it was the root
if srcRemote == "" {
srcFs.clearRoot()
}
return nil
}
// DirCacheFlush an optional interface to flush internal directory cache
func (f *Fs) DirCacheFlush() {
// f.dirCache.ResetRoot()
// FIXME Flush the mega somehow?
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(remote string) (link string, err error) {
root, err := f.findRoot(false)
if err != nil {
return "", errors.Wrap(err, "PublicLink failed to find root node")
}
node, err := f.findNode(root, remote)
if err != nil {
return "", errors.Wrap(err, "PublicLink failed to find path")
}
link, err = f.srv.Link(node, true)
if err != nil {
return "", errors.Wrap(err, "PublicLink failed to create link")
}
return link, nil
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(dirs []fs.Directory) error {
if len(dirs) < 2 {
return nil
}
// find dst directory
dstDir := dirs[0]
dstDirNode := f.srv.FS.HashLookup(dstDir.ID())
if dstDirNode == nil {
return errors.Errorf("MergeDirs failed to find node for: %v", dstDir)
}
for _, srcDir := range dirs[1:] {
// find src directory
srcDirNode := f.srv.FS.HashLookup(srcDir.ID())
if srcDirNode == nil {
return errors.Errorf("MergeDirs failed to find node for: %v", srcDir)
}
// list the objects
infos := []*mega.Node{}
_, err := f.list(srcDirNode, func(info *mega.Node) bool {
infos = append(infos, info)
return false
})
if err != nil {
return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
}
// move them into place
for _, info := range infos {
fs.Infof(srcDir, "merging %q", info.GetName())
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Move(info, dstDirNode)
return shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.GetName(), srcDir)
}
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.deleteNode(srcDirNode)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
}
}
return nil
}
// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
var q mega.QuotaResp
var err error
err = f.pacer.Call(func() (bool, error) {
q, err = f.srv.GetQuota()
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get Mega Quota")
}
usage := &fs.Usage{
Total: fs.NewUsageValue(int64(q.Mstrg)), // quota of bytes that can be used
Used: fs.NewUsageValue(int64(q.Cstrg)), // bytes in use
Free: fs.NewUsageValue(int64(q.Mstrg - q.Cstrg)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the hashes of an object
func (o *Object) Hash(t hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.info.GetSize()
}
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *mega.Node) (err error) {
if info.GetType() != mega.FILE {
return fs.ErrorNotAFile
}
o.info = info
return nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if o.info != nil {
return nil
}
info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil {
if err == fs.ErrorDirNotFound {
err = fs.ErrorObjectNotFound
}
return err
}
return o.setMetaData(info)
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
return o.info.GetTimeStamp()
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
return true
}
// openObject represents a download in progress
type openObject struct {
mu sync.Mutex
o *Object
d *mega.Download
id int
skip int64
chunk []byte
closed bool
}
// get the next chunk
func (oo *openObject) getChunk() (err error) {
if oo.id >= oo.d.Chunks() {
return io.EOF
}
var chunk []byte
err = oo.o.fs.pacer.Call(func() (bool, error) {
chunk, err = oo.d.DownloadChunk(oo.id)
return shouldRetry(err)
})
if err != nil {
return err
}
oo.id++
oo.chunk = chunk
return nil
}
// Read reads up to len(p) bytes into p.
func (oo *openObject) Read(p []byte) (n int, err error) {
oo.mu.Lock()
defer oo.mu.Unlock()
if oo.closed {
return 0, errors.New("read on closed file")
}
// Skip data at the start if requested
for oo.skip > 0 {
_, size, err := oo.d.ChunkLocation(oo.id)
if err != nil {
return 0, err
}
if oo.skip < int64(size) {
break
}
oo.id++
oo.skip -= int64(size)
}
if len(oo.chunk) == 0 {
err = oo.getChunk()
if err != nil {
return 0, err
}
if oo.skip > 0 {
oo.chunk = oo.chunk[oo.skip:]
oo.skip = 0
}
}
n = copy(p, oo.chunk)
oo.chunk = oo.chunk[n:]
return n, nil
}
// Close closes the file - MAC errors are reported here
func (oo *openObject) Close() (err error) {
oo.mu.Lock()
defer oo.mu.Unlock()
if oo.closed {
return nil
}
err = oo.o.fs.pacer.Call(func() (bool, error) {
err = oo.d.Finish()
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to finish download")
}
oo.closed = true
return nil
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
var d *mega.Download
err = o.fs.pacer.Call(func() (bool, error) {
d, err = o.fs.srv.NewDownload(o.info)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "open download file failed")
}
oo := &openObject{
o: o,
d: d,
skip: offset,
}
return readers.NewLimitedReadCloser(oo, limit), nil
}
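// Usage sketch (illustrative, not part of the original file): reading an object
// goes through the chunked downloader above; Close must be called so the MAC
// check in openObject.Close runs.
//
//	rc, err := o.Open()
//	if err != nil {
//		return err
//	}
//	defer rc.Close()
//	data, err := ioutil.ReadAll(rc) // ioutil shown for brevity in this sketch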
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
size := src.Size()
if size < 0 {
return errors.New("mega backend can't upload a file of unknown length")
}
//modTime := src.ModTime()
remote := o.Remote()
// Create the parent directory
dirNode, leaf, err := o.fs.mkdirParent(remote)
if err != nil {
return errors.Wrap(err, "update make parent dir failed")
}
var u *mega.Upload
err = o.fs.pacer.Call(func() (bool, error) {
u, err = o.fs.srv.NewUpload(dirNode, leaf, size)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "upload file failed to create session")
}
// Upload the chunks
// FIXME do this in parallel
for id := 0; id < u.Chunks(); id++ {
_, chunkSize, err := u.ChunkLocation(id)
if err != nil {
return errors.Wrap(err, "upload failed to read chunk location")
}
chunk := make([]byte, chunkSize)
_, err = io.ReadFull(in, chunk)
if err != nil {
return errors.Wrap(err, "upload failed to read data")
}
err = o.fs.pacer.Call(func() (bool, error) {
err = u.UploadChunk(id, chunk)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "upload file failed to upload chunk")
}
}
// Finish the upload
var info *mega.Node
err = o.fs.pacer.Call(func() (bool, error) {
info, err = u.Finish()
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to finish upload")
}
// If the upload succeeded and the original object existed, then delete it
if o.info != nil {
err = o.fs.deleteNode(o.info)
if err != nil {
return errors.Wrap(err, "upload failed to remove old version")
}
o.info = nil
}
return o.setMetaData(info)
}
// Remove an object
func (o *Object) Remove() error {
err := o.fs.deleteNode(o.info)
if err != nil {
return errors.Wrap(err, "Remove object failed")
}
return nil
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.info.GetHash()
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
| 1 | 8,103 | File is not `goimports`-ed (from `goimports`) | rclone-rclone | go |
@@ -7,12 +7,12 @@ import (
"math/big"
"strings"
- ethereum "github.com/sonm-io/go-ethereum"
- "github.com/sonm-io/go-ethereum/accounts/abi"
- "github.com/sonm-io/go-ethereum/accounts/abi/bind"
- "github.com/sonm-io/go-ethereum/common"
- "github.com/sonm-io/go-ethereum/core/types"
- "github.com/sonm-io/go-ethereum/event"
+ ethereum "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/event"
)
// BasicTokenABI is the input ABI used to generate the binding from. | 1 | // Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package api
import (
"math/big"
"strings"
ethereum "github.com/sonm-io/go-ethereum"
"github.com/sonm-io/go-ethereum/accounts/abi"
"github.com/sonm-io/go-ethereum/accounts/abi/bind"
"github.com/sonm-io/go-ethereum/common"
"github.com/sonm-io/go-ethereum/core/types"
"github.com/sonm-io/go-ethereum/event"
)
// BasicTokenABI is the input ABI used to generate the binding from.
const BasicTokenABI = "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"Transfer\",\"type\":\"event\"},{\"constant\":true,\"inputs\":[],\"name\":\"totalSupply\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_to\",\"type\":\"address\"},{\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"transfer\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_owner\",\"type\":\"address\"}],\"name\":\"balanceOf\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"}]"
// BasicTokenBin is the compiled bytecode used for deploying new contracts.
const BasicTokenBin = `0x608060405234801561001057600080fd5b5061027c806100206000396000f3006080604052600436106100565763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166318160ddd811461005b57806370a0823114610082578063a9059cbb146100b0575b600080fd5b34801561006757600080fd5b506100706100f5565b60408051918252519081900360200190f35b34801561008e57600080fd5b5061007073ffffffffffffffffffffffffffffffffffffffff600435166100fb565b3480156100bc57600080fd5b506100e173ffffffffffffffffffffffffffffffffffffffff60043516602435610123565b604080519115158252519081900360200190f35b60015490565b73ffffffffffffffffffffffffffffffffffffffff1660009081526020819052604090205490565b600073ffffffffffffffffffffffffffffffffffffffff8316151561014757600080fd5b3360009081526020819052604090205482111561016357600080fd5b33600090815260208190526040902054610183908363ffffffff61022b16565b336000908152602081905260408082209290925573ffffffffffffffffffffffffffffffffffffffff8516815220546101c2908363ffffffff61023d16565b73ffffffffffffffffffffffffffffffffffffffff8416600081815260208181526040918290209390935580518581529051919233927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9281900390910190a350600192915050565b60008282111561023757fe5b50900390565b8181018281101561024a57fe5b929150505600a165627a7a72305820d288a21974cc5b2c09c426758408b34628caab28400a968a5b9ef48f094a973f0029`
// DeployBasicToken deploys a new Ethereum contract, binding an instance of BasicToken to it.
func DeployBasicToken(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *BasicToken, error) {
parsed, err := abi.JSON(strings.NewReader(BasicTokenABI))
if err != nil {
return common.Address{}, nil, nil, err
}
address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(BasicTokenBin), backend)
if err != nil {
return common.Address{}, nil, nil, err
}
return address, tx, &BasicToken{BasicTokenCaller: BasicTokenCaller{contract: contract}, BasicTokenTransactor: BasicTokenTransactor{contract: contract}, BasicTokenFilterer: BasicTokenFilterer{contract: contract}}, nil
}
// BasicToken is an auto generated Go binding around an Ethereum contract.
type BasicToken struct {
BasicTokenCaller // Read-only binding to the contract
BasicTokenTransactor // Write-only binding to the contract
BasicTokenFilterer // Log filterer for contract events
}
// BasicTokenCaller is an auto generated read-only Go binding around an Ethereum contract.
type BasicTokenCaller struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// BasicTokenTransactor is an auto generated write-only Go binding around an Ethereum contract.
type BasicTokenTransactor struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// BasicTokenFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
type BasicTokenFilterer struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// BasicTokenSession is an auto generated Go binding around an Ethereum contract,
// with pre-set call and transact options.
type BasicTokenSession struct {
Contract *BasicToken // Generic contract binding to set the session for
CallOpts bind.CallOpts // Call options to use throughout this session
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
}
// BasicTokenCallerSession is an auto generated read-only Go binding around an Ethereum contract,
// with pre-set call options.
type BasicTokenCallerSession struct {
Contract *BasicTokenCaller // Generic contract caller binding to set the session for
CallOpts bind.CallOpts // Call options to use throughout this session
}
// BasicTokenTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
// with pre-set transact options.
type BasicTokenTransactorSession struct {
Contract *BasicTokenTransactor // Generic contract transactor binding to set the session for
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
}
// BasicTokenRaw is an auto generated low-level Go binding around an Ethereum contract.
type BasicTokenRaw struct {
Contract *BasicToken // Generic contract binding to access the raw methods on
}
// BasicTokenCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
type BasicTokenCallerRaw struct {
Contract *BasicTokenCaller // Generic read-only contract binding to access the raw methods on
}
// BasicTokenTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
type BasicTokenTransactorRaw struct {
Contract *BasicTokenTransactor // Generic write-only contract binding to access the raw methods on
}
// NewBasicToken creates a new instance of BasicToken, bound to a specific deployed contract.
func NewBasicToken(address common.Address, backend bind.ContractBackend) (*BasicToken, error) {
contract, err := bindBasicToken(address, backend, backend, backend)
if err != nil {
return nil, err
}
return &BasicToken{BasicTokenCaller: BasicTokenCaller{contract: contract}, BasicTokenTransactor: BasicTokenTransactor{contract: contract}, BasicTokenFilterer: BasicTokenFilterer{contract: contract}}, nil
}
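// Usage sketch (illustrative; the RPC endpoint and zero addresses are placeholders,
// and ethclient refers to the upstream go-ethereum client package):
//
//	client, err := ethclient.Dial("http://127.0.0.1:8545")
//	if err != nil {
//		return err
//	}
//	token, err := NewBasicToken(common.HexToAddress("0x0000000000000000000000000000000000000000"), client)
//	if err != nil {
//		return err
//	}
//	balance, err := token.BalanceOf(&bind.CallOpts{}, common.HexToAddress("0x0000000000000000000000000000000000000000"))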
// NewBasicTokenCaller creates a new read-only instance of BasicToken, bound to a specific deployed contract.
func NewBasicTokenCaller(address common.Address, caller bind.ContractCaller) (*BasicTokenCaller, error) {
contract, err := bindBasicToken(address, caller, nil, nil)
if err != nil {
return nil, err
}
return &BasicTokenCaller{contract: contract}, nil
}
// NewBasicTokenTransactor creates a new write-only instance of BasicToken, bound to a specific deployed contract.
func NewBasicTokenTransactor(address common.Address, transactor bind.ContractTransactor) (*BasicTokenTransactor, error) {
contract, err := bindBasicToken(address, nil, transactor, nil)
if err != nil {
return nil, err
}
return &BasicTokenTransactor{contract: contract}, nil
}
// NewBasicTokenFilterer creates a new log filterer instance of BasicToken, bound to a specific deployed contract.
func NewBasicTokenFilterer(address common.Address, filterer bind.ContractFilterer) (*BasicTokenFilterer, error) {
contract, err := bindBasicToken(address, nil, nil, filterer)
if err != nil {
return nil, err
}
return &BasicTokenFilterer{contract: contract}, nil
}
// bindBasicToken binds a generic wrapper to an already deployed contract.
func bindBasicToken(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
parsed, err := abi.JSON(strings.NewReader(BasicTokenABI))
if err != nil {
return nil, err
}
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_BasicToken *BasicTokenRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
return _BasicToken.Contract.BasicTokenCaller.contract.Call(opts, result, method, params...)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (_BasicToken *BasicTokenRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
return _BasicToken.Contract.BasicTokenTransactor.contract.Transfer(opts)
}
// Transact invokes the (paid) contract method with params as input values.
func (_BasicToken *BasicTokenRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
return _BasicToken.Contract.BasicTokenTransactor.contract.Transact(opts, method, params...)
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_BasicToken *BasicTokenCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
return _BasicToken.Contract.contract.Call(opts, result, method, params...)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (_BasicToken *BasicTokenTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
return _BasicToken.Contract.contract.Transfer(opts)
}
// Transact invokes the (paid) contract method with params as input values.
func (_BasicToken *BasicTokenTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
return _BasicToken.Contract.contract.Transact(opts, method, params...)
}
// BalanceOf is a free data retrieval call binding the contract method 0x70a08231.
//
// Solidity: function balanceOf(_owner address) constant returns(uint256)
func (_BasicToken *BasicTokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {
var (
ret0 = new(*big.Int)
)
out := ret0
err := _BasicToken.contract.Call(opts, out, "balanceOf", _owner)
return *ret0, err
}
// BalanceOf is a free data retrieval call binding the contract method 0x70a08231.
//
// Solidity: function balanceOf(_owner address) constant returns(uint256)
func (_BasicToken *BasicTokenSession) BalanceOf(_owner common.Address) (*big.Int, error) {
return _BasicToken.Contract.BalanceOf(&_BasicToken.CallOpts, _owner)
}
// BalanceOf is a free data retrieval call binding the contract method 0x70a08231.
//
// Solidity: function balanceOf(_owner address) constant returns(uint256)
func (_BasicToken *BasicTokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {
return _BasicToken.Contract.BalanceOf(&_BasicToken.CallOpts, _owner)
}
// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd.
//
// Solidity: function totalSupply() constant returns(uint256)
func (_BasicToken *BasicTokenCaller) TotalSupply(opts *bind.CallOpts) (*big.Int, error) {
var (
ret0 = new(*big.Int)
)
out := ret0
err := _BasicToken.contract.Call(opts, out, "totalSupply")
return *ret0, err
}
// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd.
//
// Solidity: function totalSupply() constant returns(uint256)
func (_BasicToken *BasicTokenSession) TotalSupply() (*big.Int, error) {
return _BasicToken.Contract.TotalSupply(&_BasicToken.CallOpts)
}
// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd.
//
// Solidity: function totalSupply() constant returns(uint256)
func (_BasicToken *BasicTokenCallerSession) TotalSupply() (*big.Int, error) {
return _BasicToken.Contract.TotalSupply(&_BasicToken.CallOpts)
}
// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb.
//
// Solidity: function transfer(_to address, _value uint256) returns(bool)
func (_BasicToken *BasicTokenTransactor) Transfer(opts *bind.TransactOpts, _to common.Address, _value *big.Int) (*types.Transaction, error) {
return _BasicToken.contract.Transact(opts, "transfer", _to, _value)
}
// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb.
//
// Solidity: function transfer(_to address, _value uint256) returns(bool)
func (_BasicToken *BasicTokenSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {
return _BasicToken.Contract.Transfer(&_BasicToken.TransactOpts, _to, _value)
}
// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb.
//
// Solidity: function transfer(_to address, _value uint256) returns(bool)
func (_BasicToken *BasicTokenTransactorSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {
return _BasicToken.Contract.Transfer(&_BasicToken.TransactOpts, _to, _value)
}
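// Illustrative usage sketch for the typed session wrappers above (assumptions:
// `token` was created with the generated NewBasicToken constructor earlier in
// this file, `auth` is a *bind.TransactOpts for the sending key, and the
// addresses are placeholders):
//
//	session := &BasicTokenSession{Contract: token, CallOpts: bind.CallOpts{}, TransactOpts: *auth}
//	balance, err := session.BalanceOf(common.HexToAddress("0x0000000000000000000000000000000000000000"))
//	tx, err := session.Transfer(common.HexToAddress("0x0000000000000000000000000000000000000000"), big.NewInt(1))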
// BasicTokenTransferIterator is returned from FilterTransfer and is used to iterate over the raw logs and unpacked data for Transfer events raised by the BasicToken contract.
type BasicTokenTransferIterator struct {
Event *BasicTokenTransfer // Event containing the contract specifics and raw log
contract *bind.BoundContract // Generic contract to use for unpacking event data
event string // Event name to use for unpacking event data
logs chan types.Log // Log channel receiving the found contract events
sub ethereum.Subscription // Subscription for errors, completion and termination
done bool // Whether the subscription completed delivering logs
fail error // Occurred error to stop iteration
}
// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *BasicTokenTransferIterator) Next() bool {
// If the iterator failed, stop iterating
if it.fail != nil {
return false
}
// If the iterator completed, deliver directly whatever's available
if it.done {
select {
case log := <-it.logs:
it.Event = new(BasicTokenTransfer)
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
default:
return false
}
}
// Iterator still in progress, wait for either a data or an error event
select {
case log := <-it.logs:
it.Event = new(BasicTokenTransfer)
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
case err := <-it.sub.Err():
it.done = true
it.fail = err
return it.Next()
}
}
// Error returns any retrieval or parsing error occurred during filtering.
func (it *BasicTokenTransferIterator) Error() error {
return it.fail
}
// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *BasicTokenTransferIterator) Close() error {
it.sub.Unsubscribe()
return nil
}
// BasicTokenTransfer represents a Transfer event raised by the BasicToken contract.
type BasicTokenTransfer struct {
From common.Address
To common.Address
Value *big.Int
Raw types.Log // Blockchain specific contextual infos
}
// FilterTransfer is a free log retrieval operation binding the contract event 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef.
//
// Solidity: event Transfer(from indexed address, to indexed address, value uint256)
func (_BasicToken *BasicTokenFilterer) FilterTransfer(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BasicTokenTransferIterator, error) {
var fromRule []interface{}
for _, fromItem := range from {
fromRule = append(fromRule, fromItem)
}
var toRule []interface{}
for _, toItem := range to {
toRule = append(toRule, toItem)
}
logs, sub, err := _BasicToken.contract.FilterLogs(opts, "Transfer", fromRule, toRule)
if err != nil {
return nil, err
}
return &BasicTokenTransferIterator{contract: _BasicToken.contract, event: "Transfer", logs: logs, sub: sub}, nil
}
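// Illustrative usage sketch for FilterTransfer above (assumption: `filterer`
// is a *BasicTokenFilterer bound to a deployed token):
//
//	it, err := filterer.FilterTransfer(&bind.FilterOpts{Start: 0}, nil, nil)
//	if err != nil { /* handle */ }
//	defer it.Close()
//	for it.Next() {
//		fmt.Println(it.Event.From.Hex(), it.Event.To.Hex(), it.Event.Value)
//	}
//	if err := it.Error(); err != nil { /* handle */ }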
// WatchTransfer is a free log subscription operation binding the contract event 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef.
//
// Solidity: event Transfer(from indexed address, to indexed address, value uint256)
func (_BasicToken *BasicTokenFilterer) WatchTransfer(opts *bind.WatchOpts, sink chan<- *BasicTokenTransfer, from []common.Address, to []common.Address) (event.Subscription, error) {
var fromRule []interface{}
for _, fromItem := range from {
fromRule = append(fromRule, fromItem)
}
var toRule []interface{}
for _, toItem := range to {
toRule = append(toRule, toItem)
}
logs, sub, err := _BasicToken.contract.WatchLogs(opts, "Transfer", fromRule, toRule)
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new(BasicTokenTransfer)
if err := _BasicToken.contract.UnpackLog(event, "Transfer", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
| 1 | 7,003 | What the reason to change imports? | sonm-io-core | go |
@@ -83,6 +83,15 @@ public class HiveClientPool extends ClientPool<HiveMetaStoreClient, TException>
return client;
}
+ @Override
+ protected boolean failureDetection(Exception e) {
+ if (super.failureDetection(e) || (e != null && e instanceof MetaException &&
+ e.getMessage().contains("Got exception: org.apache.thrift.transport.TTransportException"))) {
+ return true;
+ }
+ return false;
+ }
+
@Override
protected void close(HiveMetaStoreClient client) {
client.close(); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.hive;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.iceberg.common.DynConstructors;
import org.apache.thrift.TException;
import org.apache.thrift.transport.TTransportException;
public class HiveClientPool extends ClientPool<HiveMetaStoreClient, TException> {
// use appropriate ctor depending on whether we're working with Hive2 or Hive3 dependencies
// we need to do this because there is a breaking API change between Hive2 and Hive3
private static final DynConstructors.Ctor<HiveMetaStoreClient> CLIENT_CTOR = DynConstructors.builder()
.impl(HiveMetaStoreClient.class, HiveConf.class)
.impl(HiveMetaStoreClient.class, Configuration.class)
.build();
private final HiveConf hiveConf;
HiveClientPool(Configuration conf) {
this(conf.getInt("iceberg.hive.client-pool-size", 5), conf);
}
public HiveClientPool(int poolSize, Configuration conf) {
super(poolSize, TTransportException.class);
this.hiveConf = new HiveConf(conf, HiveClientPool.class);
}
@Override
protected HiveMetaStoreClient newClient() {
try {
try {
return CLIENT_CTOR.newInstance(hiveConf);
} catch (RuntimeException e) {
// any MetaException would be wrapped into RuntimeException during reflection, so let's double-check type here
if (e.getCause() instanceof MetaException) {
throw (MetaException) e.getCause();
}
throw e;
}
} catch (MetaException e) {
throw new RuntimeMetaException(e, "Failed to connect to Hive Metastore");
} catch (Throwable t) {
if (t.getMessage().contains("Another instance of Derby may have already booted")) {
throw new RuntimeMetaException(t, "Failed to start an embedded metastore because embedded " +
"Derby supports only one client at a time. To fix this, use a metastore that supports " +
"multiple clients.");
}
throw new RuntimeMetaException(t, "Failed to connect to Hive Metastore");
}
}
@Override
protected HiveMetaStoreClient reconnect(HiveMetaStoreClient client) {
try {
client.close();
client.reconnect();
} catch (MetaException e) {
throw new RuntimeMetaException(e, "Failed to reconnect to Hive Metastore");
}
return client;
}
@Override
protected void close(HiveMetaStoreClient client) {
client.close();
}
}
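// Illustrative sketch only, tied to the review note on this change: the new
// condition from the patch above rewritten with a 4-space continuation indent.
//
//   if (super.failureDetection(e) || (e != null && e instanceof MetaException &&
//       e.getMessage().contains("Got exception: org.apache.thrift.transport.TTransportException"))) {
//     return true;
//   }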
| 1 | 32,392 | Nit: continuation indents are 2 indents / 4 spaces. | apache-iceberg | java |
@@ -666,10 +666,15 @@ class JMX(object):
parsed_url = parse.urlsplit(default_address)
if parsed_url.scheme:
cfg.append(JMX._string_prop("HTTPSampler.protocol", parsed_url.scheme))
- if parsed_url.hostname:
- cfg.append(JMX._string_prop("HTTPSampler.domain", parsed_url.hostname))
- if parsed_url.port:
- cfg.append(JMX._string_prop("HTTPSampler.port", parsed_url.port))
+
+ if parsed_url.netloc:
+ netloc = parsed_url.netloc
+ if ':' in netloc:
+ index = netloc.rfind(':')
+ cfg.append(JMX._string_prop("HTTPSampler.port", netloc[index+1:]))
+ netloc = netloc[:index]
+
+ cfg.append(JMX._string_prop("HTTPSampler.domain", netloc))
if timeout:
cfg.append(JMX._string_prop("HTTPSampler.connect_timeout", timeout)) | 1 | """
Module holds all stuff regarding JMX format
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import traceback
from itertools import chain
from cssselect import GenericTranslator
from bzt.engine import Scenario, BetterDict
from bzt.six import etree, iteritems, string_types, parse, text_type, numeric_types
class JMX(object):
"""
A class to manipulate and generate JMX test plans for JMeter
:param original: path to existing JMX to load. If it is None, then creates
empty test plan
"""
TEST_PLAN_SEL = "jmeterTestPlan>hashTree>hashTree"
THR_GROUP_SEL = TEST_PLAN_SEL + ">hashTree[type=tg]"
def __init__(self, original=None, test_plan_name="BZT Generated Test Plan"):
self.log = logging.getLogger(self.__class__.__name__)
if original:
self.load(original)
else:
root = etree.Element("jmeterTestPlan")
self.tree = etree.ElementTree(root)
test_plan = etree.Element("TestPlan", guiclass="TestPlanGui",
testname=test_plan_name,
testclass="TestPlan")
htree = etree.Element("hashTree")
htree.append(test_plan)
htree.append(etree.Element("hashTree"))
self.append("jmeterTestPlan", htree)
element_prop = self._get_arguments_panel("TestPlan.user_defined_variables")
self.append("jmeterTestPlan>hashTree>TestPlan", element_prop)
def load(self, original):
"""
Load existing JMX file
:param original: JMX file path
:raise RuntimeError: in case of XML parsing error
"""
try:
self.tree = etree.ElementTree()
self.tree.parse(original)
except BaseException as exc:
self.log.debug("XML parsing error: %s", traceback.format_exc())
data = (original, exc)
raise RuntimeError("XML parsing failed for file %s: %s" % data)
def get(self, selector):
"""
Returns tree elements by CSS selector
:type selector: str
:return:
"""
expression = GenericTranslator().css_to_xpath(selector)
nodes = self.tree.xpath(expression)
return nodes
def append(self, selector, node):
"""
        Add node to the container specified by the selector. If multiple nodes
        match the selector, the first of them will be used as the container.
:param selector: CSS selector for container
:param node: Element instance to add
:raise RuntimeError: if container was not found
"""
container = self.get(selector)
if not len(container):
msg = "Failed to find TestPlan node in file: %s"
raise RuntimeError(msg % selector)
container[0].append(node)
def save(self, filename):
"""
Save JMX into file
:param filename:
"""
self.log.debug("Saving JMX to: %s", filename)
with open(filename, "wb") as fhd:
# self.log.debug("\n%s", etree.tostring(self.tree))
self.tree.write(fhd, pretty_print=True, encoding="UTF-8", xml_declaration=True)
def enabled_thread_groups(self, all_types=False):
"""
Get thread groups that are enabled
:type all_types: bool
"""
if all_types:
prefix = r'jmeterTestPlan>hashTree>hashTree>kg\.apc\.jmeter\.threads\.'
ultimate_tgroup = self.get(prefix + 'UltimateThreadGroup')
stepping_tgroup = self.get(prefix + 'SteppingThreadGroup')
tgroups = chain(ultimate_tgroup, stepping_tgroup)
else:
tgroups = self.get('jmeterTestPlan>hashTree>hashTree>ThreadGroup')
for group in tgroups:
if group.get("enabled") != 'false':
yield group
@staticmethod
def _flag(flag_name, bool_value):
"""
Generates element for JMX flag node
:param flag_name:
:param bool_value:
:return:
"""
elm = etree.Element(flag_name)
elm.text = "true" if bool_value else "false"
return elm
@staticmethod
def __jtl_writer(filename, label, flags):
"""
Generates JTL writer
:param filename:
:return:
"""
jtl = etree.Element("stringProp", {"name": "filename"})
jtl.text = filename
name = etree.Element("name")
name.text = "saveConfig"
value = etree.Element("value")
value.set("class", "SampleSaveConfiguration")
for key, val in iteritems(flags):
value.append(JMX._flag(key, val))
obj_prop = etree.Element("objProp")
obj_prop.append(name)
obj_prop.append(value)
listener = etree.Element("ResultCollector",
testname=label,
testclass="ResultCollector",
guiclass="SimpleDataWriter")
listener.append(jtl)
listener.append(obj_prop)
return listener
@staticmethod
def new_kpi_listener(filename):
"""
Generates listener for writing basic KPI data in CSV format
:param filename:
:return:
"""
flags = {
"xml": False,
"fieldNames": True,
"time": True,
"timestamp": True,
"latency": True,
"success": True,
"label": True,
"code": True,
"message": True,
"threadName": True,
"dataType": False,
"encoding": False,
"assertions": False,
"subresults": False,
"responseData": False,
"samplerData": False,
"responseHeaders": False,
"requestHeaders": False,
"responseDataOnError": False,
"saveAssertionResultsFailureMessage": False,
"bytes": False,
"hostname": True,
"threadCounts": True,
"url": False
}
return JMX.__jtl_writer(filename, "KPI Writer", flags)
@staticmethod
def new_xml_listener(filename, is_full, user_flags):
"""
:param is_full: bool
:param filename: str
:param user_flags: BetterDict
:return:
"""
default_flags = {
"xml": True,
"fieldNames": True,
"time": True,
"timestamp": True,
"latency": True,
"success": True,
"label": True,
"code": True,
"message": True,
"threadName": True,
"dataType": True,
"encoding": True,
"assertions": True,
"subresults": True,
"responseData": False,
"samplerData": False,
"responseHeaders": True,
"requestHeaders": True,
"responseDataOnError": True,
"saveAssertionResultsFailureMessage": True,
"bytes": True,
"threadCounts": True,
"url": True
}
flags = BetterDict()
flags.merge(default_flags)
flags.merge(user_flags)
if is_full:
writer = JMX.__jtl_writer(filename, "Trace Writer", flags)
else:
writer = JMX.__jtl_writer(filename, "Errors Writer", flags)
writer.append(JMX._bool_prop("ResultCollector.error_logging", True))
return writer
@staticmethod
def _get_arguments_panel(name):
"""
Generates ArgumentsPanel node
:param name:
:return:
"""
return etree.Element("elementProp", name=name, elementType="Arguments",
guiclass="ArgumentsPanel", testclass="Arguments")
@staticmethod
def _get_http_request(url, label, method, timeout, body, keepalive, files=()):
"""
Generates HTTP request
:type method: str
:type label: str
:type url: str
:rtype: lxml.etree.Element
"""
proxy = etree.Element("HTTPSamplerProxy", guiclass="HttpTestSampleGui", testclass="HTTPSamplerProxy")
proxy.set("testname", label)
args = JMX._get_arguments_panel("HTTPsampler.Arguments")
if isinstance(body, string_types):
JMX.__add_body_from_string(args, body, proxy)
elif isinstance(body, dict):
JMX.__add_body_from_script(args, body, proxy)
elif body:
raise ValueError("Cannot handle 'body' option of type %s: %s" % (type(body), body))
parsed_url = parse.urlparse(url)
JMX.__add_hostnameport_2sampler(parsed_url, proxy, url)
path = parsed_url.path
if parsed_url.query:
path += "?" + parsed_url.query
proxy.append(JMX._string_prop("HTTPSampler.path", path))
proxy.append(JMX._string_prop("HTTPSampler.method", method))
proxy.append(JMX._bool_prop("HTTPSampler.use_keepalive", keepalive))
proxy.append(JMX._bool_prop("HTTPSampler.follow_redirects", True))
if timeout is not None:
proxy.append(JMX._string_prop("HTTPSampler.connect_timeout", timeout))
proxy.append(JMX._string_prop("HTTPSampler.response_timeout", timeout))
if files:
proxy.append(JMX._bool_prop("HTTPSampler.DO_MULTIPART_POST", True))
proxy.append(JMX._bool_prop("HTTPSampler.BROWSER_COMPATIBLE_MULTIPART", True))
files_prop = JMX._element_prop("HTTPsampler.Files", "HTTPFileArgs")
files_coll = JMX._collection_prop("HTTPFileArgs.files")
for file_dict in files:
file_elem = JMX._element_prop(file_dict['path'], "HTTPFileArg")
file_elem.append(JMX._string_prop("File.path", file_dict['path']))
file_elem.append(JMX._string_prop("File.paramname", file_dict["param"]))
file_elem.append(JMX._string_prop("File.mimetype", file_dict['mime-type']))
files_coll.append(file_elem)
files_prop.append(files_coll)
proxy.append(files_prop)
return proxy
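    # Illustrative usage sketch for the helper above (values are placeholders):
    #
    #   proxy = JMX._get_http_request("http://example.com/api?x=1", "get api", "GET",
    #                                 timeout=None, body=None, keepalive=True)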
@staticmethod
def __add_body_from_string(args, body, proxy):
proxy.append(JMX._bool_prop("HTTPSampler.postBodyRaw", True))
coll_prop = JMX._collection_prop("Arguments.arguments")
header = JMX._element_prop("elementProp", "HTTPArgument")
try:
header.append(JMX._string_prop("Argument.value", body))
except ValueError:
logging.warning("Failed to set body: %s", traceback.format_exc())
header.append(JMX._string_prop("Argument.value", "BINARY-STUB"))
coll_prop.append(header)
args.append(coll_prop)
proxy.append(args)
@staticmethod
def __add_body_from_script(args, body, proxy):
http_args_coll_prop = JMX._collection_prop("Arguments.arguments")
for arg_name, arg_value in body.items():
if not (isinstance(arg_value, string_types) or isinstance(arg_value, numeric_types)):
raise ValueError('Body structure requires application/JSON header')
try:
http_element_prop = JMX._element_prop(arg_name, "HTTPArgument")
except ValueError:
logging.warning("Failed to get element property: %s", traceback.format_exc())
http_element_prop = JMX._element_prop('BINARY-STUB', "HTTPArgument")
try:
http_element_prop.append(JMX._string_prop("Argument.name", arg_name))
except ValueError:
logging.warning("Failed to set arg name: %s", traceback.format_exc())
http_element_prop.append(JMX._string_prop("Argument.name", "BINARY-STUB"))
try:
http_element_prop.append(
JMX._string_prop("Argument.value", arg_value if arg_value is not None else ''))
except ValueError:
logging.warning("Failed to set arg name: %s", traceback.format_exc())
http_element_prop.append(JMX._string_prop("Argument.value", "BINARY-STUB"))
http_element_prop.append(JMX._bool_prop("HTTPArgument.always_encode", True))
http_element_prop.append(JMX._bool_prop("HTTPArgument.use_equals", arg_value is not None))
http_element_prop.append(JMX._string_prop("Argument.metadata", '='))
http_args_coll_prop.append(http_element_prop)
args.append(http_args_coll_prop)
proxy.append(args)
@staticmethod
def __add_hostnameport_2sampler(parsed_url, proxy, url):
if parsed_url.scheme:
proxy.append(JMX._string_prop("HTTPSampler.protocol", parsed_url.scheme))
if parsed_url.netloc:
netloc_parts = parsed_url.netloc.split(':')
if netloc_parts[0]:
proxy.append(JMX._string_prop("HTTPSampler.domain", netloc_parts[0]))
if len(netloc_parts) > 1 and netloc_parts[1]:
proxy.append(JMX._string_prop("HTTPSampler.port", netloc_parts[1]))
else:
try:
if parsed_url.port:
proxy.append(JMX._string_prop("HTTPSampler.port", parsed_url.port))
else:
proxy.append(JMX._string_prop("HTTPSampler.port", ""))
except ValueError:
logging.debug("Non-parsable port: %s", url)
proxy.append(JMX._string_prop("HTTPSampler.port", ""))
@staticmethod
def _element_prop(name, element_type):
"""
Generates element property node
:param name:
:param element_type:
:return:
"""
res = etree.Element("elementProp", name=name, elementType=element_type)
return res
@staticmethod
def _collection_prop(name):
"""
Adds Collection prop
:param name:
:return:
"""
res = etree.Element("collectionProp", name=name)
return res
@staticmethod
def _string_prop(name, value):
"""
Generates string property node
:param name:
:param value:
:return:
"""
res = etree.Element("stringProp", name=name)
res.text = text_type(value)
return res
@staticmethod
def _long_prop(name, value):
"""
Generates long property node
:param name:
:param value:
:return:
"""
res = etree.Element("longProp", name=name)
res.text = text_type(value)
return res
@staticmethod
def _bool_prop(name, value):
"""
Generates boolean property
:param name:
:param value:
:return:
"""
res = etree.Element("boolProp", name=name)
res.text = 'true' if value else 'false'
return res
@staticmethod
def int_prop(name, value):
"""
JMX int property
:param name:
:param value:
:return:
"""
res = etree.Element("intProp", name=name)
res.text = text_type(value)
return res
@staticmethod
def get_thread_group(concurrency=None, rampup=None, iterations=None, testname="ThreadGroup", on_error="continue"):
"""
Generates ThreadGroup with 1 thread and 1 loop
:param iterations:
:param rampup:
:param concurrency:
:return:
"""
trg = etree.Element("ThreadGroup", guiclass="ThreadGroupGui",
testclass="ThreadGroup", testname=testname)
if on_error is not None:
trg.append(JMX._string_prop("ThreadGroup.on_sample_error", on_error))
loop = etree.Element("elementProp",
name="ThreadGroup.main_controller",
elementType="LoopController",
guiclass="LoopControlPanel",
testclass="LoopController")
loop.append(JMX._bool_prop("LoopController.continue_forever", iterations < 0))
if not iterations:
iterations = 1
loop.append(JMX._string_prop("LoopController.loops", iterations))
trg.append(loop)
if not concurrency:
concurrency = 1
trg.append(JMX._string_prop("ThreadGroup.num_threads", concurrency))
if not rampup:
rampup = ""
trg.append(JMX._string_prop("ThreadGroup.ramp_time", rampup))
trg.append(JMX._string_prop("ThreadGroup.start_time", ""))
trg.append(JMX._string_prop("ThreadGroup.end_time", ""))
trg.append(JMX._bool_prop("ThreadGroup.scheduler", False))
trg.append(JMX._long_prop("ThreadGroup.duration", 0))
return trg
def get_rps_shaper(self):
"""
:return: etree.Element
"""
throughput_timer_element = etree.Element("kg.apc.jmeter.timers.VariableThroughputTimer",
guiclass="kg.apc.jmeter.timers.VariableThroughputTimerGui",
testclass="kg.apc.jmeter.timers.VariableThroughputTimer",
testname="jp@gc - Throughput Shaping Timer",
enabled="true")
shaper_load_prof = self._collection_prop("load_profile")
throughput_timer_element.append(shaper_load_prof)
return throughput_timer_element
def add_rps_shaper_schedule(self, shaper_etree, start_rps, end_rps, duration):
"""
Adds schedule to rps shaper
:param shaper_etree:
:param start_rps:
:param end_rps:
:param duration:
:return:
"""
shaper_collection = shaper_etree.find(".//collectionProp[@name='load_profile']")
coll_prop = self._collection_prop("1817389797")
start_rps_prop = self._string_prop("49", int(start_rps))
end_rps_prop = self._string_prop("1567", int(end_rps))
duration_prop = self._string_prop("53", int(duration))
coll_prop.append(start_rps_prop)
coll_prop.append(end_rps_prop)
coll_prop.append(duration_prop)
shaper_collection.append(coll_prop)
@staticmethod
def add_user_def_vars_elements(udv_dict, testname="Variables from Taurus"):
"""
:type testname: str
:type udv_dict: dict[str,str]
:rtype: etree.Element
"""
udv_element = etree.Element("Arguments", guiclass="ArgumentsPanel", testclass="Arguments",
testname=testname)
udv_collection_prop = JMX._collection_prop("Arguments.arguments")
for var_name in sorted(udv_dict.keys(), key=lambda x: str(x)):
udv_element_prop = JMX._element_prop(str(var_name), "Argument")
udv_arg_name_prop = JMX._string_prop("Argument.name", var_name)
udv_arg_value_prop = JMX._string_prop("Argument.value", udv_dict[var_name])
udv_arg_desc_prop = JMX._string_prop("Argument.desc", "")
udv_arg_meta_prop = JMX._string_prop("Argument.metadata", "=")
udv_element_prop.append(udv_arg_name_prop)
udv_element_prop.append(udv_arg_value_prop)
udv_element_prop.append(udv_arg_desc_prop)
udv_element_prop.append(udv_arg_meta_prop)
udv_collection_prop.append(udv_element_prop)
udv_element.append(udv_collection_prop)
return udv_element
@staticmethod
def get_stepping_thread_group(concurrency, step_threads, step_time, hold_for, tg_name):
"""
:return: etree element, Stepping Thread Group
"""
stepping_thread_group = etree.Element("kg.apc.jmeter.threads.SteppingThreadGroup",
guiclass="kg.apc.jmeter.threads.SteppingThreadGroupGui",
testclass="kg.apc.jmeter.threads.SteppingThreadGroup",
testname=tg_name, enabled="true")
stepping_thread_group.append(JMX._string_prop("ThreadGroup.on_sample_error", "continue"))
stepping_thread_group.append(JMX._string_prop("ThreadGroup.num_threads", concurrency))
stepping_thread_group.append(JMX._string_prop("Threads initial delay", 0))
stepping_thread_group.append(JMX._string_prop("Start users count", step_threads))
stepping_thread_group.append(JMX._string_prop("Start users count burst", 0))
stepping_thread_group.append(JMX._string_prop("Start users period", step_time))
stepping_thread_group.append(JMX._string_prop("Stop users count", ""))
stepping_thread_group.append(JMX._string_prop("Stop users period", 1))
stepping_thread_group.append(JMX._string_prop("flighttime", int(hold_for)))
stepping_thread_group.append(JMX._string_prop("rampUp", 0))
loop_controller = etree.Element("elementProp", name="ThreadGroup.main_controller", elementType="LoopController",
guiclass="LoopControlPanel", testclass="LoopController",
testname="Loop Controller", enabled="true")
loop_controller.append(JMX._bool_prop("LoopController.continue_forever", False))
loop_controller.append(JMX.int_prop("LoopController.loops", -1))
stepping_thread_group.append(loop_controller)
return stepping_thread_group
@staticmethod
def get_dns_cache_mgr():
"""
Adds dns cache element with defaults parameters
:return:
"""
dns_element = etree.Element("DNSCacheManager", guiclass="DNSCachePanel", testclass="DNSCacheManager",
testname="DNS Cache Manager")
dns_element.append(JMX._collection_prop("DNSCacheManager.servers"))
dns_element.append(JMX._bool_prop("DNSCacheManager.clearEachIteration", False))
dns_element.append(JMX._bool_prop("DNSCacheManager.isCustomResolver", False))
return dns_element
@staticmethod
def _get_header_mgr(hdict):
"""
:type hdict: dict[str,str]
:rtype: lxml.etree.Element
"""
mgr = etree.Element("HeaderManager", guiclass="HeaderPanel", testclass="HeaderManager", testname="Headers")
coll_prop = etree.Element("collectionProp", name="HeaderManager.headers")
for hname, hval in iteritems(hdict):
header = etree.Element("elementProp", name="", elementType="Header")
header.append(JMX._string_prop("Header.name", hname))
header.append(JMX._string_prop("Header.value", hval))
coll_prop.append(header)
mgr.append(coll_prop)
return mgr
@staticmethod
def _get_cache_mgr():
"""
:rtype: lxml.etree.Element
"""
mgr = etree.Element("CacheManager", guiclass="CacheManagerGui", testclass="CacheManager", testname="Cache")
mgr.append(JMX._bool_prop("clearEachIteration", True))
mgr.append(JMX._bool_prop("useExpires", True))
return mgr
@staticmethod
def _get_cookie_mgr():
"""
:rtype: lxml.etree.Element
"""
mgr = etree.Element("CookieManager", guiclass="CookiePanel", testclass="CookieManager", testname="Cookies")
mgr.append(JMX._bool_prop("CookieManager.clearEachIteration", True))
return mgr
@staticmethod
def _get_http_defaults(default_address=None, timeout=None, retrieve_resources=None, concurrent_pool_size=4):
"""
:rtype: lxml.etree.Element
"""
cfg = etree.Element("ConfigTestElement", guiclass="HttpDefaultsGui",
testclass="ConfigTestElement", testname="Defaults")
if retrieve_resources:
cfg.append(JMX._bool_prop("HTTPSampler.image_parser", True))
cfg.append(JMX._bool_prop("HTTPSampler.concurrentDwn", True))
if concurrent_pool_size:
cfg.append(JMX._string_prop("HTTPSampler.concurrentPool", concurrent_pool_size))
params = etree.Element("elementProp",
name="HTTPsampler.Arguments",
elementType="Arguments",
guiclass="HTTPArgumentsPanel",
testclass="Arguments", testname="user_defined")
cfg.append(params)
if default_address:
parsed_url = parse.urlsplit(default_address)
if parsed_url.scheme:
cfg.append(JMX._string_prop("HTTPSampler.protocol", parsed_url.scheme))
if parsed_url.hostname:
cfg.append(JMX._string_prop("HTTPSampler.domain", parsed_url.hostname))
if parsed_url.port:
cfg.append(JMX._string_prop("HTTPSampler.port", parsed_url.port))
if timeout:
cfg.append(JMX._string_prop("HTTPSampler.connect_timeout", timeout))
cfg.append(JMX._string_prop("HTTPSampler.response_timeout", timeout))
return cfg
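    # Illustrative sketch, related to the patch and review note for this change:
    # the host/port split could also be expressed with rpartition/rsplit instead
    # of rfind, e.g. for netloc "example.com:8080":
    #
    #   host, _, port = netloc.rpartition(":")   # or: netloc.rsplit(":", 1)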
@staticmethod
def _get_dur_assertion(timeout):
"""
:type timeout: int
:return:
"""
element = etree.Element("DurationAssertion", guiclass="DurationAssertionGui",
testclass="DurationAssertion", testname="Timeout Check")
element.append(JMX._string_prop("DurationAssertion.duration", timeout))
return element
@staticmethod
def _get_constant_timer(delay):
"""
:type delay: int
:rtype: lxml.etree.Element
"""
element = etree.Element("ConstantTimer", guiclass="ConstantTimerGui",
testclass="ConstantTimer", testname="Think-Time")
element.append(JMX._string_prop("ConstantTimer.delay", delay))
return element
@staticmethod
def _get_extractor(varname, headers, regexp, template, match_no, default='NOT_FOUND'):
"""
:type varname: str
:type regexp: str
:type template: str|int
:type match_no: int
:type default: str
:rtype: lxml.etree.Element
"""
if isinstance(template, int):
template = '$%s$' % template
if headers.lower() == 'headers':
headers = 'true'
elif headers.lower() == 'http-code':
headers = 'code'
else:
headers = 'body'
element = etree.Element("RegexExtractor", guiclass="RegexExtractorGui",
testclass="RegexExtractor", testname="Get %s" % varname, enabled="true")
element.append(JMX._string_prop("RegexExtractor.useHeaders", headers))
element.append(JMX._string_prop("RegexExtractor.refname", varname))
element.append(JMX._string_prop("RegexExtractor.regex", regexp))
element.append(JMX._string_prop("Sample.scope", "parent"))
element.append(JMX._string_prop("RegexExtractor.template", template))
element.append(JMX._string_prop("RegexExtractor.default", default))
element.append(JMX._string_prop("RegexExtractor.match_number", match_no))
return element
@staticmethod
def _get_jquerycss_extractor(varname, selector, attribute, match_no, default="NOT_FOUND"):
"""
:type varname: str
        :type selector: str
        :type attribute: str
:type match_no: int
:type default: str
:rtype: lxml.etree.Element
"""
element = etree.Element("HtmlExtractor", guiclass="HtmlExtractorGui", testclass="HtmlExtractor",
testname="Get %s" % varname)
element.append(JMX._string_prop("HtmlExtractor.refname", varname))
element.append(JMX._string_prop("HtmlExtractor.expr", selector))
element.append(JMX._string_prop("HtmlExtractor.attribute", attribute))
element.append(JMX._string_prop("HtmlExtractor.match_number", match_no))
element.append(JMX._string_prop("HtmlExtractor.default", default))
return element
@staticmethod
def _get_json_extractor(varname, jsonpath, default='NOT_FOUND', from_variable=None):
"""
:type varname: str
:type default: str
:rtype: lxml.etree.Element
"""
package = "com.atlantbh.jmeter.plugins.jsonutils.jsonpathextractor"
element = etree.Element("%s.JSONPathExtractor" % package,
guiclass="%s.gui.JSONPathExtractorGui" % package,
testclass="%s.JSONPathExtractor" % package,
testname="Get %s" % varname)
element.append(JMX._string_prop("VAR", varname))
element.append(JMX._string_prop("JSONPATH", jsonpath))
element.append(JMX._string_prop("DEFAULT", default))
if from_variable:
element.append(JMX._string_prop("VARIABLE", from_variable))
element.append(JMX._string_prop("SUBJECT", "VAR"))
return element
@staticmethod
def _get_json_path_assertion(jsonpath, expected_value, json_validation, expect_null, invert):
"""
:type jsonpath: str
:type expected_value: str
:type json_validation: bool
:type expect_null: bool
:return: lxml.etree.Element
"""
package = "com.atlantbh.jmeter.plugins.jsonutils.jsonpathassertion"
element = etree.Element("%s.JSONPathAssertion" % package,
guiclass="%s.gui.JSONPathAssertionGui" % package,
testclass="%s.JSONPathAssertion" % package,
testname="JSon path assertion")
element.append(JMX._string_prop("JSON_PATH", jsonpath))
element.append(JMX._string_prop("EXPECTED_VALUE", expected_value))
element.append(JMX._bool_prop("JSONVALIDATION", json_validation))
element.append(JMX._bool_prop("EXPECT_NULL", expect_null))
element.append(JMX._bool_prop("INVERT", invert))
return element
@staticmethod
def _get_xpath_extractor(varname, xpath, default, validate_xml, ignore_whitespace, use_tolerant_parser):
"""
:type varname: str
:type xpath: str
:type default: str
:type validate_xml: bool
:type ignore_whitespace: bool
:type use_tolerant_parser: bool
:rtype: lxml.etree.Element
"""
element = etree.Element("XPathExtractor",
guiclass="XPathExtractorGui",
testclass="XPathExtractor",
testname="Get %s" % varname)
element.append(JMX._string_prop("XPathExtractor.refname", varname))
element.append(JMX._string_prop("XPathExtractor.xpathQuery", xpath))
element.append(JMX._string_prop("XPathExtractor.default", default))
element.append(JMX._bool_prop("XPathExtractor.validate", validate_xml))
element.append(JMX._bool_prop("XPathExtractor.whitespace", ignore_whitespace))
element.append(JMX._bool_prop("XPathExtractor.tolerant", use_tolerant_parser))
return element
@staticmethod
def _get_xpath_assertion(xpath, validate_xml, ignore_whitespace, use_tolerant_parser, invert):
"""
:type xpath: str
:type validate_xml: bool
:type ignore_whitespace: bool
:type use_tolerant_parser: bool
:return: lxml.etree.Element
"""
element = etree.Element("XPathAssertion",
guiclass="XPathAssertionGui",
testclass="XPathAssertion",
testname="XPath Assertion")
element.append(JMX._string_prop("XPath.xpath", xpath))
element.append(JMX._bool_prop("XPath.validate", validate_xml))
element.append(JMX._bool_prop("XPath.whitespace", ignore_whitespace))
element.append(JMX._bool_prop("XPath.tolerant", use_tolerant_parser))
element.append(JMX._bool_prop("XPath.negate", invert))
return element
@staticmethod
def _get_resp_assertion(field, contains, is_regexp, is_invert, assume_success=False):
"""
:type field: str
:type contains: list[str]
:type is_regexp: bool
:type is_invert: bool
:rtype: lxml.etree.Element
"""
tname = "Assert %s has %s" % ("not" if is_invert else "", [text_type(x) for x in contains])
element = etree.Element("ResponseAssertion", guiclass="AssertionGui",
testclass="ResponseAssertion", testname=tname)
if field == Scenario.FIELD_HEADERS:
fld = "Assertion.response_headers"
elif field == Scenario.FIELD_RESP_CODE:
fld = "Assertion.response_code"
else:
fld = "Assertion.response_data"
if is_regexp:
if is_invert:
mtype = 6 # not contains
else:
mtype = 2 # contains
else:
if is_invert:
mtype = 20 # not substring
else:
mtype = 16 # substring
element.append(JMX._string_prop("Assertion.test_field", fld))
element.append(JMX._string_prop("Assertion.test_type", mtype))
element.append(JMX._bool_prop("Assertion.assume_success", assume_success))
coll_prop = etree.Element("collectionProp", name="Asserion.test_strings")
for string in contains:
coll_prop.append(JMX._string_prop("", string))
element.append(coll_prop)
return element
@staticmethod
def _get_csv_config(path, delimiter, is_quoted, loop):
"""
:type path: str
:type delimiter: str
:type is_quoted: bool
        :type loop: bool
:return:
"""
element = etree.Element("CSVDataSet", guiclass="TestBeanGUI",
testclass="CSVDataSet", testname="CSV %s" % os.path.basename(path))
element.append(JMX._string_prop("filename", path))
element.append(JMX._string_prop("delimiter", delimiter))
element.append(JMX._bool_prop("quotedData", is_quoted))
element.append(JMX._bool_prop("recycle", loop))
element.append(JMX._bool_prop("stopThread", not loop))
return element
def set_enabled(self, sel, state):
"""
Toggle items by selector
:type sel: str
:type state: bool
"""
items = self.get(sel)
self.log.debug("Enable %s elements %s: %s", state, sel, items)
for item in items:
item.set("enabled", 'true' if state else 'false')
def set_text(self, sel, text):
"""
Set text value
:type sel: str
:type text: str
"""
items = self.get(sel)
res = 0
for item in items:
item.text = text_type(text)
res += 1
return res
@staticmethod
def _get_simple_controller(name):
return etree.Element("GenericController", guiclass="LogicControllerGui", testclass="GenericController",
testname=name)
def _add_results_tree(self):
dbg_tree = etree.Element("ResultCollector",
testname="View Results Tree",
testclass="ResultCollector",
guiclass="ViewResultsFullVisualizer")
self.append(self.TEST_PLAN_SEL, dbg_tree)
self.append(self.TEST_PLAN_SEL, etree.Element("hashTree"))
def _get_results_tree(self):
dbg_tree = etree.Element("ResultCollector",
testname="View Results Tree",
testclass="ResultCollector",
guiclass="ViewResultsFullVisualizer")
return dbg_tree
@staticmethod
def _get_if_controller(condition):
controller = etree.Element("IfController", guiclass="IfControllerPanel", testclass="IfController",
testname="If Controller")
controller.append(JMX._string_prop("IfController.condition", condition))
return controller
@staticmethod
def _get_loop_controller(loops):
loop_forever = loops == 'forever'
if loop_forever:
iterations = -1
else:
iterations = loops
controller = etree.Element("LoopController", guiclass="LoopControllerPanel", testclass="LoopController",
testname="Loop Controller")
controller.append(JMX._bool_prop("LoopController.continue_forever", loop_forever))
controller.append(JMX._string_prop("LoopController.loops", str(iterations)))
return controller
@staticmethod
def _get_foreach_controller(input_var, loop_var):
# TODO: useSeparator option
controller = etree.Element("ForeachController", guiclass="ForeachControlPanel", testclass="ForeachController",
testname="ForEach Controller")
controller.append(JMX._string_prop("ForeachController.inputVal", input_var))
controller.append(JMX._string_prop("ForeachController.returnVal", loop_var))
controller.append(JMX._bool_prop("ForeachController.useSeparator", True))
return controller
@staticmethod
def _get_while_controller(condition):
controller = etree.Element("WhileController", guiclass="WhileControllerGui", testclass="WhileController",
testname="While Controller")
controller.append(JMX._string_prop("WhileController.condition", condition))
return controller
@staticmethod
def _get_transaction_controller(transaction_name):
controller = etree.Element("TransactionController", guiclass="TransactionControllerGui",
testclass="TransactionController", testname=transaction_name)
controller.append(JMX._bool_prop("TransactionController.parent", True))
return controller
| 1 | 13,914 | You can use `netloc.rsplit(":")` for that. | Blazemeter-taurus | py |
@@ -143,7 +143,7 @@ func (w *watcher) run(ctx context.Context, repo git.Repo, repoCfg *config.PipedR
continue
}
if !ok {
- w.logger.Error("configuration file for Image Watcher not found",
+ w.logger.Info("configuration file for Image Watcher not found",
zap.String("repo-id", repoCfg.RepoID),
zap.Error(err),
) | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package imagewatcher provides a piped component
// that periodically checks the container registry and updates
// the image if there are differences with Git.
package imagewatcher
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
"go.uber.org/zap"
"github.com/pipe-cd/pipe/pkg/app/piped/imageprovider"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/git"
"github.com/pipe-cd/pipe/pkg/yamlprocessor"
)
const (
defaultCommitMessageFormat = "Update image %s to %s defined at %s in %s"
defaultCheckInterval = 5 * time.Minute
)
type Watcher interface {
Run(context.Context) error
}
type gitClient interface {
Clone(ctx context.Context, repoID, remote, branch, destination string) (git.Repo, error)
}
type commit struct {
changes map[string][]byte
message string
}
type watcher struct {
config *config.PipedSpec
gitClient gitClient
logger *zap.Logger
wg sync.WaitGroup
// Indexed by the Image Provider name.
providerCfgs map[string]config.PipedImageProvider
}
func NewWatcher(cfg *config.PipedSpec, gitClient gitClient, logger *zap.Logger) Watcher {
return &watcher{
config: cfg,
gitClient: gitClient,
logger: logger.Named("image-watcher"),
}
}
// Run spawns goroutines for each git repository. They periodically pull the image
// from the container registry to compare the image with one in the git repository.
func (w *watcher) Run(ctx context.Context) error {
w.providerCfgs = make(map[string]config.PipedImageProvider, len(w.config.ImageProviders))
for _, cfg := range w.config.ImageProviders {
w.providerCfgs[cfg.Name] = cfg
}
for _, repoCfg := range w.config.Repositories {
repo, err := w.gitClient.Clone(ctx, repoCfg.RepoID, repoCfg.Remote, repoCfg.Branch, "")
if err != nil {
w.logger.Error("failed to clone repository",
zap.String("repo-id", repoCfg.RepoID),
zap.Error(err),
)
return fmt.Errorf("failed to clone repository %s: %w", repoCfg.RepoID, err)
}
w.wg.Add(1)
go w.run(ctx, repo, &repoCfg)
}
w.wg.Wait()
return nil
}
// run periodically compares the image in the given git repository and one in the image provider.
// It then pushes changes for any that differ.
func (w *watcher) run(ctx context.Context, repo git.Repo, repoCfg *config.PipedRepository) {
defer w.wg.Done()
var (
checkInterval = defaultCheckInterval
commitMsg string
includedCfgs, excludedCfgs []string
)
	// Use user-defined settings if there are any.
for _, r := range w.config.ImageWatcher.Repos {
if r.RepoID != repoCfg.RepoID {
continue
}
checkInterval = time.Duration(r.CheckInterval)
commitMsg = r.CommitMessage
includedCfgs = r.Includes
excludedCfgs = r.Excludes
break
}
ticker := time.NewTicker(checkInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
err := repo.Pull(ctx, repo.GetClonedBranch())
if err != nil {
w.logger.Error("failed to perform git pull",
zap.String("repo-id", repoCfg.RepoID),
zap.String("branch", repo.GetClonedBranch()),
zap.Error(err),
)
continue
}
cfg, ok, err := config.LoadImageWatcher(repo.GetPath(), includedCfgs, excludedCfgs)
if err != nil {
w.logger.Error("failed to load configuration file for Image Watcher",
zap.String("repo-id", repoCfg.RepoID),
zap.Error(err),
)
continue
}
if !ok {
w.logger.Error("configuration file for Image Watcher not found",
zap.String("repo-id", repoCfg.RepoID),
zap.Error(err),
)
continue
}
if err := w.updateOutdatedImages(ctx, repo, cfg.Targets, commitMsg); err != nil {
w.logger.Error("failed to update the targets",
zap.String("repo-id", repoCfg.RepoID),
zap.Error(err),
)
}
}
}
}
// updateOutdatedImages inspects all targets and pushes changes to the git repo if there are any.
func (w *watcher) updateOutdatedImages(ctx context.Context, repo git.Repo, targets []config.ImageWatcherTarget, commitMsg string) error {
commits := make([]*commit, 0)
for _, t := range targets {
c, err := w.checkOutdatedImage(ctx, &t, repo, commitMsg)
if err != nil {
w.logger.Error("failed to update image", zap.Error(err))
continue
}
if c != nil {
commits = append(commits, c)
}
}
if len(commits) == 0 {
return nil
}
// Copy the repo to another directory to avoid pull failure in the future.
tmpDir, err := ioutil.TempDir("", "image-watcher")
if err != nil {
return fmt.Errorf("failed to create a new temporary directory: %w", err)
}
defer os.RemoveAll(tmpDir)
tmpRepo, err := repo.Copy(tmpDir)
if err != nil {
return fmt.Errorf("failed to copy the repository to the temporary directory: %w", err)
}
for _, c := range commits {
if err := tmpRepo.CommitChanges(ctx, tmpRepo.GetClonedBranch(), c.message, false, c.changes); err != nil {
return fmt.Errorf("failed to perform git commit: %w", err)
}
}
return tmpRepo.Push(ctx, tmpRepo.GetClonedBranch())
}
// checkOutdatedImage returns the content of a change if any deviation exists
// between the image in the given git repository and one in the image provider.
func (w *watcher) checkOutdatedImage(ctx context.Context, target *config.ImageWatcherTarget, repo git.Repo, commitMsg string) (*commit, error) {
// Retrieve the image from the image provider.
providerCfg, ok := w.providerCfgs[target.Provider]
if !ok {
return nil, fmt.Errorf("unknown image provider %s is defined", target.Provider)
}
provider, err := imageprovider.NewProvider(&providerCfg, w.logger)
if err != nil {
return nil, fmt.Errorf("failed to yield image provider %s: %w", providerCfg.Name, err)
}
i, err := provider.ParseImage(target.Image)
if err != nil {
return nil, fmt.Errorf("failed to parse image string \"%s\": %w", target.Image, err)
}
// TODO: Control not to reach the rate limit
imageInRegistry, err := provider.GetLatestImage(ctx, i)
if err != nil {
return nil, fmt.Errorf("failed to get latest image from %s: %w", provider.Name(), err)
}
// Retrieve the image from the file cloned from the git repository.
path := filepath.Join(repo.GetPath(), target.FilePath)
yml, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("failed to read file: %w", err)
}
value, err := yamlprocessor.GetValue(yml, target.Field)
if err != nil {
return nil, fmt.Errorf("failed to get value at %s in %s: %w", target.Field, target.FilePath, err)
}
imageInGit, ok := value.(string)
if !ok {
return nil, fmt.Errorf("unknown value is defined at %s in %s", target.FilePath, target.Field)
}
outdated := imageInRegistry.String() != imageInGit
if !outdated {
return nil, nil
}
// Give back a change content.
newYml, err := yamlprocessor.ReplaceValue(yml, target.Field, imageInRegistry.String())
if err != nil {
return nil, fmt.Errorf("failed to replace value at %s with %s: %w", target.Field, imageInRegistry, err)
}
if commitMsg == "" {
commitMsg = fmt.Sprintf(defaultCommitMessageFormat, imageInGit, imageInRegistry.String(), target.Field, target.FilePath)
}
return &commit{
changes: map[string][]byte{
target.FilePath: newYml,
},
message: commitMsg,
}, nil
}
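// For illustration only (hypothetical values): with the default commit message
// format above, updating gcr.io/example/app:v1 to gcr.io/example/app:v2 at a
// field "spec.template.spec.containers[0].image" in "deploy.yaml" would yield:
//
//	Update image gcr.io/example/app:v1 to gcr.io/example/app:v2 defined at spec.template.spec.containers[0].image in deploy.yaml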
| 1 | 12,406 | nits, I don't get the point of this change | pipe-cd-pipe | go |
@@ -0,0 +1,13 @@
+// +build !windows
+
+// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package log
+
+// Log message prefixes.
+const (
+ successPrefix = "✔ Success!"
+ errorPrefix = "✘ Error!"
+ warningPrefix = "Note:"
+) | 1 | 1 | 10,427 | `warningPrefix` looks the same on both platforms, we could move it up to `log.go`. | aws-copilot-cli | go |
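// Illustrative sketch of the refactor suggested in the review of this change:
// since warningPrefix is identical on both platforms, it could live in the
// shared, non-build-tagged log.go instead, e.g.
//
//	// log.go
//	const warningPrefix = "Note:"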
|
@@ -65,10 +65,10 @@ type Config struct {
Inbounds Inbounds
Outbounds Outbounds
- // Filter and Interceptor that will be applied to all outgoing and incoming
- // requests respectively.
- Filter transport.Filter
- Interceptor transport.Interceptor
+ // Filters and Interceptors that will be applied to all outgoing and
+ // incoming requests respectively.
+ Filters Filters
+ Interceptors Interceptors
Tracer opentracing.Tracer
} | 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package yarpc
import (
"fmt"
"sync"
"go.uber.org/yarpc/internal/channel"
"go.uber.org/yarpc/internal/errors"
"go.uber.org/yarpc/internal/request"
intsync "go.uber.org/yarpc/internal/sync"
"go.uber.org/yarpc/transport"
"github.com/opentracing/opentracing-go"
)
// Dispatcher object is used to configure a YARPC application; it is used by
// Clients to send RPCs, and by Procedures to recieve them. This object is what
// enables an application to be transport-agnostic.
type Dispatcher interface {
transport.Registrar
transport.ChannelProvider
// Inbounds returns a copy of the list of inbounds for this RPC object.
//
// The Inbounds will be returned in the same order that was used in the
// configuration.
Inbounds() Inbounds
	// Starts the RPC allowing it to accept and process new incoming
// requests.
//
// Blocks until the RPC is ready to start accepting new requests.
Start() error
// Stops the RPC. No new requests will be accepted.
//
// Blocks until the RPC has stopped.
Stop() error
}
// Config specifies the parameters of a new RPC constructed via New.
type Config struct {
Name string
Inbounds Inbounds
Outbounds Outbounds
// Filter and Interceptor that will be applied to all outgoing and incoming
// requests respectively.
Filter transport.Filter
Interceptor transport.Interceptor
Tracer opentracing.Tracer
}
// Inbounds contains a list of inbound transports
type Inbounds []transport.Inbound
// Outbounds encapsulates a service and its outbounds
type Outbounds map[string]transport.Outbounds
// NewDispatcher builds a new Dispatcher using the specified Config.
func NewDispatcher(cfg Config) Dispatcher {
if cfg.Name == "" {
panic("a service name is required")
}
return dispatcher{
Name: cfg.Name,
Registrar: transport.NewMapRegistry(cfg.Name),
inbounds: cfg.Inbounds,
outbounds: convertOutbounds(cfg.Outbounds, cfg.Filter),
Interceptor: cfg.Interceptor,
deps: transport.NoDeps.WithTracer(cfg.Tracer),
}
}
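// Illustrative usage sketch from a caller's point of view (assumption: the
// inbounds/outbounds were built elsewhere with one of the transport packages):
//
//	d := yarpc.NewDispatcher(yarpc.Config{
//		Name:      "myservice",
//		Inbounds:  yarpc.Inbounds{someInbound},
//		Outbounds: yarpc.Outbounds{"otherservice": {Unary: someUnaryOutbound}},
//	})
//	if err := d.Start(); err != nil { /* handle */ }
//	defer d.Stop()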
// convertOutbounds applies filters and creates validator outbounds
func convertOutbounds(outbounds Outbounds, filter transport.Filter) Outbounds {
//TODO(apb): ensure we're not given the same underlying outbound for each RPC type
convertedOutbounds := make(Outbounds, len(outbounds))
for service, outs := range outbounds {
var (
unaryOutbound transport.UnaryOutbound
onewayOutbound transport.OnewayOutbound
)
// apply filters and create ValidatorOutbounds
if outs.Unary != nil {
unaryOutbound = transport.ApplyFilter(outs.Unary, filter)
unaryOutbound = request.UnaryValidatorOutbound{UnaryOutbound: unaryOutbound}
}
// TODO(apb): apply oneway outbound filter
if outs.Oneway != nil {
onewayOutbound = request.OnewayValidatorOutbound{OnewayOutbound: outs.Oneway}
}
convertedOutbounds[service] = transport.Outbounds{
Unary: unaryOutbound,
Oneway: onewayOutbound,
}
}
return convertedOutbounds
}
// dispatcher is the standard RPC implementation.
//
// It allows use of multiple Inbounds and Outbounds together.
type dispatcher struct {
transport.Registrar
Name string
inbounds Inbounds
outbounds Outbounds
Interceptor transport.Interceptor
deps transport.Deps
}
func (d dispatcher) Inbounds() Inbounds {
inbounds := make(Inbounds, len(d.inbounds))
copy(inbounds, d.inbounds)
return inbounds
}
func (d dispatcher) Channel(service string) transport.Channel {
if rs, ok := d.outbounds[service]; ok {
return channel.MultiOutbound(d.Name, service, rs)
}
panic(noOutboundForService{Service: service})
}
func (d dispatcher) Start() error {
var (
mu sync.Mutex
startedInbounds []transport.Inbound
startedOutbounds []transport.Outbound
)
service := transport.ServiceDetail{
Name: d.Name,
Registry: d,
}
startInbound := func(i transport.Inbound) func() error {
return func() error {
if err := i.Start(service, d.deps); err != nil {
return err
}
mu.Lock()
startedInbounds = append(startedInbounds, i)
mu.Unlock()
return nil
}
}
startOutbound := func(o transport.Outbound) func() error {
return func() error {
if o == nil {
return nil
}
if err := o.Start(d.deps); err != nil {
return err
}
mu.Lock()
startedOutbounds = append(startedOutbounds, o)
mu.Unlock()
return nil
}
}
var wait intsync.ErrorWaiter
for _, i := range d.inbounds {
wait.Submit(startInbound(i))
}
// TODO record the name of the service whose outbound failed
for _, o := range d.outbounds {
wait.Submit(startOutbound(o.Unary))
wait.Submit(startOutbound(o.Oneway))
}
errs := wait.Wait()
if len(errs) == 0 {
return nil
}
// Failed to start so stop everything that was started.
wait = intsync.ErrorWaiter{}
for _, i := range startedInbounds {
wait.Submit(i.Stop)
}
for _, o := range startedOutbounds {
wait.Submit(o.Stop)
}
if newErrors := wait.Wait(); len(newErrors) > 0 {
errs = append(errs, newErrors...)
}
return errors.ErrorGroup(errs)
}
func (d dispatcher) Register(rs []transport.Registrant) {
registrants := make([]transport.Registrant, 0, len(rs))
for _, r := range rs {
switch r.HandlerSpec.Type() {
case transport.Unary:
h := transport.ApplyInterceptor(r.HandlerSpec.Unary(), d.Interceptor)
r.HandlerSpec = transport.NewUnaryHandlerSpec(h)
case transport.Oneway:
//TODO(apb): add oneway interceptors https://github.com/yarpc/yarpc-go/issues/413
default:
panic(fmt.Sprintf("unknown handler type %q for service %q, procedure %q",
r.HandlerSpec.Type(), r.Service, r.Procedure))
}
registrants = append(registrants, r)
}
d.Registrar.Register(registrants)
}
func (d dispatcher) Stop() error {
var wait intsync.ErrorWaiter
for _, i := range d.inbounds {
wait.Submit(i.Stop)
}
for _, o := range d.outbounds {
if o.Unary != nil {
wait.Submit(o.Unary.Stop)
}
if o.Oneway != nil {
wait.Submit(o.Oneway.Stop)
}
}
if errs := wait.Wait(); len(errs) > 0 {
return errors.ErrorGroup(errs)
}
return nil
}
| 1 | 11,372 | What's the motivation behind switching to plural here? I think we want singular cc @abhinav | yarpc-yarpc-go | go |
@@ -26,6 +26,19 @@ import (
"fmt"
)
+// Wrap returns a new Status that wraps the original error.
+//
+// The Code should never be CodeOK, if it is, this will return nil.
+func Wrap(code Code, err error) *Status {
+ if code == CodeOK {
+ return nil
+ }
+ return &Status{
+ code: code,
+ err: &wrapError{err: err},
+ }
+}
+
// Newf returns a new Status.
//
// The Code should never be CodeOK, if it is, this will return nil. | 1 | // Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package yarpcerrors
import (
"bytes"
"errors"
"fmt"
)
// Newf returns a new Status.
//
// The Code should never be CodeOK, if it is, this will return nil.
func Newf(code Code, format string, args ...interface{}) *Status {
if code == CodeOK {
return nil
}
var err error
if len(args) == 0 {
err = errors.New(format)
} else {
err = fmt.Errorf(format, args...)
}
return &Status{
code: code,
err: err,
}
}
type yarpcError interface{ YARPCError() *Status }
// FromError returns the Status for the provided error.
//
// If the error:
// - is nil, return nil
// - is a 'Status', return the 'Status'
// - has a 'YARPCError() *Status' method, returns the 'Status'
// Otherwise, return a wrapped error with code 'CodeUnknown'.
func FromError(err error) *Status {
if err == nil {
return nil
}
if st, ok := fromError(err); ok {
return st
}
// Extra wrapping ensures Unwrap works consistently across *Status created
// by FromError and Newf.
// https://github.com/yarpc/yarpc-go/pull/1966
return &Status{
code: CodeUnknown,
err: &wrapError{err: err},
}
}
func fromError(err error) (st *Status, ok bool) {
if errors.As(err, &st) {
return st, true
}
var yerr yarpcError
if errors.As(err, &yerr) {
return yerr.YARPCError(), true
}
return nil, false
}
// Unwrap supports errors.Unwrap.
//
// See "errors" package documentation for details.
func (s *Status) Unwrap() error {
if s == nil {
return nil
}
return errors.Unwrap(s.err)
}
// IsStatus returns whether the provided error is a YARPC error, or has a
// YARPCError() function to represent the error as a YARPC error. This includes
// wrapped errors.
//
// This is false if the error is nil.
func IsStatus(err error) bool {
_, ok := fromError(err)
return ok
}
// Status represents a YARPC error.
type Status struct {
code Code
name string
err error
details []byte
}
// WithName returns a new Status with the given name.
//
// This should be used for user-defined errors.
//
// Deprecated: Use only error codes to represent the type of the error.
func (s *Status) WithName(name string) *Status {
// TODO: We plan to add a WithDetails method to add semantic metadata to
// Statuses soon.
if s == nil {
return nil
}
return &Status{
code: s.code,
name: name,
err: s.err,
details: s.details,
}
}
// WithDetails returns a new status with the given details bytes.
func (s *Status) WithDetails(details []byte) *Status {
if s == nil {
return nil
}
if len(details) == 0 {
// this ensures that the details field is not set to some pointer if
// there's nothing in details.
details = nil
}
return &Status{
code: s.code,
name: s.name,
err: s.err,
details: details,
}
}
// Code returns the error code for this Status.
func (s *Status) Code() Code {
if s == nil {
return CodeOK
}
return s.code
}
// Name returns the name of the error for this Status.
//
// This is an empty string for all built-in YARPC errors. It may be customized
// by using WithName.
func (s *Status) Name() string {
if s == nil {
return ""
}
return s.name
}
// Message returns the error message for this Status.
func (s *Status) Message() string {
if s == nil {
return ""
}
return s.err.Error()
}
// Details returns the error details for this Status.
func (s *Status) Details() []byte {
if s == nil {
return nil
}
return s.details
}
// Error implements the error interface.
func (s *Status) Error() string {
buffer := bytes.NewBuffer(nil)
_, _ = buffer.WriteString(`code:`)
_, _ = buffer.WriteString(s.code.String())
if s.name != "" {
_, _ = buffer.WriteString(` name:`)
_, _ = buffer.WriteString(s.name)
}
if s.err != nil && s.err.Error() != "" {
_, _ = buffer.WriteString(` message:`)
_, _ = buffer.WriteString(s.err.Error())
}
return buffer.String()
}
// wrapError does what it says on the tin.
type wrapError struct {
err error
}
// Error returns the inner error message.
func (e *wrapError) Error() string {
if e == nil || e.err == nil {
return ""
}
return e.err.Error()
}
// Unwrap returns the inner error.
func (e *wrapError) Unwrap() error {
if e == nil {
return nil
}
return e.err
}
// CancelledErrorf returns a new Status with code CodeCancelled
// by calling Newf(CodeCancelled, format, args...).
func CancelledErrorf(format string, args ...interface{}) error {
return Newf(CodeCancelled, format, args...)
}
// UnknownErrorf returns a new Status with code CodeUnknown
// by calling Newf(CodeUnknown, format, args...).
func UnknownErrorf(format string, args ...interface{}) error {
return Newf(CodeUnknown, format, args...)
}
// InvalidArgumentErrorf returns a new Status with code CodeInvalidArgument
// by calling Newf(CodeInvalidArgument, format, args...).
func InvalidArgumentErrorf(format string, args ...interface{}) error {
return Newf(CodeInvalidArgument, format, args...)
}
// DeadlineExceededErrorf returns a new Status with code CodeDeadlineExceeded
// by calling Newf(CodeDeadlineExceeded, format, args...).
func DeadlineExceededErrorf(format string, args ...interface{}) error {
return Newf(CodeDeadlineExceeded, format, args...)
}
// NotFoundErrorf returns a new Status with code CodeNotFound
// by calling Newf(CodeNotFound, format, args...).
func NotFoundErrorf(format string, args ...interface{}) error {
return Newf(CodeNotFound, format, args...)
}
// AlreadyExistsErrorf returns a new Status with code CodeAlreadyExists
// by calling Newf(CodeAlreadyExists, format, args...).
func AlreadyExistsErrorf(format string, args ...interface{}) error {
return Newf(CodeAlreadyExists, format, args...)
}
// PermissionDeniedErrorf returns a new Status with code CodePermissionDenied
// by calling Newf(CodePermissionDenied, format, args...).
func PermissionDeniedErrorf(format string, args ...interface{}) error {
return Newf(CodePermissionDenied, format, args...)
}
// ResourceExhaustedErrorf returns a new Status with code CodeResourceExhausted
// by calling Newf(CodeResourceExhausted, format, args...).
func ResourceExhaustedErrorf(format string, args ...interface{}) error {
return Newf(CodeResourceExhausted, format, args...)
}
// FailedPreconditionErrorf returns a new Status with code CodeFailedPrecondition
// by calling Newf(CodeFailedPrecondition, format, args...).
func FailedPreconditionErrorf(format string, args ...interface{}) error {
return Newf(CodeFailedPrecondition, format, args...)
}
// AbortedErrorf returns a new Status with code CodeAborted
// by calling Newf(CodeAborted, format, args...).
func AbortedErrorf(format string, args ...interface{}) error {
return Newf(CodeAborted, format, args...)
}
// OutOfRangeErrorf returns a new Status with code CodeOutOfRange
// by calling Newf(CodeOutOfRange, format, args...).
func OutOfRangeErrorf(format string, args ...interface{}) error {
return Newf(CodeOutOfRange, format, args...)
}
// UnimplementedErrorf returns a new Status with code CodeUnimplemented
// by calling Newf(CodeUnimplemented, format, args...).
func UnimplementedErrorf(format string, args ...interface{}) error {
return Newf(CodeUnimplemented, format, args...)
}
// InternalErrorf returns a new Status with code CodeInternal
// by calling Newf(CodeInternal, format, args...).
func InternalErrorf(format string, args ...interface{}) error {
return Newf(CodeInternal, format, args...)
}
// UnavailableErrorf returns a new Status with code CodeUnavailable
// by calling Newf(CodeUnavailable, format, args...).
func UnavailableErrorf(format string, args ...interface{}) error {
return Newf(CodeUnavailable, format, args...)
}
// DataLossErrorf returns a new Status with code CodeDataLoss
// by calling Newf(CodeDataLoss, format, args...).
func DataLossErrorf(format string, args ...interface{}) error {
return Newf(CodeDataLoss, format, args...)
}
// UnauthenticatedErrorf returns a new Status with code CodeUnauthenticated
// by calling Newf(CodeUnauthenticated, format, args...).
func UnauthenticatedErrorf(format string, args ...interface{}) error {
return Newf(CodeUnauthenticated, format, args...)
}
// IsCancelled returns true if FromError(err).Code() == CodeCancelled.
func IsCancelled(err error) bool {
return FromError(err).Code() == CodeCancelled
}
// IsUnknown returns true if FromError(err).Code() == CodeUnknown.
func IsUnknown(err error) bool {
return FromError(err).Code() == CodeUnknown
}
// IsInvalidArgument returns true if FromError(err).Code() == CodeInvalidArgument.
func IsInvalidArgument(err error) bool {
return FromError(err).Code() == CodeInvalidArgument
}
// IsDeadlineExceeded returns true if FromError(err).Code() == CodeDeadlineExceeded.
func IsDeadlineExceeded(err error) bool {
return FromError(err).Code() == CodeDeadlineExceeded
}
// IsNotFound returns true if FromError(err).Code() == CodeNotFound.
func IsNotFound(err error) bool {
return FromError(err).Code() == CodeNotFound
}
// IsAlreadyExists returns true if FromError(err).Code() == CodeAlreadyExists.
func IsAlreadyExists(err error) bool {
return FromError(err).Code() == CodeAlreadyExists
}
// IsPermissionDenied returns true if FromError(err).Code() == CodePermissionDenied.
func IsPermissionDenied(err error) bool {
return FromError(err).Code() == CodePermissionDenied
}
// IsResourceExhausted returns true if FromError(err).Code() == CodeResourceExhausted.
func IsResourceExhausted(err error) bool {
return FromError(err).Code() == CodeResourceExhausted
}
// IsFailedPrecondition returns true if FromError(err).Code() == CodeFailedPrecondition.
func IsFailedPrecondition(err error) bool {
return FromError(err).Code() == CodeFailedPrecondition
}
// IsAborted returns true if FromError(err).Code() == CodeAborted.
func IsAborted(err error) bool {
return FromError(err).Code() == CodeAborted
}
// IsOutOfRange returns true if FromError(err).Code() == CodeOutOfRange.
func IsOutOfRange(err error) bool {
return FromError(err).Code() == CodeOutOfRange
}
// IsUnimplemented returns true if FromError(err).Code() == CodeUnimplemented.
func IsUnimplemented(err error) bool {
return FromError(err).Code() == CodeUnimplemented
}
// IsInternal returns true if FromError(err).Code() == CodeInternal.
func IsInternal(err error) bool {
return FromError(err).Code() == CodeInternal
}
// IsUnavailable returns true if FromError(err).Code() == CodeUnavailable.
func IsUnavailable(err error) bool {
return FromError(err).Code() == CodeUnavailable
}
// IsDataLoss returns true if FromError(err).Code() == CodeDataLoss.
func IsDataLoss(err error) bool {
return FromError(err).Code() == CodeDataLoss
}
// IsUnauthenticated returns true if FromError(err).Code() == CodeUnauthenticated.
func IsUnauthenticated(err error) bool {
return FromError(err).Code() == CodeUnauthenticated
}
// IsYARPCError returns whether the provided error is a YARPC error.
//
// This is always false if the error is nil.
//
// Deprecated: use IsStatus instead.
func IsYARPCError(err error) bool {
return IsStatus(err)
}
// ErrorCode returns the Code for the given error, CodeOK if the error is nil,
// or CodeUnknown if the given error is not a YARPC error.
//
// Deprecated: Use FromError and Code instead.
func ErrorCode(err error) Code {
return FromError(err).Code()
}
// ErrorName returns the name for the given error, or "" if the given
// error is not a YARPC error created with NamedErrorf that has a non-empty name.
//
// Deprecated: Use FromError and Name instead.
func ErrorName(err error) string {
return FromError(err).Name()
}
// ErrorMessage returns the message for the given error, or "" if the given
// error is nil, or err.Error() if the given error is not a YARPC error or
// the YARPC error had no message.
//
// Deprecated: Use FromError and Message instead.
func ErrorMessage(err error) string {
return FromError(err).Message()
}
// NamedErrorf returns a new Status with code CodeUnknown and the given name.
//
// This should be used for user-defined errors.
//
// The name must only contain lowercase letters from a-z and dashes (-), and
// cannot start or end in a dash. If the name is something else, an error with
// code CodeInternal will be returned.
//
// Deprecated: Use Newf and WithName instead.
func NamedErrorf(name string, format string, args ...interface{}) error {
return Newf(CodeUnknown, format, args...).WithName(name)
}
// FromHeaders returns a new Status from headers transmitted from the server side.
//
// If the specified code is CodeOK, this will return nil.
//
// The name must only contain lowercase letters from a-z and dashes (-), and
// cannot start or end in a dash. If the name is something else, an error with
// code CodeInternal will be returned.
//
// This function should not be used by server implementations, use the individual
// error constructors instead. This should only be used by transport implementations.
//
// Deprecated: Use Newf and WithName instead.
func FromHeaders(code Code, name string, message string) error {
return Newf(code, message).WithName(name)
}
| 1 | 19,725 | What do you think about renaming this method `FromErrorAndCode(code Code, err error) *Status`? One of the benefits is that it will be consistent with the naming of `func FromError(err error) *Status {` which does something almost similar. | yarpc-yarpc-go | go |
@@ -35,7 +35,7 @@ namespace Nethermind.JsonRpc
[ConfigItem(Description = "Port number for JSON RPC calls. Ensure the firewall is configured when enabling JSON RPC.", DefaultValue = "8545")]
int Port { get; set; }
- [ConfigItem(Description = "Port number for JSON RPC web socket calls. By default same port is used as regular JSON RPC. Ensure the firewall is configured when enabling JSON RPC.", DefaultValue = null)]
+ [ConfigItem(Description = "Port number for JSON RPC web socket calls. By default same port is used as regular JSON RPC. Ensure the firewall is configured when enabling JSON RPC.", DefaultValue = "8545")]
int WebSocketPort { get; set; }
[ConfigItem(Description = "Defines which RPC modules should be enabled.", DefaultValue = "all")] | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using Nethermind.Config;
namespace Nethermind.JsonRpc
{
public interface IJsonRpcConfig : IConfig
{
[ConfigItem(Description = "Defines whether the JSON RPC service is enabled on node startup. Configure host nad port if default values do not work for you.", DefaultValue = "false")]
bool Enabled { get; set; }
[ConfigItem(Description = "Host for JSON RPC calls. Ensure the firewall is configured when enabling JSON RPC. If it does not work with 117.0.0.1 try something like 10.0.0.4 or 192.168.0.1", DefaultValue = "\"127.0.0.1\"")]
string Host { get; set; }
[ConfigItem(Description = "Base file path for diagnostic JSON RPC recorder.", DefaultValue = "\"logs/rpc.log_1.txt\"")]
string RpcRecorderBaseFilePath { get; set; }
[ConfigItem(Description = "Defines whether the JSON RPC diagnostic recording is enabled on node startup. Do not enable unless you are a DEV diagnosing issues with JSON RPC.", DefaultValue = "false")]
bool RpcRecorderEnabled { get; set; }
[ConfigItem(Description = "Port number for JSON RPC calls. Ensure the firewall is configured when enabling JSON RPC.", DefaultValue = "8545")]
int Port { get; set; }
[ConfigItem(Description = "Port number for JSON RPC web socket calls. By default same port is used as regular JSON RPC. Ensure the firewall is configured when enabling JSON RPC.", DefaultValue = null)]
int WebSocketPort { get; set; }
[ConfigItem(Description = "Defines which RPC modules should be enabled.", DefaultValue = "all")]
string[] EnabledModules { get; set; }
}
} | 1 | 23,089 | we cannot state that by default the same value is used if we stopped using null - it would suggest that when you change JSON RPC and leave WS port default then they would be same. | NethermindEth-nethermind | .cs |
@@ -142,7 +142,7 @@ func chromeTestImpl(version protocol.VersionNumber, url string, blockUntilDone f
}
utils.Infof("Running chrome: %s '%s'", getChromePath(), strings.Join(args, "' '"))
command := exec.Command(path, args...)
- session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
+ session, err := gexec.Start(command, nil, nil)
Expect(err).NotTo(HaveOccurred())
defer session.Kill()
const pollInterval = 100 * time.Millisecond | 1 | package chrome_test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/lucas-clemente/quic-go/integrationtests/tools/testserver"
"github.com/lucas-clemente/quic-go/internal/utils"
"github.com/lucas-clemente/quic-go/protocol"
_ "github.com/lucas-clemente/quic-go/integrationtests/tools/testlog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"testing"
)
const (
nChromeRetries = 8
dataLen = 500 * 1024 // 500 KB
dataLongLen = 50 * 1024 * 1024 // 50 MB
)
var (
nFilesUploaded int32
testEndpointCalled bool
doneCalled bool
)
func TestChrome(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Chrome Suite")
}
func init() {
// Requires the len & num GET parameters, e.g. /uploadtest?len=100&num=1
http.HandleFunc("/uploadtest", func(w http.ResponseWriter, r *http.Request) {
defer GinkgoRecover()
response := uploadHTML
response = strings.Replace(response, "LENGTH", r.URL.Query().Get("len"), -1)
response = strings.Replace(response, "NUM", r.URL.Query().Get("num"), -1)
_, err := io.WriteString(w, response)
Expect(err).NotTo(HaveOccurred())
testEndpointCalled = true
})
// Requires the len & num GET parameters, e.g. /downloadtest?len=100&num=1
http.HandleFunc("/downloadtest", func(w http.ResponseWriter, r *http.Request) {
defer GinkgoRecover()
response := downloadHTML
response = strings.Replace(response, "LENGTH", r.URL.Query().Get("len"), -1)
response = strings.Replace(response, "NUM", r.URL.Query().Get("num"), -1)
_, err := io.WriteString(w, response)
Expect(err).NotTo(HaveOccurred())
testEndpointCalled = true
})
http.HandleFunc("/uploadhandler", func(w http.ResponseWriter, r *http.Request) {
defer GinkgoRecover()
l, err := strconv.Atoi(r.URL.Query().Get("len"))
Expect(err).NotTo(HaveOccurred())
defer r.Body.Close()
actual, err := ioutil.ReadAll(r.Body)
Expect(err).NotTo(HaveOccurred())
Expect(bytes.Equal(actual, testserver.GeneratePRData(l))).To(BeTrue())
atomic.AddInt32(&nFilesUploaded, 1)
})
http.HandleFunc("/done", func(w http.ResponseWriter, r *http.Request) {
doneCalled = true
})
}
var _ = JustBeforeEach(testserver.StartQuicServer)
var _ = AfterEach(func() {
testserver.StopQuicServer()
nFilesUploaded = 0
doneCalled = false
testEndpointCalled = false
})
func getChromePath() string {
if runtime.GOOS == "darwin" {
return "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
}
if path, err := exec.LookPath("google-chrome"); err == nil {
return path
}
if path, err := exec.LookPath("chromium-browser"); err == nil {
return path
}
Fail("No Chrome executable found.")
return ""
}
func chromeTest(version protocol.VersionNumber, url string, blockUntilDone func()) {
// Chrome sometimes starts but doesn't send any HTTP requests for no apparent reason.
// Retry starting it a couple of times.
for i := 0; i < nChromeRetries; i++ {
if chromeTestImpl(version, url, blockUntilDone) {
return
}
}
Fail("Chrome didn't hit the testing endpoints")
}
func chromeTestImpl(version protocol.VersionNumber, url string, blockUntilDone func()) bool {
userDataDir, err := ioutil.TempDir("", "quic-go-test-chrome-dir")
Expect(err).NotTo(HaveOccurred())
defer os.RemoveAll(userDataDir)
path := getChromePath()
args := []string{
"--disable-gpu",
"--no-first-run=true",
"--no-default-browser-check=true",
"--user-data-dir=" + userDataDir,
"--enable-quic=true",
"--no-proxy-server=true",
"--origin-to-force-quic-on=quic.clemente.io:443",
fmt.Sprintf(`--host-resolver-rules=MAP quic.clemente.io:443 localhost:%s`, testserver.Port()),
fmt.Sprintf("--quic-version=QUIC_VERSION_%d", version),
url,
}
utils.Infof("Running chrome: %s '%s'", getChromePath(), strings.Join(args, "' '"))
command := exec.Command(path, args...)
session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
Expect(err).NotTo(HaveOccurred())
defer session.Kill()
const pollInterval = 100 * time.Millisecond
const pollDuration = 10 * time.Second
for i := 0; i < int(pollDuration/pollInterval); i++ {
time.Sleep(pollInterval)
if testEndpointCalled {
break
}
}
if !testEndpointCalled {
return false
}
blockUntilDone()
return true
}
func waitForDone() {
Eventually(func() bool { return doneCalled }, 60).Should(BeTrue())
}
func waitForNUploaded(expected int) func() {
return func() {
Eventually(func() int32 {
return atomic.LoadInt32(&nFilesUploaded)
}, 60).Should(BeEquivalentTo(expected))
}
}
const commonJS = `
var buf = new ArrayBuffer(LENGTH);
var prng = new Uint8Array(buf);
var seed = 1;
for (var i = 0; i < LENGTH; i++) {
// https://en.wikipedia.org/wiki/Lehmer_random_number_generator
seed = seed * 48271 % 2147483647;
prng[i] = seed;
}
`
const uploadHTML = `
<html>
<body>
<script>
console.log("Running DL test...");
` + commonJS + `
for (var i = 0; i < NUM; i++) {
var req = new XMLHttpRequest();
req.open("POST", "/uploadhandler?len=" + LENGTH, true);
req.send(buf);
}
</script>
</body>
</html>
`
const downloadHTML = `
<html>
<body>
<script>
console.log("Running DL test...");
` + commonJS + `
function verify(data) {
if (data.length !== LENGTH) return false;
for (var i = 0; i < LENGTH; i++) {
if (data[i] !== prng[i]) return false;
}
return true;
}
var nOK = 0;
for (var i = 0; i < NUM; i++) {
let req = new XMLHttpRequest();
req.responseType = "arraybuffer";
req.open("POST", "/prdata?len=" + LENGTH, true);
req.onreadystatechange = function () {
if (req.readyState === XMLHttpRequest.DONE && req.status === 200) {
if (verify(new Uint8Array(req.response))) {
nOK++;
if (nOK === NUM) {
console.log("Done :)");
var reqDone = new XMLHttpRequest();
reqDone.open("GET", "/done");
reqDone.send();
}
}
}
};
req.send();
}
</script>
</body>
</html>
`
| 1 | 6,601 | I'm still not convinced dropping Chrome logs is the right thing to do, but if you insist | lucas-clemente-quic-go | go |
@@ -38,7 +38,7 @@ public class ReflectionParameterDeclaration implements ParameterDeclaration {
@Override
public String getName() {
- throw new UnsupportedOperationException();
+ throw new UnsupportedOperationException("Unable to get the name");
}
@Override | 1 | /*
* Copyright 2016 Federico Tomassetti
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.javaparser.symbolsolver.reflectionmodel;
import com.github.javaparser.symbolsolver.model.declarations.ParameterDeclaration;
import com.github.javaparser.symbolsolver.model.resolution.TypeSolver;
import com.github.javaparser.symbolsolver.model.typesystem.Type;
/**
* @author Federico Tomassetti
*/
public class ReflectionParameterDeclaration implements ParameterDeclaration {
private Class<?> type;
private java.lang.reflect.Type genericType;
private TypeSolver typeSolver;
private boolean variadic;
public ReflectionParameterDeclaration(Class<?> type, java.lang.reflect.Type genericType, TypeSolver typeSolver, boolean variadic) {
this.type = type;
this.genericType = genericType;
this.typeSolver = typeSolver;
this.variadic = variadic;
}
@Override
public String getName() {
throw new UnsupportedOperationException();
}
@Override
public String toString() {
return "ReflectionParameterDeclaration{" +
"type=" + type +
'}';
}
@Override
public boolean isField() {
return false;
}
@Override
public boolean isParameter() {
return true;
}
@Override
public boolean isVariadic() {
return variadic;
}
@Override
public boolean isType() {
return false;
}
@Override
public Type getType() {
return ReflectionFactory.typeUsageFor(genericType, typeSolver);
}
}
| 1 | 14,156 | It would be helpful to include some reasoning here. With the reflection model, it is often the case that names are not available. | javaparser-javaparser | java |
@@ -82,6 +82,7 @@ func Run() {
certmagic.UserAgent = appName + "/" + appVersion
// Set up process log before anything bad happens
+ caddy.LogDestination = logfile
switch logfile {
case "stdout":
log.SetOutput(os.Stdout) | 1 | // Copyright 2015 Light Code Labs, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddymain
import (
"bufio"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"github.com/google/uuid"
"github.com/klauspost/cpuid"
"github.com/mholt/caddy"
"github.com/mholt/caddy/caddyfile"
"github.com/mholt/caddy/caddytls"
"github.com/mholt/caddy/telemetry"
"github.com/mholt/certmagic"
lumberjack "gopkg.in/natefinch/lumberjack.v2"
_ "github.com/mholt/caddy/caddyhttp" // plug in the HTTP server type
// This is where other plugins get plugged in (imported)
)
func init() {
caddy.TrapSignals()
setVersion()
flag.BoolVar(&certmagic.Agreed, "agree", false, "Agree to the CA's Subscriber Agreement")
flag.StringVar(&certmagic.CA, "ca", certmagic.CA, "URL to certificate authority's ACME server directory")
flag.StringVar(&certmagic.DefaultServerName, "default-sni", certmagic.DefaultServerName, "If a ClientHello ServerName is empty, use this ServerName to choose a TLS certificate")
flag.BoolVar(&certmagic.DisableHTTPChallenge, "disable-http-challenge", certmagic.DisableHTTPChallenge, "Disable the ACME HTTP challenge")
flag.BoolVar(&certmagic.DisableTLSALPNChallenge, "disable-tls-alpn-challenge", certmagic.DisableTLSALPNChallenge, "Disable the ACME TLS-ALPN challenge")
flag.StringVar(&disabledMetrics, "disabled-metrics", "", "Comma-separated list of telemetry metrics to disable")
flag.StringVar(&conf, "conf", "", "Caddyfile to load (default \""+caddy.DefaultConfigFile+"\")")
flag.StringVar(&cpu, "cpu", "100%", "CPU cap")
flag.StringVar(&envFile, "env", "", "Path to file with environment variables to load in KEY=VALUE format")
flag.BoolVar(&fromJSON, "json-to-caddyfile", false, "From JSON stdin to Caddyfile stdout")
flag.BoolVar(&plugins, "plugins", false, "List installed plugins")
flag.StringVar(&certmagic.Email, "email", "", "Default ACME CA account email address")
flag.DurationVar(&certmagic.HTTPTimeout, "catimeout", certmagic.HTTPTimeout, "Default ACME CA HTTP timeout")
flag.StringVar(&logfile, "log", "", "Process log file")
flag.IntVar(&logRollMB, "log-roll-mb", 100, "Roll process log when it reaches this many megabytes (0 to disable rolling)")
flag.BoolVar(&logRollCompress, "log-roll-compress", true, "Gzip-compress rolled process log files")
flag.StringVar(&caddy.PidFile, "pidfile", "", "Path to write pid file")
flag.BoolVar(&caddy.Quiet, "quiet", false, "Quiet mode (no initialization output)")
flag.StringVar(&revoke, "revoke", "", "Hostname for which to revoke the certificate")
flag.StringVar(&serverType, "type", "http", "Type of server to run")
flag.BoolVar(&toJSON, "caddyfile-to-json", false, "From Caddyfile stdin to JSON stdout")
flag.BoolVar(&version, "version", false, "Show version")
flag.BoolVar(&validate, "validate", false, "Parse the Caddyfile but do not start the server")
caddy.RegisterCaddyfileLoader("flag", caddy.LoaderFunc(confLoader))
caddy.SetDefaultCaddyfileLoader("default", caddy.LoaderFunc(defaultLoader))
}
// Run is Caddy's main() function.
func Run() {
flag.Parse()
caddy.AppName = appName
caddy.AppVersion = appVersion
certmagic.UserAgent = appName + "/" + appVersion
// Set up process log before anything bad happens
switch logfile {
case "stdout":
log.SetOutput(os.Stdout)
case "stderr":
log.SetOutput(os.Stderr)
case "":
log.SetOutput(ioutil.Discard)
default:
if logRollMB > 0 {
log.SetOutput(&lumberjack.Logger{
Filename: logfile,
MaxSize: logRollMB,
MaxAge: 14,
MaxBackups: 10,
Compress: logRollCompress,
})
} else {
err := os.MkdirAll(filepath.Dir(logfile), 0755)
if err != nil {
mustLogFatalf("%v", err)
}
f, err := os.OpenFile(logfile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
mustLogFatalf("%v", err)
}
// don't close file; log should be writeable for duration of process
log.SetOutput(f)
}
}
// load all additional envs as soon as possible
if err := LoadEnvFromFile(envFile); err != nil {
mustLogFatalf("%v", err)
}
// initialize telemetry client
if EnableTelemetry {
err := initTelemetry()
if err != nil {
mustLogFatalf("[ERROR] Initializing telemetry: %v", err)
}
} else if disabledMetrics != "" {
mustLogFatalf("[ERROR] Cannot disable specific metrics because telemetry is disabled")
}
// Check for one-time actions
if revoke != "" {
err := caddytls.Revoke(revoke)
if err != nil {
mustLogFatalf("%v", err)
}
fmt.Printf("Revoked certificate for %s\n", revoke)
os.Exit(0)
}
if version {
fmt.Printf("%s %s (unofficial)\n", appName, appVersion)
if devBuild && gitShortStat != "" {
fmt.Printf("%s\n%s\n", gitShortStat, gitFilesModified)
}
os.Exit(0)
}
if plugins {
fmt.Println(caddy.DescribePlugins())
os.Exit(0)
}
// Check if we just need to do a Caddyfile Convert and exit
checkJSONCaddyfile()
// Set CPU cap
err := setCPU(cpu)
if err != nil {
mustLogFatalf("%v", err)
}
// Executes Startup events
caddy.EmitEvent(caddy.StartupEvent, nil)
// Get Caddyfile input
caddyfileinput, err := caddy.LoadCaddyfile(serverType)
if err != nil {
mustLogFatalf("%v", err)
}
if validate {
err := caddy.ValidateAndExecuteDirectives(caddyfileinput, nil, true)
if err != nil {
mustLogFatalf("%v", err)
}
msg := "Caddyfile is valid"
fmt.Println(msg)
log.Printf("[INFO] %s", msg)
os.Exit(0)
}
// Start your engines
instance, err := caddy.Start(caddyfileinput)
if err != nil {
mustLogFatalf("%v", err)
}
// Begin telemetry (these are no-ops if telemetry disabled)
telemetry.Set("caddy_version", appVersion)
telemetry.Set("num_listeners", len(instance.Servers()))
telemetry.Set("server_type", serverType)
telemetry.Set("os", runtime.GOOS)
telemetry.Set("arch", runtime.GOARCH)
telemetry.Set("cpu", struct {
BrandName string `json:"brand_name,omitempty"`
NumLogical int `json:"num_logical,omitempty"`
AESNI bool `json:"aes_ni,omitempty"`
}{
BrandName: cpuid.CPU.BrandName,
NumLogical: runtime.NumCPU(),
AESNI: cpuid.CPU.AesNi(),
})
if containerized := detectContainer(); containerized {
telemetry.Set("container", containerized)
}
telemetry.StartEmitting()
// Twiddle your thumbs
instance.Wait()
}
// mustLogFatalf wraps log.Fatalf() in a way that ensures the
// output is always printed to stderr so the user can see it
// if the user is still there, even if the process log was not
// enabled. If this process is an upgrade, however, and the user
// might not be there anymore, this just logs to the process
// log and exits.
func mustLogFatalf(format string, args ...interface{}) {
if !caddy.IsUpgrade() {
log.SetOutput(os.Stderr)
}
log.Fatalf(format, args...)
}
// confLoader loads the Caddyfile using the -conf flag.
func confLoader(serverType string) (caddy.Input, error) {
if conf == "" {
return nil, nil
}
if conf == "stdin" {
return caddy.CaddyfileFromPipe(os.Stdin, serverType)
}
var contents []byte
if strings.Contains(conf, "*") {
// Let caddyfile.doImport logic handle the globbed path
contents = []byte("import " + conf)
} else {
var err error
contents, err = ioutil.ReadFile(conf)
if err != nil {
return nil, err
}
}
return caddy.CaddyfileInput{
Contents: contents,
Filepath: conf,
ServerTypeName: serverType,
}, nil
}
// defaultLoader loads the Caddyfile from the current working directory.
func defaultLoader(serverType string) (caddy.Input, error) {
contents, err := ioutil.ReadFile(caddy.DefaultConfigFile)
if err != nil {
if os.IsNotExist(err) {
return nil, nil
}
return nil, err
}
return caddy.CaddyfileInput{
Contents: contents,
Filepath: caddy.DefaultConfigFile,
ServerTypeName: serverType,
}, nil
}
// setVersion figures out the version information
// based on variables set by -ldflags.
func setVersion() {
// A development build is one that's not at a tag or has uncommitted changes
devBuild = gitTag == "" || gitShortStat != ""
if buildDate != "" {
buildDate = " " + buildDate
}
// Only set the appVersion if -ldflags was used
if gitNearestTag != "" || gitTag != "" {
if devBuild && gitNearestTag != "" {
appVersion = fmt.Sprintf("%s (+%s%s)",
strings.TrimPrefix(gitNearestTag, "v"), gitCommit, buildDate)
} else if gitTag != "" {
appVersion = strings.TrimPrefix(gitTag, "v")
}
}
}
func checkJSONCaddyfile() {
if fromJSON {
jsonBytes, err := ioutil.ReadAll(os.Stdin)
if err != nil {
fmt.Fprintf(os.Stderr, "Read stdin failed: %v", err)
os.Exit(1)
}
caddyfileBytes, err := caddyfile.FromJSON(jsonBytes)
if err != nil {
fmt.Fprintf(os.Stderr, "Converting from JSON failed: %v", err)
os.Exit(2)
}
fmt.Println(string(caddyfileBytes))
os.Exit(0)
}
if toJSON {
caddyfileBytes, err := ioutil.ReadAll(os.Stdin)
if err != nil {
fmt.Fprintf(os.Stderr, "Read stdin failed: %v", err)
os.Exit(1)
}
jsonBytes, err := caddyfile.ToJSON(caddyfileBytes)
if err != nil {
fmt.Fprintf(os.Stderr, "Converting to JSON failed: %v", err)
os.Exit(2)
}
fmt.Println(string(jsonBytes))
os.Exit(0)
}
}
// setCPU parses string cpu and sets GOMAXPROCS
// according to its value. It accepts either
// a number (e.g. 3) or a percent (e.g. 50%).
// If the percent resolves to less than a single
// GOMAXPROCS, it rounds it up to GOMAXPROCS=1.
func setCPU(cpu string) error {
var numCPU int
availCPU := runtime.NumCPU()
if strings.HasSuffix(cpu, "%") {
// Percent
var percent float32
pctStr := cpu[:len(cpu)-1]
pctInt, err := strconv.Atoi(pctStr)
if err != nil || pctInt < 1 || pctInt > 100 {
return errors.New("invalid CPU value: percentage must be between 1-100")
}
percent = float32(pctInt) / 100
numCPU = int(float32(availCPU) * percent)
if numCPU < 1 {
numCPU = 1
}
} else {
// Number
num, err := strconv.Atoi(cpu)
if err != nil || num < 1 {
return errors.New("invalid CPU value: provide a number or percent greater than 0")
}
numCPU = num
}
if numCPU > availCPU {
numCPU = availCPU
}
runtime.GOMAXPROCS(numCPU)
return nil
}
// detectContainer attempts to determine whether the process is
// being run inside a container. References:
// https://tuhrig.de/how-to-know-you-are-inside-a-docker-container/
// https://stackoverflow.com/a/20012536/1048862
// https://gist.github.com/anantkamath/623ce7f5432680749e087cf8cfba9b69
func detectContainer() bool {
if runtime.GOOS != "linux" {
return false
}
file, err := os.Open("/proc/1/cgroup")
if err != nil {
return false
}
defer file.Close()
i := 0
scanner := bufio.NewScanner(file)
for scanner.Scan() {
i++
if i > 1000 {
return false
}
line := scanner.Text()
parts := strings.SplitN(line, ":", 3)
if len(parts) < 3 {
continue
}
if strings.Contains(parts[2], "docker") ||
strings.Contains(parts[2], "lxc") ||
strings.Contains(parts[2], "moby") {
return true
}
}
return false
}
// initTelemetry initializes the telemetry engine.
func initTelemetry() error {
uuidFilename := filepath.Join(caddy.AssetsPath(), "uuid")
if customUUIDFile := os.Getenv("CADDY_UUID_FILE"); customUUIDFile != "" {
uuidFilename = customUUIDFile
}
newUUID := func() uuid.UUID {
id := uuid.New()
err := os.MkdirAll(caddy.AssetsPath(), 0700)
if err != nil {
log.Printf("[ERROR] Persisting instance UUID: %v", err)
return id
}
err = ioutil.WriteFile(uuidFilename, []byte(id.String()), 0600) // human-readable as a string
if err != nil {
log.Printf("[ERROR] Persisting instance UUID: %v", err)
}
return id
}
var id uuid.UUID
// load UUID from storage, or create one if we don't have one
if uuidFile, err := os.Open(uuidFilename); os.IsNotExist(err) {
// no UUID exists yet; create a new one and persist it
id = newUUID()
} else if err != nil {
log.Printf("[ERROR] Loading persistent UUID: %v", err)
id = newUUID()
} else {
defer uuidFile.Close()
uuidBytes, err := ioutil.ReadAll(uuidFile)
if err != nil {
log.Printf("[ERROR] Reading persistent UUID: %v", err)
id = newUUID()
} else {
id, err = uuid.ParseBytes(uuidBytes)
if err != nil {
log.Printf("[ERROR] Parsing UUID: %v", err)
id = newUUID()
}
}
}
// parse and check the list of disabled metrics
var disabledMetricsSlice []string
if len(disabledMetrics) > 0 {
if len(disabledMetrics) > 1024 {
// mitigate disk space exhaustion at the collection endpoint
return fmt.Errorf("too many metrics to disable")
}
disabledMetricsSlice = splitTrim(disabledMetrics, ",")
for _, metric := range disabledMetricsSlice {
if metric == "instance_id" || metric == "timestamp" || metric == "disabled_metrics" {
return fmt.Errorf("instance_id, timestamp, and disabled_metrics cannot be disabled")
}
}
}
// initialize telemetry
telemetry.Init(id, disabledMetricsSlice)
// if any metrics were disabled, report which ones (so we know how representative the data is)
if len(disabledMetricsSlice) > 0 {
telemetry.Set("disabled_metrics", disabledMetricsSlice)
log.Printf("[NOTICE] The following telemetry metrics are disabled: %s", disabledMetrics)
}
return nil
}
// Split string s into all substrings separated by sep and returns a slice of
// the substrings between those separators.
//
// If s does not contain sep and sep is not empty, Split returns a
// slice of length 1 whose only element is s.
//
// If sep is empty, Split splits after each UTF-8 sequence. If both s
// and sep are empty, Split returns an empty slice.
//
// Each item in the result is trimmed of whitespace and omitted if it is an empty string.
func splitTrim(s string, sep string) []string {
splitItems := strings.Split(s, sep)
trimItems := make([]string, 0, len(splitItems))
for _, item := range splitItems {
if item = strings.TrimSpace(item); item != "" {
trimItems = append(trimItems, item)
}
}
return trimItems
}
// LoadEnvFromFile loads additional envs if file provided and exists
// Envs in file should be in KEY=VALUE format
func LoadEnvFromFile(envFile string) error {
if envFile == "" {
return nil
}
file, err := os.Open(envFile)
if err != nil {
return err
}
defer file.Close()
envMap, err := ParseEnvFile(file)
if err != nil {
return err
}
for k, v := range envMap {
if err := os.Setenv(k, v); err != nil {
return err
}
}
return nil
}
// ParseEnvFile implements parse logic for environment files
func ParseEnvFile(envInput io.Reader) (map[string]string, error) {
envMap := make(map[string]string)
scanner := bufio.NewScanner(envInput)
var line string
lineNumber := 0
for scanner.Scan() {
line = strings.TrimSpace(scanner.Text())
lineNumber++
// skip lines starting with comment
if strings.HasPrefix(line, "#") {
continue
}
// skip empty line
if len(line) == 0 {
continue
}
fields := strings.SplitN(line, "=", 2)
if len(fields) != 2 {
return nil, fmt.Errorf("Can't parse line %d; line should be in KEY=VALUE format", lineNumber)
}
if strings.Contains(fields[0], " ") {
return nil, fmt.Errorf("Can't parse line %d; KEY contains whitespace", lineNumber)
}
key := fields[0]
val := fields[1]
if key == "" {
return nil, fmt.Errorf("Can't parse line %d; KEY can't be empty string", lineNumber)
}
envMap[key] = val
}
if err := scanner.Err(); err != nil {
return nil, err
}
return envMap, nil
}
const appName = "Caddy"
// Flags that control program flow or startup
var (
serverType string
conf string
cpu string
envFile string
fromJSON bool
logfile string
logRollMB int
logRollCompress bool
revoke string
toJSON bool
version bool
plugins bool
validate bool
disabledMetrics string
)
// Build information obtained with the help of -ldflags
var (
appVersion = "(untracked dev build)" // inferred at startup
devBuild = true // inferred at startup
buildDate string // date -u
gitTag string // git describe --exact-match HEAD 2> /dev/null
gitNearestTag string // git describe --abbrev=0 --tags HEAD
gitCommit string // git rev-parse HEAD
gitShortStat string // git diff-index --shortstat
gitFilesModified string // git diff-index --name-only HEAD
)
// EnableTelemetry defines whether telemetry is enabled in Run.
var EnableTelemetry = true
| 1 | 13,082 | Instead of setting this here, why not have the flag call in the init function set `caddy.LogDestination` directly, and switch on that? | caddyserver-caddy | go |
@@ -20,6 +20,7 @@
* External dependencies
*/
import { get } from 'lodash';
+import { __ } from '@wordpress/i18n';
/**
* Formats a number using the JS Internationalization Number Format API. | 1 | /**
* Internationalization Utilities.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import { get } from 'lodash';
/**
* Formats a number using the JS Internationalization Number Format API.
*
* @since 1.8.0
* @see {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/NumberFormat/NumberFormat|`options` parameter} For all available formatting options.
*
* @param {number} number The number to format.
* @param {Object} [options] Formatting options.
* @param {string} [options.locale] Locale to use for formatting. Defaults to current locale used by Site Kit.
* @return {string} The formatted number.
*/
export const numberFormat = ( number, options = {} ) => {
const { locale = getLocale(), ...formatOptions } = options;
return new Intl.NumberFormat( locale, formatOptions ).format( number );
};
/**
* Gets the current locale for use with browser APIs.
*
* @since 1.8.0
*
* @param {Object} _global The global window object.
* @return {string} Current Site Kit locale if set, otherwise the current language set by the browser.
* E.g. `en-US` or `de-DE`
*/
export const getLocale = ( _global = global ) => {
const siteKitLocale = get( _global, [ '_googlesitekitLegacyData', 'locale' ] );
if ( siteKitLocale ) {
const matches = siteKitLocale.match( /^(\w{2})?(_)?(\w{2})/ );
if ( matches && matches[ 0 ] ) {
return matches[ 0 ].replace( /_/g, '-' );
}
}
return _global.navigator.language;
};
| 1 | 34,361 | This belongs under "WordPress dependencies". | google-site-kit-wp | js |
@@ -4,6 +4,8 @@ using Datadog.Trace.TestHelpers;
using Xunit;
using Xunit.Abstractions;
+#if !NET452
+
namespace Datadog.Trace.ClrProfiler.IntegrationTests
{
public class Elasticsearch5Tests : TestHelper | 1 | using System.Collections.Generic;
using System.Linq;
using Datadog.Trace.TestHelpers;
using Xunit;
using Xunit.Abstractions;
namespace Datadog.Trace.ClrProfiler.IntegrationTests
{
public class Elasticsearch5Tests : TestHelper
{
public Elasticsearch5Tests(ITestOutputHelper output)
: base("Elasticsearch.V5", output)
{
}
[Theory]
[MemberData(nameof(PackageVersions.ElasticSearch5), MemberType = typeof(PackageVersions))]
[Trait("Category", "EndToEnd")]
public void SubmitsTraces(string packageVersion)
{
int agentPort = TcpPortProvider.GetOpenPort();
using (var agent = new MockTracerAgent(agentPort))
using (var processResult = RunSampleAndWaitForExit(agent.Port, packageVersion: packageVersion))
{
Assert.True(processResult.ExitCode >= 0, $"Process exited with code {processResult.ExitCode}");
var expected = new List<string>();
// commands with sync and async
for (var i = 0; i < 2; i++)
{
expected.AddRange(new List<string>
{
"Bulk",
"Create",
"Search",
"DeleteByQuery",
"CreateIndex",
"IndexExists",
"UpdateIndexSettings",
"BulkAlias",
"GetAlias",
"PutAlias",
// "AliasExists",
"DeleteAlias",
"DeleteAlias",
"CreateIndex",
// "SplitIndex",
"DeleteIndex",
"CloseIndex",
"OpenIndex",
"PutIndexTemplate",
"IndexTemplateExists",
"DeleteIndexTemplate",
"IndicesShardStores",
"IndicesStats",
"DeleteIndex",
"GetAlias",
"CatAliases",
"CatAllocation",
"CatCount",
"CatFielddata",
"CatHealth",
"CatHelp",
"CatIndices",
"CatMaster",
"CatNodeAttributes",
"CatNodes",
"CatPendingTasks",
"CatPlugins",
"CatRecovery",
"CatRepositories",
"CatSegments",
"CatShards",
// "CatSnapshots",
"CatTasks",
"CatTemplates",
"CatThreadPool",
// "PutJob",
// "ValidateJob",
// "GetInfluencers",
// "GetJobs",
// "GetJobStats",
// "GetModelSnapshots",
// "GetOverallBuckets",
// "FlushJob",
// "ForecastJob",
// "GetAnomalyRecords",
// "GetBuckets",
// "GetCategories",
// "CloseJob",
// "OpenJob",
// "DeleteJob",
"ClusterAllocationExplain",
"ClusterGetSettings",
"ClusterHealth",
"ClusterPendingTasks",
"ClusterPutSettings",
"ClusterReroute",
"ClusterState",
"ClusterStats",
"PutRole",
// "PutRoleMapping",
"GetRole",
// "GetRoleMapping",
// "DeleteRoleMapping",
"DeleteRole",
"PutUser",
"ChangePassword",
"GetUser",
// "DisableUser",
"DeleteUser",
});
}
var spans = agent.WaitForSpans(expected.Count)
.Where(s => s.Type == "elasticsearch")
.OrderBy(s => s.Start)
.ToList();
foreach (var span in spans)
{
Assert.Equal("elasticsearch.query", span.Name);
Assert.Equal("Samples.Elasticsearch.V5-elasticsearch", span.Service);
Assert.Equal("elasticsearch", span.Type);
}
ValidateSpans(spans, (span) => span.Resource, expected);
}
}
}
}
| 1 | 14,966 | Was there an issue on `net452`? | DataDog-dd-trace-dotnet | .cs |
@@ -201,12 +201,17 @@ namespace Microsoft.Cci.Writers.CSharp
WriteOutParameterInitializations(method);
- if (_forCompilationThrowPlatformNotSupported)
+ if (_platformNotSupportedExceptionMessage != null)
{
Write("throw new ");
if (_forCompilationIncludeGlobalprefix)
Write("global::");
- Write("System.PlatformNotSupportedException(); ");
+ if(_platformNotSupportedExceptionMessage.Equals("Default"))
+ Write("System.PlatformNotSupportedException();");
+ else if(_platformNotSupportedExceptionMessage.StartsWith("SR."))
+ Write($"System.PlatformNotSupportedException({_platformNotSupportedExceptionMessage}); ");
+ else
+ Write($"System.PlatformNotSupportedException(\"{_platformNotSupportedExceptionMessage}\"); ");
}
else if (method.ContainingTypeDefinition.IsValueType && method.IsConstructor)
{ | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System.Collections.Generic;
using System.Diagnostics.Contracts;
using System.Linq;
using Microsoft.Cci.Extensions;
using Microsoft.Cci.Extensions.CSharp;
using Microsoft.Cci.Writers.Syntax;
namespace Microsoft.Cci.Writers.CSharp
{
public partial class CSDeclarationWriter
{
private void WriteMethodDefinition(IMethodDefinition method)
{
if (method.IsPropertyOrEventAccessor())
return;
if (method.IsDestructor())
{
WriteDestructor(method);
return;
}
string name = method.GetMethodName();
WriteMethodPseudoCustomAttributes(method);
WriteAttributes(method.Attributes);
WriteAttributes(method.SecurityAttributes);
if (!method.ContainingTypeDefinition.IsInterface)
{
if (!method.IsExplicitInterfaceMethod()) WriteVisibility(method.Visibility);
WriteMethodModifiers(method);
}
WriteInterfaceMethodModifiers(method);
WriteMethodDefinitionSignature(method, name);
WriteMethodBody(method);
}
private void WriteDestructor(IMethodDefinition method)
{
WriteSymbol("~");
WriteIdentifier(((INamedEntity)method.ContainingTypeDefinition).Name);
WriteSymbol("(");
WriteSymbol(")", false);
WriteEmptyBody();
}
private void WriteTypeName(ITypeReference type, ITypeReference containingType, bool isDynamic = false)
{
var useKeywords = containingType.GetTypeName() != type.GetTypeName();
WriteTypeName(type, isDynamic: isDynamic, useTypeKeywords: useKeywords);
}
private void WriteMethodDefinitionSignature(IMethodDefinition method, string name)
{
bool isOperator = method.IsConversionOperator();
if (!isOperator && !method.IsConstructor)
{
WriteAttributes(method.ReturnValueAttributes, true);
if (method.ReturnValueIsByRef)
WriteKeyword("ref");
// We are ignoring custom modifiers right now, we might need to add them later.
WriteTypeName(method.Type, method.ContainingType, isDynamic: IsDynamic(method.ReturnValueAttributes));
}
WriteIdentifier(name);
if (isOperator)
{
WriteSpace();
WriteTypeName(method.Type, method.ContainingType);
}
Contract.Assert(!(method is IGenericMethodInstance), "Currently don't support generic method instances");
if (method.IsGeneric)
WriteGenericParameters(method.GenericParameters);
WriteParameters(method.Parameters, method.ContainingType, extensionMethod: method.IsExtensionMethod(), acceptsExtraArguments: method.AcceptsExtraArguments);
if (method.IsGeneric && !method.IsOverride() && !method.IsExplicitInterfaceMethod())
WriteGenericContraints(method.GenericParameters);
}
private void WriteParameters(IEnumerable<IParameterDefinition> parameters, ITypeReference containingType, bool property = false, bool extensionMethod = false, bool acceptsExtraArguments = false)
{
string start = property ? "[" : "(";
string end = property ? "]" : ")";
WriteSymbol(start);
_writer.WriteList(parameters, p =>
{
WriteParameter(p, containingType, extensionMethod);
extensionMethod = false;
});
if (acceptsExtraArguments)
{
if (parameters.Any())
_writer.WriteSymbol(",");
_writer.WriteSpace();
_writer.Write("__arglist");
}
WriteSymbol(end);
}
private void WriteParameter(IParameterDefinition parameter, ITypeReference containingType, bool extensionMethod)
{
WriteAttributes(parameter.Attributes, true);
if (extensionMethod)
WriteKeyword("this");
if (parameter.IsParameterArray)
WriteKeyword("params");
if (parameter.IsOut && !parameter.IsIn && parameter.IsByReference)
{
WriteKeyword("out");
}
else
{
                // For In/Out we should not emit them until we find a scenario that needs them.
//if (parameter.IsIn)
// WriteFakeAttribute("System.Runtime.InteropServices.In", writeInline: true);
//if (parameter.IsOut)
// WriteFakeAttribute("System.Runtime.InteropServices.Out", writeInline: true);
if (parameter.IsByReference)
WriteKeyword("ref");
}
WriteTypeName(parameter.Type, containingType, isDynamic: IsDynamic(parameter.Attributes));
WriteIdentifier(parameter.Name);
if (parameter.IsOptional && parameter.HasDefaultValue)
{
WriteSymbol("=");
WriteMetadataConstant(parameter.DefaultValue, parameter.Type);
}
}
private void WriteInterfaceMethodModifiers(IMethodDefinition method)
{
if (method.GetHiddenBaseMethod(_filter) != Dummy.Method)
WriteKeyword("new");
}
private void WriteMethodModifiers(IMethodDefinition method)
{
if (method.IsMethodUnsafe())
WriteKeyword("unsafe");
if (method.IsStatic)
WriteKeyword("static");
if (method.IsPlatformInvoke)
WriteKeyword("extern");
if (method.IsVirtual)
{
if (method.IsNewSlot)
{
if (method.IsAbstract)
WriteKeyword("abstract");
else if (!method.IsSealed) // non-virtual interfaces implementations are sealed virtual newslots
WriteKeyword("virtual");
}
else
{
if (method.IsAbstract)
WriteKeyword("abstract");
else if (method.IsSealed)
WriteKeyword("sealed");
WriteKeyword("override");
}
}
}
private void WriteMethodBody(IMethodDefinition method)
{
if (method.IsAbstract || !_forCompilation || method.IsPlatformInvoke)
{
WriteSymbol(";");
return;
}
if (method.IsConstructor)
WriteBaseConstructorCall(method.ContainingTypeDefinition);
// Write Dummy Body
WriteSpace();
WriteSymbol("{", true);
WriteOutParameterInitializations(method);
if (_forCompilationThrowPlatformNotSupported)
{
Write("throw new ");
if (_forCompilationIncludeGlobalprefix)
Write("global::");
Write("System.PlatformNotSupportedException(); ");
}
else if (method.ContainingTypeDefinition.IsValueType && method.IsConstructor)
{
// Structs cannot have empty constructors so we need to output this dummy body
Write("throw null;");
}
else if (!TypeHelper.TypesAreEquivalent(method.Type, method.ContainingTypeDefinition.PlatformType.SystemVoid))
{
WriteKeyword("throw null;");
}
WriteSymbol("}");
}
private void WritePrivateConstructor(ITypeDefinition type)
{
if (!_forCompilation ||
type.IsInterface ||
type.IsEnum ||
type.IsDelegate ||
type.IsValueType ||
type.IsStatic)
return;
WriteVisibility(TypeMemberVisibility.Assembly);
WriteIdentifier(((INamedEntity)type).Name);
WriteSymbol("(");
WriteSymbol(")");
WriteBaseConstructorCall(type);
WriteEmptyBody();
}
private void WriteOutParameterInitializations(IMethodDefinition method)
{
if (!_forCompilation)
return;
var outParams = method.Parameters.Where(p => p.IsOut);
foreach (var param in outParams)
{
WriteIdentifier(param.Name);
WriteSpace();
WriteSymbol("=", true);
WriteDefaultOf(param.Type);
WriteSymbol(";", true);
}
}
private void WriteBaseConstructorCall(ITypeDefinition type)
{
if (!_forCompilation)
return;
ITypeDefinition baseType = type.BaseClasses.FirstOrDefault().GetDefinitionOrNull();
if (baseType == null)
return;
var ctors = baseType.Methods.Where(m => m.IsConstructor && _filter.Include(m) && !m.Attributes.Any(a => a.IsObsoleteWithUsageTreatedAsCompilationError()));
var defaultCtor = ctors.Where(c => c.ParameterCount == 0);
// Don't need a base call if we have a default constructor
if (defaultCtor.Any())
return;
var ctor = ctors.FirstOrDefault();
if (ctor == null)
return;
WriteSpace();
WriteSymbol(":", true);
WriteKeyword("base");
WriteSymbol("(");
_writer.WriteList(ctor.Parameters, p => WriteDefaultOf(p.Type));
WriteSymbol(")");
}
private void WriteEmptyBody()
{
if (!_forCompilation)
{
WriteSymbol(";");
}
else
{
WriteSpace();
WriteSymbol("{", true);
WriteSymbol("}");
}
}
private void WriteDefaultOf(ITypeReference type)
{
WriteKeyword("default", true);
WriteSymbol("(");
WriteTypeName(type, true);
WriteSymbol(")");
}
public static IDefinition GetDummyConstructor(ITypeDefinition type)
{
return new DummyInternalConstructor() { ContainingType = type };
}
private class DummyInternalConstructor : IDefinition
{
public ITypeDefinition ContainingType { get; set; }
public IEnumerable<ICustomAttribute> Attributes
{
get { throw new System.NotImplementedException(); }
}
public void Dispatch(IMetadataVisitor visitor)
{
throw new System.NotImplementedException();
}
public IEnumerable<ILocation> Locations
{
get { throw new System.NotImplementedException(); }
}
public void DispatchAsReference(IMetadataVisitor visitor)
{
throw new System.NotImplementedException();
}
}
}
}
| 1 | 12,567 | Another option is to always write it without quotes. I always have to use SR. And why not? | dotnet-buildtools | .cs |
@@ -1,7 +1,7 @@
import PropTypes from 'prop-types'
+import deepmerge from 'deepmerge' // < 1kb payload overhead when lodash/merge is > 3kb.
-export const defaultBreakpoints = [40, 52, 64].map(n => n + 'em')
-
+// PropTypes
export const propType = PropTypes.oneOfType([
PropTypes.number,
PropTypes.string, | 1 | import PropTypes from 'prop-types'
export const defaultBreakpoints = [40, 52, 64].map(n => n + 'em')
export const propType = PropTypes.oneOfType([
PropTypes.number,
PropTypes.string,
PropTypes.array,
PropTypes.object,
])
export const cloneFunction = fn => (...args) => fn(...args)
export const get = (obj, ...paths) => {
const value = paths.reduce((a, path) => {
if (is(a)) return a
const keys = typeof path === 'string' ? path.split('.') : [path]
return keys.reduce((a, key) => (a && is(a[key]) ? a[key] : null), obj)
}, null)
return is(value) ? value : paths[paths.length - 1]
}
export const themeGet = (path, fallback = null) => props =>
get(props.theme, path, fallback)
export const is = n => n !== undefined && n !== null
export const isObject = n => typeof n === 'object' && n !== null
export const num = n => typeof n === 'number' && !isNaN(n)
export const px = n => (num(n) && n !== 0 ? n + 'px' : n)
export const createMediaQuery = n => `@media screen and (min-width: ${px(n)})`
const getValue = (n, scale) => get(scale, n)
// loosely based on deepmerge package
export const merge = (a, b) => {
const result = {}
for (const key in a) {
result[key] = a[key]
}
for (const key in b) {
if (!a[key] || typeof a[key] !== 'object') {
result[key] = b[key]
} else {
result[key] = merge(a[key], b[key])
}
}
return result
}
const mergeAll = (...args) => {
let result = {}
for (let i = 0; i < args.length; i++) {
result = merge(result, args[i])
}
return result
}
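// Factory for style functions: reads `prop` (or `alias`) from props, resolves the value against
// the theme scale at `key` via `transformValue`, and returns a style object; array or object
// values produce responsive styles keyed by the theme breakpoints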
export const style = ({
prop,
cssProperty,
alias,
key,
transformValue = getValue,
scale: defaultScale = {},
}) => {
const property = cssProperty || prop
const func = props => {
const value = get(props, prop, alias, null)
if (!is(value)) return null
const scale = get(props.theme, key, defaultScale)
const createStyle = n =>
is(n)
? {
[property]: transformValue(n, scale),
}
: null
if (!isObject(value)) return createStyle(value)
const breakpoints = get(props.theme, 'breakpoints', defaultBreakpoints)
const styles = []
if (Array.isArray(value)) {
styles.push(createStyle(value[0]))
for (let i = 1; i < value.slice(0, breakpoints.length + 1).length; i++) {
const rule = createStyle(value[i])
if (rule) {
const media = createMediaQuery(breakpoints[i - 1])
styles.push({ [media]: rule })
}
}
} else {
for (let key in value) {
const breakpoint = breakpoints[key]
const media = createMediaQuery(breakpoint)
const rule = createStyle(value[key])
if (!breakpoint) {
styles.unshift(rule)
} else {
styles.push({ [media]: rule })
}
}
styles.sort()
}
return mergeAll(...styles)
}
func.propTypes = {
[prop]: cloneFunction(propType),
}
func.propTypes[prop].meta = {
prop,
themeKey: key,
}
if (alias) {
func.propTypes[alias] = cloneFunction(propType)
func.propTypes[alias].meta = {
prop: alias,
themeKey: key,
}
}
return func
}
export const compose = (...funcs) => {
const func = props => {
const n = funcs.map(fn => fn(props)).filter(Boolean)
return mergeAll(...n)
}
func.propTypes = {}
funcs.forEach(fn => {
func.propTypes = {
...func.propTypes,
...fn.propTypes,
}
})
return func
}
export const mapProps = mapper => func => {
const next = props => func(mapper(props))
for (const key in func) {
next[key] = func[key]
}
return next
}
export const variant = ({ key, prop = 'variant' }) => {
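// Resolves a whole style object from the theme, e.g. props.theme.buttons[props.variant]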
const fn = props => get(props.theme, [key, props[prop]].join('.'), null)
fn.propTypes = {
[prop]: PropTypes.oneOfType([PropTypes.number, PropTypes.string]),
}
return fn
}
// space
const spaceScale = [0, 4, 8, 16, 32, 64, 128, 256, 512]
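// Resolves a space value: numbers index into the scale (negatives negate the resolved value),
// strings are looked up on the scale and fall back to themselves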
const getSpace = (n, scale) => {
if (!num(n)) {
return px(get(scale, n, n))
}
const isNegative = n < 0
const absolute = Math.abs(n)
const value = get(scale, absolute)
if (!num(value)) {
return isNegative ? '-' + value : value
}
return px(value * (isNegative ? -1 : 1))
}
export const margin = style({
prop: 'margin',
alias: 'm',
key: 'space',
transformValue: getSpace,
scale: spaceScale,
})
export const marginTop = style({
prop: 'marginTop',
alias: 'mt',
key: 'space',
transformValue: getSpace,
scale: spaceScale,
})
export const marginBottom = style({
prop: 'marginBottom',
alias: 'mb',
key: 'space',
transformValue: getSpace,
scale: spaceScale,
})
export const marginLeft = style({
prop: 'marginLeft',
alias: 'ml',
key: 'space',
transformValue: getSpace,
scale: spaceScale,
})
export const marginRight = style({
prop: 'marginRight',
alias: 'mr',
key: 'space',
transformValue: getSpace,
scale: spaceScale,
})
export const padding = style({
prop: 'padding',
alias: 'p',
key: 'space',
transformValue: getSpace,
scale: spaceScale,
})
export const paddingTop = style({
prop: 'paddingTop',
alias: 'pt',
key: 'space',
transformValue: getSpace,
scale: spaceScale,
})
export const paddingBottom = style({
prop: 'paddingBottom',
alias: 'pb',
key: 'space',
transformValue: getSpace,
scale: spaceScale,
})
export const paddingLeft = style({
prop: 'paddingLeft',
alias: 'pl',
key: 'space',
transformValue: getSpace,
scale: spaceScale,
})
export const paddingRight = style({
prop: 'paddingRight',
alias: 'pr',
key: 'space',
transformValue: getSpace,
scale: spaceScale,
})
export const space = mapProps(props => ({
...props,
mt: is(props.my) ? props.my : props.mt,
mb: is(props.my) ? props.my : props.mb,
ml: is(props.mx) ? props.mx : props.ml,
mr: is(props.mx) ? props.mx : props.mr,
pt: is(props.py) ? props.py : props.pt,
pb: is(props.py) ? props.py : props.pb,
pl: is(props.px) ? props.px : props.pl,
pr: is(props.px) ? props.px : props.pr,
}))(
compose(
margin,
marginTop,
marginBottom,
marginLeft,
marginRight,
padding,
paddingTop,
paddingBottom,
paddingLeft,
paddingRight
)
)
// color
export const textColor = style({
prop: 'color',
key: 'colors',
})
export const backgroundColor = style({
prop: 'backgroundColor',
alias: 'bg',
key: 'colors',
})
export const color = compose(
textColor,
backgroundColor
)
// width
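// Numbers <= 1 are treated as fractions, e.g. getWidth(1/2) -> '50%'; anything else goes through px()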
export const getWidth = (n, scale) => (!num(n) || n > 1 ? px(n) : n * 100 + '%')
export const width = style({
prop: 'width',
key: 'widths',
transformValue: getWidth,
})
// typography
export const getPx = (n, scale) => px(get(scale, n))
export const fontSize = style({
prop: 'fontSize',
key: 'fontSizes',
transformValue: getPx,
scale: [12, 14, 16, 20, 24, 32, 48, 64, 72],
})
export const fontFamily = style({
prop: 'fontFamily',
key: 'fonts',
})
export const fontWeight = style({
prop: 'fontWeight',
key: 'fontWeights',
})
export const lineHeight = style({
prop: 'lineHeight',
key: 'lineHeights',
})
export const textAlign = style({
prop: 'textAlign',
})
export const fontStyle = style({
prop: 'fontStyle',
})
export const letterSpacing = style({
prop: 'letterSpacing',
key: 'letterSpacings',
transformValue: getPx,
})
// layout
export const display = style({
prop: 'display',
})
export const maxWidth = style({
prop: 'maxWidth',
key: 'maxWidths',
transformValue: getPx,
})
export const minWidth = style({
prop: 'minWidth',
key: 'minWidths',
transformValue: getPx,
})
export const height = style({
prop: 'height',
key: 'heights',
transformValue: getPx,
})
export const maxHeight = style({
prop: 'maxHeight',
key: 'maxHeights',
transformValue: getPx,
})
export const minHeight = style({
prop: 'minHeight',
key: 'minHeights',
transformValue: getPx,
})
export const size = mapProps(props => ({
...props,
width: props.size,
height: props.size,
}))(
compose(
width,
height
)
)
export const verticalAlign = style({ prop: 'verticalAlign' })
// flexbox
export const alignItems = style({ prop: 'alignItems' })
export const alignContent = style({ prop: 'alignContent' })
export const justifyItems = style({ prop: 'justifyItems' })
export const justifyContent = style({ prop: 'justifyContent' })
export const flexWrap = style({ prop: 'flexWrap' })
export const flexBasis = style({ prop: 'flexBasis', transformValue: getWidth })
export const flexDirection = style({ prop: 'flexDirection' })
export const flex = style({ prop: 'flex' })
export const justifySelf = style({ prop: 'justifySelf' })
export const alignSelf = style({ prop: 'alignSelf' })
export const order = style({ prop: 'order' })
// grid
export const gridGap = style({
prop: 'gridGap',
key: 'space',
transformValue: getPx,
scale: spaceScale,
})
export const gridColumnGap = style({
prop: 'gridColumnGap',
key: 'space',
transformValue: getPx,
scale: spaceScale,
})
export const gridRowGap = style({
prop: 'gridRowGap',
key: 'space',
transformValue: getPx,
scale: spaceScale,
})
export const gridColumn = style({ prop: 'gridColumn' })
export const gridRow = style({ prop: 'gridRow' })
export const gridAutoFlow = style({ prop: 'gridAutoFlow' })
export const gridAutoColumns = style({ prop: 'gridAutoColumns' })
export const gridAutoRows = style({ prop: 'gridAutoRows' })
export const gridTemplateColumns = style({ prop: 'gridTemplateColumns' })
export const gridTemplateRows = style({ prop: 'gridTemplateRows' })
export const gridTemplateAreas = style({ prop: 'gridTemplateAreas' })
export const gridArea = style({ prop: 'gridArea' })
// borders
export const border = style({
prop: 'border',
key: 'borders',
})
export const borderWidth = style({
prop: 'borderWidth',
key: 'borderWidths',
transformValue: getPx,
})
export const borderStyle = style({
prop: 'borderStyle',
key: 'borderStyles',
})
export const borderColor = style({
prop: 'borderColor',
key: 'colors',
})
export const borderTop = style({
prop: 'borderTop',
key: 'borders',
})
export const borderRight = style({
prop: 'borderRight',
key: 'borders',
})
export const borderBottom = style({
prop: 'borderBottom',
key: 'borders',
})
export const borderLeft = style({
prop: 'borderLeft',
key: 'borders',
})
export const borderRadius = style({
prop: 'borderRadius',
key: 'radii',
transformValue: getPx,
})
export const borders = compose(
border,
borderTop,
borderRight,
borderBottom,
borderLeft,
borderWidth,
borderStyle,
borderColor,
borderRadius
)
export const boxShadow = style({
prop: 'boxShadow',
key: 'shadows',
})
export const opacity = style({ prop: 'opacity' })
export const overflow = style({ prop: 'overflow' })
// backgrounds
export const background = style({ prop: 'background' })
export const backgroundImage = style({ prop: 'backgroundImage' })
export const backgroundSize = style({ prop: 'backgroundSize' })
export const backgroundPosition = style({ prop: 'backgroundPosition' })
export const backgroundRepeat = style({ prop: 'backgroundRepeat' })
// position
export const position = style({ prop: 'position' })
export const zIndex = style({ prop: 'zIndex', key: 'zIndices' })
export const top = style({ prop: 'top', transformValue: getPx })
export const right = style({ prop: 'right', transformValue: getPx })
export const bottom = style({ prop: 'bottom', transformValue: getPx })
export const left = style({ prop: 'left', transformValue: getPx })
// variants
export const buttonStyle = variant({ key: 'buttons' })
export const textStyle = variant({ key: 'textStyles', prop: 'textStyle' })
export const colorStyle = variant({ key: 'colorStyles', prop: 'colors' })
| 1 | 5,050 | This should use the `merge` function added in #473 instead of introducing a new dependency | styled-system-styled-system | js |
@@ -14480,6 +14480,16 @@ bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name,
func_name, pd_set, report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str());
}
}
+ } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType) {
+ for (const auto &binding : layout->GetBindings()) {
+ if (binding.descriptorType == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE) {
+ skip |= LogError(
+ device, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-04615",
+ "%s: pCreateInfo->templateType is VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, but "
+ "pCreateInfo->descriptorSetLayout contains a binding with descriptor type VK_DESCRIPTOR_TYPE_MUTABLE_VALVE.",
+ func_name);
+ }
+ }
}
for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
const auto &descriptor_update = pCreateInfo->pDescriptorUpdateEntries[i]; | 1 | /* Copyright (c) 2015-2021 The Khronos Group Inc.
* Copyright (c) 2015-2021 Valve Corporation
* Copyright (c) 2015-2021 LunarG, Inc.
* Copyright (C) 2015-2021 Google Inc.
* Modifications Copyright (C) 2020-2021 Advanced Micro Devices, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Cody Northrop <cnorthrop@google.com>
* Author: Michael Lentine <mlentine@google.com>
* Author: Tobin Ehlis <tobine@google.com>
* Author: Chia-I Wu <olv@google.com>
* Author: Chris Forbes <chrisf@ijw.co.nz>
* Author: Mark Lobodzinski <mark@lunarg.com>
* Author: Ian Elliott <ianelliott@google.com>
* Author: Dave Houlton <daveh@lunarg.com>
* Author: Dustin Graves <dustin@lunarg.com>
* Author: Jeremy Hayes <jeremy@lunarg.com>
* Author: Jon Ashburn <jon@lunarg.com>
* Author: Karl Schultz <karl@lunarg.com>
* Author: Mark Young <marky@lunarg.com>
* Author: Mike Schuchardt <mikes@lunarg.com>
* Author: Mike Weiblen <mikew@lunarg.com>
* Author: Tony Barbour <tony@LunarG.com>
* Author: John Zulauf <jzulauf@lunarg.com>
* Author: Shannon McPherson <shannon@lunarg.com>
* Author: Jeremy Kniager <jeremyk@lunarg.com>
* Author: Tobias Hector <tobias.hector@amd.com>
* Author: Jeremy Gebben <jeremyg@lunarg.com>
*/
#include <algorithm>
#include <array>
#include <assert.h>
#include <cmath>
#include <fstream>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <valarray>
#if defined(__linux__) || defined(__FreeBSD__)
#include <unistd.h>
#include <sys/types.h>
#endif
#include "vk_loader_platform.h"
#include "vk_enum_string_helper.h"
#include "chassis.h"
#include "convert_to_renderpass2.h"
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_utils.h"
#include "sync_utils.h"
#include "sync_vuid_maps.h"
// these templates are defined in buffer_validation.cpp so we need to pull in the explicit instantiations from there
extern template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count,
const VkImageMemoryBarrier *barrier);
extern template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count,
const VkImageMemoryBarrier2KHR *barrier);
extern template bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state,
const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass,
const safe_VkSubpassDescription2 &sub_desc,
const VkRenderPass rp_handle,
const VkImageMemoryBarrier &img_barrier,
const CMD_BUFFER_STATE *primary_cb_state) const;
extern template bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state,
const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass,
const safe_VkSubpassDescription2 &sub_desc,
const VkRenderPass rp_handle,
const VkImageMemoryBarrier2KHR &img_barrier,
const CMD_BUFFER_STATE *primary_cb_state) const;
using std::max;
using std::string;
using std::stringstream;
using std::unique_ptr;
using std::vector;
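// Seed the global image layout map with the image's initialLayout for every subresource range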
void CoreChecks::AddInitialLayoutintoImageLayoutMap(const IMAGE_STATE &image_state, GlobalImageLayoutMap &image_layout_map) {
auto *range_map = GetLayoutRangeMap(image_layout_map, image_state);
auto range_gen = subresource_adapter::RangeGenerator(image_state.subresource_encoder);
for (; range_gen->non_empty(); ++range_gen) {
range_map->insert(range_map->end(), std::make_pair(*range_gen, image_state.createInfo.initialLayout));
}
}
// Override base class, we have some extra work to do here
void CoreChecks::InitDeviceValidationObject(bool add_obj, ValidationObject *inst_obj, ValidationObject *dev_obj) {
if (add_obj) {
ValidationStateTracker::InitDeviceValidationObject(add_obj, inst_obj, dev_obj);
}
}
// For the given mem object, verify that it is not null or unbound; if it is, report an error. Returns the skip value.
template <typename T1>
bool CoreChecks::VerifyBoundMemoryIsValid(const DEVICE_MEMORY_STATE *mem_state, const T1 object,
const VulkanTypedHandle &typed_handle, const char *api_name,
const char *error_code) const {
return VerifyBoundMemoryIsValid<T1, SimpleErrorLocation>(mem_state, object, typed_handle, {api_name, error_code});
}
template <typename T1, typename LocType>
bool CoreChecks::VerifyBoundMemoryIsValid(const DEVICE_MEMORY_STATE *mem_state, const T1 object,
const VulkanTypedHandle &typed_handle, const LocType &location) const {
bool result = false;
auto type_name = object_string[typed_handle.type];
if (!mem_state) {
result |= LogError(object, location.Vuid(),
"%s: %s used with no memory bound. Memory should be bound by calling vkBind%sMemory().",
location.FuncName(), report_data->FormatHandle(typed_handle).c_str(), type_name + 2);
} else if (mem_state->Destroyed()) {
result |= LogError(object, location.Vuid(),
"%s: %s used with no memory bound and previously bound memory was freed. Memory must not be freed "
"prior to this operation.",
location.FuncName(), report_data->FormatHandle(typed_handle).c_str());
}
return result;
}
// Check to see if memory was ever bound to this image
bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const Location &loc) const {
using LocationAdapter = core_error::LocationVuidAdapter<sync_vuid_maps::GetImageBarrierVUIDFunctor>;
return ValidateMemoryIsBoundToImage<LocationAdapter>(image_state, LocationAdapter(loc, sync_vuid_maps::ImageError::kNoMemory));
}
bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const char *api_name, const char *error_code) const {
return ValidateMemoryIsBoundToImage<SimpleErrorLocation>(image_state, SimpleErrorLocation(api_name, error_code));
}
template <typename LocType>
bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const LocType &location) const {
bool result = false;
if (image_state->create_from_swapchain != VK_NULL_HANDLE) {
if (!image_state->bind_swapchain) {
LogObjectList objlist(image_state->image());
objlist.add(image_state->create_from_swapchain);
result |= LogError(
objlist, location.Vuid(),
"%s: %s is created by %s, and the image should be bound by calling vkBindImageMemory2(), and the pNext chain "
"includes VkBindImageMemorySwapchainInfoKHR.",
location.FuncName(), report_data->FormatHandle(image_state->image()).c_str(),
report_data->FormatHandle(image_state->create_from_swapchain).c_str());
} else if (image_state->create_from_swapchain != image_state->bind_swapchain->swapchain()) {
LogObjectList objlist(image_state->image());
objlist.add(image_state->create_from_swapchain);
objlist.add(image_state->bind_swapchain->Handle());
result |=
LogError(objlist, location.Vuid(),
"%s: %s is created by %s, but the image is bound by %s. The image should be created and bound by the same "
"swapchain",
location.FuncName(), report_data->FormatHandle(image_state->image()).c_str(),
report_data->FormatHandle(image_state->create_from_swapchain).c_str(),
report_data->FormatHandle(image_state->bind_swapchain->Handle()).c_str());
}
} else if (image_state->IsExternalAHB()) {
// TODO look into how to properly check for a valid bound memory for an external AHB
} else if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
result |= VerifyBoundMemoryIsValid(image_state->MemState(), image_state->image(), image_state->Handle(), location);
}
return result;
}
// Check to see if memory was bound to this buffer
bool CoreChecks::ValidateMemoryIsBoundToBuffer(const BUFFER_STATE *buffer_state, const char *api_name,
const char *error_code) const {
bool result = false;
if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
result |= VerifyBoundMemoryIsValid(buffer_state->MemState(), buffer_state->buffer(), buffer_state->Handle(), api_name,
error_code);
}
return result;
}
// Check to see if memory was bound to this acceleration structure
bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE *as_state, const char *api_name,
const char *error_code) const {
return VerifyBoundMemoryIsValid(as_state->MemState(), as_state->acceleration_structure(), as_state->Handle(), api_name,
error_code);
}
// Check to see if memory was bound to this acceleration structure
bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE_KHR *as_state,
const char *api_name, const char *error_code) const {
return VerifyBoundMemoryIsValid(as_state->MemState(), as_state->acceleration_structure(), as_state->Handle(), api_name,
error_code);
}
// Valid usage checks for a call to SetMemBinding().
// For the NULL mem case, no binding is recorded, so nothing is validated here
// If the object was created with sparse memory flags, binding it to memory this way is an error
// If a previous binding exists (or previously bound memory was freed), report an error,
// since memory bindings are immutable in Vulkan
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
bool CoreChecks::ValidateSetMemBinding(VkDeviceMemory mem, const VulkanTypedHandle &typed_handle, const char *apiName) const {
bool skip = false;
// It's an error to bind an object to NULL memory
if (mem != VK_NULL_HANDLE) {
const BINDABLE *mem_binding = ValidationStateTracker::GetObjectMemBinding(typed_handle);
assert(mem_binding);
if (mem_binding->sparse) {
const char *error_code = nullptr;
const char *handle_type = nullptr;
if (typed_handle.type == kVulkanObjectTypeBuffer) {
handle_type = "BUFFER";
if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
error_code = "VUID-vkBindBufferMemory-buffer-01030";
} else {
error_code = "VUID-VkBindBufferMemoryInfo-buffer-01030";
}
} else if (typed_handle.type == kVulkanObjectTypeImage) {
handle_type = "IMAGE";
if (strcmp(apiName, "vkBindImageMemory()") == 0) {
error_code = "VUID-vkBindImageMemory-image-01045";
} else {
error_code = "VUID-VkBindImageMemoryInfo-image-01045";
}
} else {
// Unsupported object type
assert(false);
}
LogObjectList objlist(mem);
objlist.add(typed_handle);
skip |= LogError(objlist, error_code,
"In %s, attempting to bind %s to %s which was created with sparse memory flags "
"(VK_%s_CREATE_SPARSE_*_BIT).",
apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
handle_type);
}
const DEVICE_MEMORY_STATE *mem_info = ValidationStateTracker::GetDevMemState(mem);
if (mem_info) {
const DEVICE_MEMORY_STATE *prev_binding = mem_binding->MemState();
if (prev_binding) {
if (!prev_binding->Destroyed()) {
const char *error_code = nullptr;
if (typed_handle.type == kVulkanObjectTypeBuffer) {
if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
error_code = "VUID-vkBindBufferMemory-buffer-01029";
} else {
error_code = "VUID-VkBindBufferMemoryInfo-buffer-01029";
}
} else if (typed_handle.type == kVulkanObjectTypeImage) {
if (strcmp(apiName, "vkBindImageMemory()") == 0) {
error_code = "VUID-vkBindImageMemory-image-01044";
} else {
error_code = "VUID-VkBindImageMemoryInfo-image-01044";
}
} else {
// Unsupported object type
assert(false);
}
LogObjectList objlist(mem);
objlist.add(typed_handle);
objlist.add(prev_binding->mem());
skip |=
LogError(objlist, error_code, "In %s, attempting to bind %s to %s which has already been bound to %s.",
apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
report_data->FormatHandle(prev_binding->mem()).c_str());
} else {
LogObjectList objlist(mem);
objlist.add(typed_handle);
skip |=
LogError(objlist, kVUID_Core_MemTrack_RebindObject,
"In %s, attempting to bind %s to %s which was previous bound to memory that has "
"since been freed. Memory bindings are immutable in "
"Vulkan so this attempt to bind to new memory is not allowed.",
apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str());
}
}
}
}
return skip;
}
bool CoreChecks::ValidateDeviceQueueFamily(uint32_t queue_family, const char *cmd_name, const char *parameter_name,
const char *error_code, bool optional = false) const {
bool skip = false;
if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) {
skip |= LogError(device, error_code,
"%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
cmd_name, parameter_name);
} else if (queue_family_index_set.find(queue_family) == queue_family_index_set.end()) {
skip |=
LogError(device, error_code,
"%s: %s (= %" PRIu32
") is not one of the queue families given via VkDeviceQueueCreateInfo structures when the device was created.",
cmd_name, parameter_name, queue_family);
}
return skip;
}
// Validate the specified queue families against the families supported by the physical device that owns this device
bool CoreChecks::ValidatePhysicalDeviceQueueFamilies(uint32_t queue_family_count, const uint32_t *queue_families,
const char *cmd_name, const char *array_parameter_name,
const char *vuid) const {
bool skip = false;
if (queue_families) {
layer_data::unordered_set<uint32_t> set;
for (uint32_t i = 0; i < queue_family_count; ++i) {
std::string parameter_name = std::string(array_parameter_name) + "[" + std::to_string(i) + "]";
if (set.count(queue_families[i])) {
skip |= LogError(device, vuid, "%s: %s (=%" PRIu32 ") is not unique within %s array.", cmd_name,
parameter_name.c_str(), queue_families[i], array_parameter_name);
} else {
set.insert(queue_families[i]);
if (queue_families[i] == VK_QUEUE_FAMILY_IGNORED) {
skip |= LogError(
device, vuid,
"%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
cmd_name, parameter_name.c_str());
} else if (queue_families[i] >= physical_device_state->queue_family_known_count) {
LogObjectList obj_list(physical_device);
obj_list.add(device);
skip |=
LogError(obj_list, vuid,
"%s: %s (= %" PRIu32
") is not one of the queue families supported by the parent PhysicalDevice %s of this device %s.",
cmd_name, parameter_name.c_str(), queue_families[i],
report_data->FormatHandle(physical_device).c_str(), report_data->FormatHandle(device).c_str());
}
}
}
}
return skip;
}
// Check object status for selected flag state
bool CoreChecks::ValidateStatus(const CMD_BUFFER_STATE *pNode, CBStatusFlags status_mask, const char *fail_msg,
const char *msg_code) const {
if (!(pNode->status & status_mask)) {
return LogError(pNode->commandBuffer(), msg_code, "%s: %s.", report_data->FormatHandle(pNode->commandBuffer()).c_str(),
fail_msg);
}
return false;
}
// Return true if for a given PSO, the given state enum is dynamic, else return false
bool CoreChecks::IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) const {
if (pPipeline && (pPipeline->GetPipelineType() == VK_PIPELINE_BIND_POINT_GRAPHICS) &&
pPipeline->create_info.graphics.pDynamicState) {
for (uint32_t i = 0; i < pPipeline->create_info.graphics.pDynamicState->dynamicStateCount; i++) {
if (state == pPipeline->create_info.graphics.pDynamicState->pDynamicStates[i]) return true;
}
}
return false;
}
// Validate state stored as flags at time of draw call
bool CoreChecks::ValidateDrawStateFlags(const CMD_BUFFER_STATE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
const char *msg_code) const {
bool result = false;
if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
result |=
ValidateStatus(pCB, CBSTATUS_LINE_WIDTH_SET, "Dynamic line width state not set for this command buffer", msg_code);
}
const auto &create_info = pPipe->create_info.graphics;
if (create_info.pRasterizationState && (create_info.pRasterizationState->depthBiasEnable == VK_TRUE)) {
result |=
ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_SET, "Dynamic depth bias state not set for this command buffer", msg_code);
}
if (pPipe->blend_constants_enabled) {
result |= ValidateStatus(pCB, CBSTATUS_BLEND_CONSTANTS_SET, "Dynamic blend constants state not set for this command buffer",
msg_code);
}
if (create_info.pDepthStencilState && (create_info.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
result |=
ValidateStatus(pCB, CBSTATUS_DEPTH_BOUNDS_SET, "Dynamic depth bounds state not set for this command buffer", msg_code);
}
if (create_info.pDepthStencilState && (create_info.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_READ_MASK_SET,
"Dynamic stencil read mask state not set for this command buffer", msg_code);
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_WRITE_MASK_SET,
"Dynamic stencil write mask state not set for this command buffer", msg_code);
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_REFERENCE_SET,
"Dynamic stencil reference state not set for this command buffer", msg_code);
}
if (indexed) {
result |= ValidateStatus(pCB, CBSTATUS_INDEX_BUFFER_BOUND,
"Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
}
if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
const auto *line_state =
LvlFindInChain<VkPipelineRasterizationLineStateCreateInfoEXT>(create_info.pRasterizationState->pNext);
if (line_state && line_state->stippledLineEnable) {
result |= ValidateStatus(pCB, CBSTATUS_LINE_STIPPLE_SET, "Dynamic line stipple state not set for this command buffer",
msg_code);
}
}
return result;
}
bool CoreChecks::LogInvalidAttachmentMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string,
const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
const char *msg, const char *caller, const char *error_code) const {
LogObjectList objlist(rp1_state->renderPass());
objlist.add(rp2_state->renderPass());
return LogError(objlist, error_code,
"%s: RenderPasses incompatible between %s w/ %s and %s w/ %s Attachment %u is not "
"compatible with %u: %s.",
caller, type1_string, report_data->FormatHandle(rp1_state->renderPass()).c_str(), type2_string,
report_data->FormatHandle(rp2_state->renderPass()).c_str(), primary_attach, secondary_attach, msg);
}
bool CoreChecks::ValidateAttachmentCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state,
uint32_t primary_attach, uint32_t secondary_attach, const char *caller,
const char *error_code) const {
bool skip = false;
const auto &primary_pass_ci = rp1_state->createInfo;
const auto &secondary_pass_ci = rp2_state->createInfo;
if (primary_pass_ci.attachmentCount <= primary_attach) {
primary_attach = VK_ATTACHMENT_UNUSED;
}
if (secondary_pass_ci.attachmentCount <= secondary_attach) {
secondary_attach = VK_ATTACHMENT_UNUSED;
}
if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
return skip;
}
if (primary_attach == VK_ATTACHMENT_UNUSED) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"The first is unused while the second is not.", caller, error_code);
return skip;
}
if (secondary_attach == VK_ATTACHMENT_UNUSED) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"The second is unused while the first is not.", caller, error_code);
return skip;
}
if (primary_pass_ci.pAttachments[primary_attach].format != secondary_pass_ci.pAttachments[secondary_attach].format) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different formats.", caller, error_code);
}
if (primary_pass_ci.pAttachments[primary_attach].samples != secondary_pass_ci.pAttachments[secondary_attach].samples) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different samples.", caller, error_code);
}
if (primary_pass_ci.pAttachments[primary_attach].flags != secondary_pass_ci.pAttachments[secondary_attach].flags) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different flags.", caller, error_code);
}
return skip;
}
bool CoreChecks::ValidateSubpassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
const char *caller, const char *error_code) const {
bool skip = false;
const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
uint32_t max_input_attachment_count = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
for (uint32_t i = 0; i < max_input_attachment_count; ++i) {
uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.inputAttachmentCount) {
primary_input_attach = primary_desc.pInputAttachments[i].attachment;
}
if (i < secondary_desc.inputAttachmentCount) {
secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
secondary_input_attach, caller, error_code);
}
uint32_t max_color_attachment_count = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
for (uint32_t i = 0; i < max_color_attachment_count; ++i) {
uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.colorAttachmentCount) {
primary_color_attach = primary_desc.pColorAttachments[i].attachment;
}
if (i < secondary_desc.colorAttachmentCount) {
secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
secondary_color_attach, caller, error_code);
if (rp1_state->createInfo.subpassCount > 1) {
uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
}
if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
secondary_resolve_attach, caller, error_code);
}
}
uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
if (primary_desc.pDepthStencilAttachment) {
primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
}
if (secondary_desc.pDepthStencilAttachment) {
secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
secondary_depthstencil_attach, caller, error_code);
// Both renderpasses must agree on Multiview usage
if (primary_desc.viewMask && secondary_desc.viewMask) {
if (primary_desc.viewMask != secondary_desc.viewMask) {
std::stringstream ss;
ss << "For subpass " << subpass << ", they have a different viewMask. The first has view mask " << primary_desc.viewMask
<< " while the second has view mask " << secondary_desc.viewMask << ".";
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, ss.str().c_str(), caller, error_code);
}
} else if (primary_desc.viewMask) {
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
"The first uses Multiview (has non-zero viewMasks) while the second one does not.", caller,
error_code);
} else if (secondary_desc.viewMask) {
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
"The second uses Multiview (has non-zero viewMasks) while the first one does not.", caller,
error_code);
}
return skip;
}
bool CoreChecks::LogInvalidPnextMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string,
const RENDER_PASS_STATE *rp2_state, const char *msg, const char *caller,
const char *error_code) const {
LogObjectList objlist(rp1_state->renderPass());
objlist.add(rp2_state->renderPass());
return LogError(objlist, error_code, "%s: RenderPasses incompatible between %s w/ %s and %s w/ %s: %s", caller, type1_string,
report_data->FormatHandle(rp1_state->renderPass()).c_str(), type2_string,
report_data->FormatHandle(rp2_state->renderPass()).c_str(), msg);
}
// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
// This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
// will then feed into this function
bool CoreChecks::ValidateRenderPassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state, const char *caller,
const char *error_code) const {
bool skip = false;
// createInfo flags must be identical for the renderpasses to be compatible.
if (rp1_state->createInfo.flags != rp2_state->createInfo.flags) {
LogObjectList objlist(rp1_state->renderPass());
objlist.add(rp2_state->renderPass());
skip |=
LogError(objlist, error_code,
"%s: RenderPasses incompatible between %s w/ %s with flags of %u and %s w/ "
"%s with a flags of %u.",
caller, type1_string, report_data->FormatHandle(rp1_state->renderPass()).c_str(), rp1_state->createInfo.flags,
type2_string, report_data->FormatHandle(rp2_state->renderPass()).c_str(), rp2_state->createInfo.flags);
}
if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
LogObjectList objlist(rp1_state->renderPass());
objlist.add(rp2_state->renderPass());
skip |= LogError(objlist, error_code,
"%s: RenderPasses incompatible between %s w/ %s with a subpassCount of %u and %s w/ "
"%s with a subpassCount of %u.",
caller, type1_string, report_data->FormatHandle(rp1_state->renderPass()).c_str(),
rp1_state->createInfo.subpassCount, type2_string, report_data->FormatHandle(rp2_state->renderPass()).c_str(),
rp2_state->createInfo.subpassCount);
} else {
for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
skip |= ValidateSubpassCompatibility(type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
}
}
// Find an entry of the Fragment Density Map type in the pNext chain, if it exists
const auto fdm1 = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp1_state->createInfo.pNext);
const auto fdm2 = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp2_state->createInfo.pNext);
// Both renderpasses must agree on usage of a Fragment Density Map type
if (fdm1 && fdm2) {
uint32_t primary_input_attach = fdm1->fragmentDensityMapAttachment.attachment;
uint32_t secondary_input_attach = fdm2->fragmentDensityMapAttachment.attachment;
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
secondary_input_attach, caller, error_code);
} else if (fdm1) {
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
"The first uses a Fragment Density Map while the second one does not.", caller, error_code);
} else if (fdm2) {
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
"The second uses a Fragment Density Map while the first one does not.", caller, error_code);
}
return skip;
}
// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) {
if (pipe->create_info.graphics.pMultisampleState != NULL &&
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->create_info.graphics.pMultisampleState->sType) {
return pipe->create_info.graphics.pMultisampleState->rasterizationSamples;
}
return VK_SAMPLE_COUNT_1_BIT;
}
static void ListBits(std::ostream &s, uint32_t bits) {
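// Stream the indices of the set bits as a comma-separated list, e.g. 0b1010 -> "1,3"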
for (int i = 0; i < 32 && bits; i++) {
if (bits & (1 << i)) {
s << i;
bits &= ~(1 << i);
if (bits) {
s << ",";
}
}
}
}
std::string DynamicStateString(CBStatusFlags input_value) {
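// Build a '|'-separated list of the VkDynamicState names encoded in the CBStatusFlags bits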
std::string ret;
int index = 0;
while (input_value) {
if (input_value & 1) {
if (!ret.empty()) ret.append("|");
ret.append(string_VkDynamicState(ConvertToDynamicState(static_cast<CBStatusFlagBits>(1llu << index))));
}
++index;
input_value >>= 1;
}
if (ret.empty()) ret.append(string_VkDynamicState(ConvertToDynamicState(static_cast<CBStatusFlagBits>(0))));
return ret;
}
// Validate draw-time state related to the PSO
bool CoreChecks::ValidatePipelineDrawtimeState(const LAST_BOUND_STATE &state, const CMD_BUFFER_STATE *pCB, CMD_TYPE cmd_type,
const PIPELINE_STATE *pPipeline) const {
bool skip = false;
const auto ¤t_vtx_bfr_binding_info = pCB->current_vertex_buffer_binding_info.vertex_buffer_bindings;
const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
const char *caller = CommandTypeString(cmd_type);
// Verify vertex & index buffer for unprotected command buffer.
// Because vertex & index buffer is read only, it doesn't need to care protected command buffer case.
if (enabled_features.core11.protectedMemory == VK_TRUE) {
for (const auto &buffer_binding : current_vtx_bfr_binding_info) {
if (buffer_binding.buffer_state && !buffer_binding.buffer_state->Destroyed()) {
skip |= ValidateProtectedBuffer(pCB, buffer_binding.buffer_state.get(), caller, vuid.unprotected_command_buffer,
"Buffer is vertex buffer");
}
}
if (pCB->index_buffer_binding.buffer_state && !pCB->index_buffer_binding.buffer_state->Destroyed()) {
skip |= ValidateProtectedBuffer(pCB, pCB->index_buffer_binding.buffer_state.get(), caller,
vuid.unprotected_command_buffer, "Buffer is index buffer");
}
}
// Verify if using dynamic state setting commands that it doesn't set up in pipeline
CBStatusFlags invalid_status = CBSTATUS_ALL_STATE_SET & ~(pCB->dynamic_status | pCB->static_status);
if (invalid_status) {
std::string dynamic_states = DynamicStateString(invalid_status);
LogObjectList objlist(pCB->commandBuffer());
objlist.add(pPipeline->pipeline());
skip |= LogError(objlist, vuid.dynamic_state_setting_commands,
"%s: %s doesn't set up %s, but it calls the related dynamic state setting commands", caller,
report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), dynamic_states.c_str());
}
// Verify vertex binding
if (pPipeline->vertex_binding_descriptions_.size() > 0) {
for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) {
const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding;
if (current_vtx_bfr_binding_info.size() < (vertex_binding + 1)) {
skip |= LogError(pCB->commandBuffer(), vuid.vertex_binding,
"%s: %s expects that this Command Buffer's vertex binding Index %u should be set via "
"vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
"index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
caller, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), vertex_binding, i,
vertex_binding);
} else if ((current_vtx_bfr_binding_info[vertex_binding].buffer_state == nullptr) &&
!enabled_features.robustness2_features.nullDescriptor) {
skip |= LogError(pCB->commandBuffer(), vuid.vertex_binding_null,
"%s: Vertex binding %d must not be VK_NULL_HANDLE %s expects that this Command Buffer's vertex "
"binding Index %u should be set via "
"vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
"index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
caller, vertex_binding, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(),
vertex_binding, i, vertex_binding);
}
}
// Verify vertex attribute address alignment
for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) {
const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i];
const auto vertex_binding = attribute_description.binding;
const auto attribute_offset = attribute_description.offset;
const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding);
if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) &&
(vertex_binding < current_vtx_bfr_binding_info.size()) &&
((current_vtx_bfr_binding_info[vertex_binding].buffer_state) ||
enabled_features.robustness2_features.nullDescriptor)) {
auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride;
if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT)) {
vertex_buffer_stride = static_cast<uint32_t>(current_vtx_bfr_binding_info[vertex_binding].stride);
uint32_t attribute_binding_extent =
attribute_description.offset + FormatElementSize(attribute_description.format);
if (vertex_buffer_stride != 0 && vertex_buffer_stride < attribute_binding_extent) {
skip |= LogError(pCB->commandBuffer(), "VUID-vkCmdBindVertexBuffers2EXT-pStrides-06209",
"The pStrides[%u] (%u) parameter in the last call to vkCmdBindVertexBuffers2EXT is not 0 "
"and less than the extent of the binding for attribute %zu (%u).",
vertex_binding, vertex_buffer_stride, i, attribute_binding_extent);
}
}
const auto vertex_buffer_offset = current_vtx_bfr_binding_info[vertex_binding].offset;
// Use 1 as vertex/instance index to use buffer stride as well
const auto attrib_address = vertex_buffer_offset + vertex_buffer_stride + attribute_offset;
VkDeviceSize vtx_attrib_req_alignment = pPipeline->vertex_attribute_alignments_[i];
if (SafeModulo(attrib_address, vtx_attrib_req_alignment) != 0) {
LogObjectList objlist(current_vtx_bfr_binding_info[vertex_binding].buffer_state->buffer());
objlist.add(state.pipeline_state->pipeline());
skip |= LogError(
objlist, vuid.vertex_binding_attribute,
"%s: Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER
", %s,from of %s and vertex %s.",
caller, i, string_VkFormat(attribute_description.format),
report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(),
report_data->FormatHandle(current_vtx_bfr_binding_info[vertex_binding].buffer_state->buffer()).c_str());
}
} else {
LogObjectList objlist(pCB->commandBuffer());
objlist.add(state.pipeline_state->pipeline());
skip |= LogError(objlist, vuid.vertex_binding_attribute,
"%s: binding #%" PRIu32
" in pVertexAttributeDescriptions of %s is invalid in vkCmdBindVertexBuffers of %s.",
caller, vertex_binding, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(),
report_data->FormatHandle(pCB->commandBuffer()).c_str());
}
}
}
// If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
// Skip check if rasterization is disabled, if there is no viewport, or if viewport/scissors are being inherited.
bool dyn_viewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
const auto &create_info = pPipeline->create_info.graphics;
if ((!create_info.pRasterizationState || (create_info.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
create_info.pViewportState && pCB->inheritedViewportDepths.size() == 0) {
bool dyn_scissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
// NB (akeley98): Current validation layers do not detect the error where vkCmdSetViewport (or scissor) was called, but
// the dynamic state set is overwritten by binding a graphics pipeline with static viewport (scissor) state.
// This condition could be detected by checking trashedViewportMask & viewportMask (trashedScissorMask & scissorMask) is
// nonzero in the range of bits needed by the pipeline.
if (dyn_viewport) {
const auto required_viewports_mask = (1 << create_info.pViewportState->viewportCount) - 1;
const auto missing_viewport_mask = ~pCB->viewportMask & required_viewports_mask;
if (missing_viewport_mask) {
std::stringstream ss;
ss << caller << ": Dynamic viewport(s) ";
ListBits(ss, missing_viewport_mask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
skip |= LogError(device, vuid.dynamic_state, "%s", ss.str().c_str());
}
}
if (dyn_scissor) {
const auto required_scissor_mask = (1 << create_info.pViewportState->scissorCount) - 1;
const auto missing_scissor_mask = ~pCB->scissorMask & required_scissor_mask;
if (missing_scissor_mask) {
std::stringstream ss;
ss << caller << ": Dynamic scissor(s) ";
ListBits(ss, missing_scissor_mask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
skip |= LogError(device, vuid.dynamic_state, "%s", ss.str().c_str());
}
}
bool dyn_viewport_count = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT);
bool dyn_scissor_count = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT);
// VUID {refpage}-viewportCount-03417
if (dyn_viewport_count && !dyn_scissor_count) {
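// Viewport count is dynamic but scissor count is static, so the viewports set by
// vkCmdSetViewportWithCountEXT must cover the pipeline's static scissorCount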
const auto required_viewport_mask = (1 << create_info.pViewportState->scissorCount) - 1;
const auto missing_viewport_mask = ~pCB->viewportWithCountMask & required_viewport_mask;
if (missing_viewport_mask) {
std::stringstream ss;
ss << caller << ": Dynamic viewport with count ";
ListBits(ss, missing_viewport_mask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewportWithCountEXT().";
skip |= LogError(device, vuid.viewport_count, "%s", ss.str().c_str());
}
}
// VUID {refpage}-scissorCount-03418
if (dyn_scissor_count && !dyn_viewport_count) {
const auto required_scissor_mask = (1 << create_info.pViewportState->viewportCount) - 1;
const auto missing_scissor_mask = ~pCB->scissorWithCountMask & required_scissor_mask;
if (missing_scissor_mask) {
std::stringstream ss;
ss << caller << ": Dynamic scissor with count ";
ListBits(ss, missing_scissor_mask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissorWithCountEXT().";
skip |= LogError(device, vuid.scissor_count, "%s", ss.str().c_str());
}
}
// VUID {refpage}-viewportCount-03419
if (dyn_scissor_count && dyn_viewport_count) {
if (pCB->viewportWithCountMask != pCB->scissorWithCountMask) {
std::stringstream ss;
ss << caller << ": Dynamic viewport and scissor with count ";
ListBits(ss, pCB->viewportWithCountMask ^ pCB->scissorWithCountMask);
ss << " are used by pipeline state object, but were not provided via matching calls to "
"vkCmdSetViewportWithCountEXT and vkCmdSetScissorWithCountEXT().";
skip |= LogError(device, vuid.viewport_scissor_count, "%s", ss.str().c_str());
}
}
}
// If inheriting viewports, verify that not using more than inherited.
if (pCB->inheritedViewportDepths.size() != 0 && dyn_viewport) {
uint32_t viewport_count = create_info.pViewportState->viewportCount;
uint32_t max_inherited = uint32_t(pCB->inheritedViewportDepths.size());
if (viewport_count > max_inherited) {
skip |= LogError(device, vuid.dynamic_state,
"Pipeline requires more viewports (%u) than inherited (viewportDepthCount=%u).",
unsigned(viewport_count), unsigned(max_inherited));
}
}
// Verify that any MSAA request in PSO matches sample# in bound FB
// Verify that blend is enabled only if supported by subpasses image views format features
// Skip the check if rasterization is disabled.
if (!create_info.pRasterizationState || (create_info.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline);
if (pCB->activeRenderPass) {
const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr();
const VkSubpassDescription2 *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
uint32_t i;
unsigned subpass_num_samples = 0;
for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
subpass_num_samples |= static_cast<unsigned>(render_pass_info->pAttachments[attachment].samples);
const auto *imageview_state = pCB->GetActiveAttachmentImageViewState(attachment);
if (imageview_state != nullptr &&
attachment < pPipeline->create_info.graphics.pColorBlendState->attachmentCount) {
if ((imageview_state->format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT) == 0 &&
pPipeline->create_info.graphics.pColorBlendState->pAttachments[attachment].blendEnable != VK_FALSE) {
skip |= LogError(pPipeline->pipeline(), vuid.blend_enable,
"%s: Image view's format features of the color attachment (%" PRIu32
") of the active subpass do not contain VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT "
"bit, but active pipeline's pAttachments[%" PRIu32 "].blendEnable is not VK_FALSE.",
caller, attachment, attachment);
}
}
}
}
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_num_samples |= static_cast<unsigned>(render_pass_info->pAttachments[attachment].samples);
}
if (!(IsExtEnabled(device_extensions.vk_amd_mixed_attachment_samples) ||
IsExtEnabled(device_extensions.vk_nv_framebuffer_mixed_samples)) &&
((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
LogObjectList objlist(pPipeline->pipeline());
objlist.add(pCB->activeRenderPass->renderPass());
skip |=
LogError(objlist, vuid.rasterization_samples,
"%s: In %s the sample count is %s while the current %s has %s and they need to be the same.", caller,
report_data->FormatHandle(pPipeline->pipeline()).c_str(), string_VkSampleCountFlagBits(pso_num_samples),
report_data->FormatHandle(pCB->activeRenderPass->renderPass()).c_str(),
string_VkSampleCountFlags(static_cast<VkSampleCountFlags>(subpass_num_samples)).c_str());
}
} else {
skip |= LogError(pPipeline->pipeline(), kVUID_Core_DrawState_NoActiveRenderpass,
"%s: No active render pass found at draw-time in %s!", caller,
report_data->FormatHandle(pPipeline->pipeline()).c_str());
}
}
// Verify that PSO creation renderPass is compatible with active renderPass
if (pCB->activeRenderPass) {
// TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
if (pCB->activeRenderPass->renderPass() != pPipeline->rp_state->renderPass()) {
// renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
skip |= ValidateRenderPassCompatibility("active render pass", pCB->activeRenderPass.get(), "pipeline state object",
pPipeline->rp_state.get(), caller, vuid.render_pass_compatible);
}
if (pPipeline->create_info.graphics.subpass != pCB->activeSubpass) {
skip |=
LogError(pPipeline->pipeline(), vuid.subpass_index, "%s: Pipeline was built for subpass %u but used in subpass %u.",
caller, pPipeline->create_info.graphics.subpass, pCB->activeSubpass);
}
// Check if depth stencil attachment was created with sample location compatible bit
if (pPipeline->sample_location_enabled == VK_TRUE) {
const safe_VkAttachmentReference2 *ds_attachment =
pCB->activeRenderPass->createInfo.pSubpasses[pCB->activeSubpass].pDepthStencilAttachment;
const FRAMEBUFFER_STATE *fb_state = pCB->activeFramebuffer.get();
if ((ds_attachment != nullptr) && (fb_state != nullptr)) {
const uint32_t attachment = ds_attachment->attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
const auto *imageview_state = pCB->GetActiveAttachmentImageViewState(attachment);
if (imageview_state != nullptr) {
const IMAGE_STATE *image_state = GetImageState(imageview_state->create_info.image);
if (image_state != nullptr) {
if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT) == 0) {
skip |= LogError(pPipeline->pipeline(), vuid.sample_location,
"%s: sampleLocationsEnable is true for the pipeline, but the subpass (%u) depth "
"stencil attachment's VkImage was not created with "
"VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT.",
caller, pCB->activeSubpass);
}
}
}
}
}
}
}
skip |= ValidateStatus(pCB, CBSTATUS_PATCH_CONTROL_POINTS_SET, "Dynamic patch control points not set for this command buffer",
vuid.patch_control_points);
skip |= ValidateStatus(pCB, CBSTATUS_RASTERIZER_DISCARD_ENABLE_SET,
"Dynamic rasterizer discard enable not set for this command buffer", vuid.rasterizer_discard_enable);
skip |= ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_ENABLE_SET, "Dynamic depth bias enable not set for this command buffer",
vuid.depth_bias_enable);
skip |= ValidateStatus(pCB, CBSTATUS_LOGIC_OP_SET, "Dynamic state logicOp not set for this command buffer", vuid.logic_op);
skip |= ValidateStatus(pCB, CBSTATUS_PRIMITIVE_RESTART_ENABLE_SET,
"Dynamic primitive restart enable not set for this command buffer", vuid.primitive_restart_enable);
skip |= ValidateStatus(pCB, CBSTATUS_VERTEX_INPUT_BINDING_STRIDE_SET,
"Dynamic vertex input binding stride not set for this command buffer", vuid.vertex_input_binding_stride);
skip |=
ValidateStatus(pCB, CBSTATUS_VERTEX_INPUT_SET, "Dynamic vertex input not set for this command buffer", vuid.vertex_input);
// VUID {refpage}-primitiveTopology-03420
skip |= ValidateStatus(pCB, CBSTATUS_PRIMITIVE_TOPOLOGY_SET, "Dynamic primitive topology state not set for this command buffer",
vuid.primitive_topology);
if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT)) {
bool compatible_topology = false;
switch (create_info.pInputAssemblyState->topology) {
case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
compatible_topology = true;
break;
default:
break;
}
break;
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
compatible_topology = true;
break;
default:
break;
}
break;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
compatible_topology = true;
break;
default:
break;
}
break;
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
compatible_topology = true;
break;
default:
break;
}
break;
default:
break;
}
if (!compatible_topology) {
skip |= LogError(pPipeline->pipeline(), vuid.primitive_topology,
"%s: the last primitive topology %s state set by vkCmdSetPrimitiveTopologyEXT is "
"not compatible with the pipeline topology %s.",
caller, string_VkPrimitiveTopology(pCB->primitiveTopology),
string_VkPrimitiveTopology(pPipeline->create_info.graphics.pInputAssemblyState->topology));
}
}
if (enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate) {
skip |= ValidateGraphicsPipelineShaderDynamicState(pPipeline, pCB, caller, vuid);
}
return skip;
}
// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool VerifySetLayoutCompatibility(const debug_report_data *report_data, const cvdescriptorset::DescriptorSet *descriptor_set,
PIPELINE_LAYOUT_STATE const *pipeline_layout, const uint32_t layoutIndex,
string &errorMsg) {
auto num_sets = pipeline_layout->set_layouts.size();
if (layoutIndex >= num_sets) {
stringstream error_str;
error_str << report_data->FormatHandle(pipeline_layout->layout()) << " only contains " << num_sets
<< " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
<< layoutIndex;
errorMsg = error_str.str();
return false;
}
if (descriptor_set->IsPushDescriptor()) return true;
auto layout_node = pipeline_layout->set_layouts[layoutIndex].get();
return cvdescriptorset::VerifySetLayoutCompatibility(report_data, layout_node, descriptor_set->GetLayout().get(), &errorMsg);
}
// Validate overall state at the time of a draw call
bool CoreChecks::ValidateCmdBufDrawState(const CMD_BUFFER_STATE *cb_node, CMD_TYPE cmd_type, const bool indexed,
const VkPipelineBindPoint bind_point) const {
const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
const char *function = CommandTypeString(cmd_type);
const auto lv_bind_point = ConvertToLvlBindPoint(bind_point);
const auto &state = cb_node->lastBound[lv_bind_point];
const auto *pipe = state.pipeline_state;
if (nullptr == pipe) {
return LogError(cb_node->commandBuffer(), vuid.pipeline_bound,
"Must not call %s on this command buffer while there is no %s pipeline bound.", function,
bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR
? "RayTracing"
: bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute");
}
bool result = false;
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) {
// First check flag states
result |= ValidateDrawStateFlags(cb_node, pipe, indexed, vuid.dynamic_state);
if (cb_node->activeRenderPass && cb_node->activeFramebuffer) {
// Verify attachments for unprotected/protected command buffer.
if (enabled_features.core11.protectedMemory == VK_TRUE && cb_node->active_attachments) {
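// Walk the attachments used by the active subpass and validate protected/unprotected image usage
// against the command buffer's protection state.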
uint32_t i = 0;
for (const auto &view_state : *cb_node->active_attachments.get()) {
const auto &subpass = cb_node->active_subpasses->at(i);
if (subpass.used && view_state && !view_state->Destroyed()) {
std::string image_desc = "Image is ";
image_desc.append(string_VkImageUsageFlagBits(subpass.usage));
// Because input attachments are read-only, the protected command buffer case does not apply to them.
// Some CMD_TYPEs cannot be protected. See VUID 02711.
if (subpass.usage != VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT &&
vuid.protected_command_buffer != kVUIDUndefined) {
result |= ValidateUnprotectedImage(cb_node, view_state->image_state.get(), function,
vuid.protected_command_buffer, image_desc.c_str());
}
result |= ValidateProtectedImage(cb_node, view_state->image_state.get(), function,
vuid.unprotected_command_buffer, image_desc.c_str());
}
++i;
}
}
}
}
// Now complete other state checks
string error_string;
auto const &pipeline_layout = pipe->pipeline_layout.get();
// Check if the current pipeline is compatible for the maximum used set with the bound sets.
if (pipe->active_slots.size() > 0 && !CompatForSet(pipe->max_active_slot, state, pipeline_layout->compat_for_set)) {
LogObjectList objlist(pipe->pipeline());
objlist.add(pipeline_layout->layout());
objlist.add(state.pipeline_layout);
result |= LogError(objlist, vuid.compatible_pipeline,
"%s(): %s defined with %s is not compatible for maximum set statically used %" PRIu32
" with bound descriptor sets, last bound with %s",
CommandTypeString(cmd_type), report_data->FormatHandle(pipe->pipeline()).c_str(),
report_data->FormatHandle(pipeline_layout->layout()).c_str(), pipe->max_active_slot,
report_data->FormatHandle(state.pipeline_layout).c_str());
}
for (const auto &set_binding_pair : pipe->active_slots) {
uint32_t set_index = set_binding_pair.first;
// If valid set is not bound throw an error
if ((state.per_set.size() <= set_index) || (!state.per_set[set_index].bound_descriptor_set)) {
result |= LogError(cb_node->commandBuffer(), kVUID_Core_DrawState_DescriptorSetNotBound,
"%s(): %s uses set #%u but that set is not bound.", CommandTypeString(cmd_type),
report_data->FormatHandle(pipe->pipeline()).c_str(), set_index);
} else if (!VerifySetLayoutCompatibility(report_data, state.per_set[set_index].bound_descriptor_set, pipeline_layout,
set_index, error_string)) {
// Set is bound but not compatible w/ overlapping pipeline_layout from PSO
VkDescriptorSet set_handle = state.per_set[set_index].bound_descriptor_set->GetSet();
LogObjectList objlist(set_handle);
objlist.add(pipeline_layout->layout());
result |= LogError(objlist, kVUID_Core_DrawState_PipelineLayoutsIncompatible,
"%s(): %s bound as set #%u is not compatible with overlapping %s due to: %s",
CommandTypeString(cmd_type), report_data->FormatHandle(set_handle).c_str(), set_index,
report_data->FormatHandle(pipeline_layout->layout()).c_str(), error_string.c_str());
} else { // Valid set is bound and layout compatible, validate that it's updated
// Pull the set node
const cvdescriptorset::DescriptorSet *descriptor_set = state.per_set[set_index].bound_descriptor_set;
// Validate the draw-time state for this descriptor set
std::string err_str;
// For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor
// binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
// Here, the currently bound pipeline determines whether an image validation check is redundant...
// for images, the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second);
const auto &binding_req_map = reduced_map.FilteredMap(*cb_node, *pipe);
// We can skip validating the descriptor set if "nothing" has changed since the last validation.
// Same set, no image layout changes, and same "pipeline state" (binding_req_map). If there are
// any dynamic descriptors, always revalidate rather than caching the values. We currently only
// apply this optimization if IsManyDescriptors is true, to avoid the overhead of copying the
// binding_req_map which could potentially be expensive.
bool descriptor_set_changed =
!reduced_map.IsManyDescriptors() ||
// Revalidate each time if the set has dynamic offsets
state.per_set[set_index].dynamicOffsets.size() > 0 ||
// Revalidate if descriptor set (or contents) has changed
state.per_set[set_index].validated_set != descriptor_set ||
state.per_set[set_index].validated_set_change_count != descriptor_set->GetChangeCount() ||
(!disabled[image_layout_validation] &&
state.per_set[set_index].validated_set_image_layout_change_count != cb_node->image_layout_change_count);
bool need_validate = descriptor_set_changed ||
// Revalidate if previous bindingReqMap doesn't include new bindingReqMap
!std::includes(state.per_set[set_index].validated_set_binding_req_map.begin(),
state.per_set[set_index].validated_set_binding_req_map.end(),
binding_req_map.begin(), binding_req_map.end());
if (need_validate) {
if (!descriptor_set_changed && reduced_map.IsManyDescriptors()) {
// Only validate the bindings that haven't already been validated
BindingReqMap delta_reqs;
std::set_difference(binding_req_map.begin(), binding_req_map.end(),
state.per_set[set_index].validated_set_binding_req_map.begin(),
state.per_set[set_index].validated_set_binding_req_map.end(),
layer_data::insert_iterator<BindingReqMap>(delta_reqs, delta_reqs.begin()));
result |=
ValidateDrawState(descriptor_set, delta_reqs, state.per_set[set_index].dynamicOffsets, cb_node,
cb_node->active_attachments.get(), cb_node->active_subpasses.get(), function, vuid);
} else {
result |=
ValidateDrawState(descriptor_set, binding_req_map, state.per_set[set_index].dynamicOffsets, cb_node,
cb_node->active_attachments.get(), cb_node->active_subpasses.get(), function, vuid);
}
}
}
}
// Check general pipeline state that needs to be validated at drawtime
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) {
result |= ValidatePipelineDrawtimeState(state, cb_node, cmd_type, pipe);
}
// Verify if push constants have been set
// NOTE: Currently not checking whether active push constants are compatible with the active pipeline, nor whether the
// "life times" of push constants are correct.
// Discussion on validity of these checks can be found at https://gitlab.khronos.org/vulkan/vulkan/-/issues/2602.
if (!cb_node->push_constant_data_ranges || (pipeline_layout->push_constant_ranges == cb_node->push_constant_data_ranges)) {
for (const auto &stage : pipe->stage_state) {
const auto *entrypoint = stage.module->FindEntrypointStruct(stage.create_info->pName, stage.create_info->stage);
if (!entrypoint || !entrypoint->push_constant_used_in_shader.IsUsed()) {
continue;
}
// Edge case: the shader uses push constants statically, but vkCmdPushConstants was never called
if (!cb_node->push_constant_data_ranges) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(pipeline_layout->layout());
objlist.add(pipe->pipeline());
result |= LogError(objlist, vuid.push_constants_set,
"%s(): Shader in %s uses push-constant statically but vkCmdPushConstants was not called yet for "
"pipeline layout %s.",
CommandTypeString(cmd_type), string_VkShaderStageFlags(stage.stage_flag).c_str(),
report_data->FormatHandle(pipeline_layout->layout()).c_str());
}
const auto it = cb_node->push_constant_data_update.find(stage.stage_flag);
if (it == cb_node->push_constant_data_update.end()) {
// This error has been printed in ValidatePushConstantUsage.
break;
}
}
}
return result;
}
bool CoreChecks::ValidateCmdRayQueryState(const CMD_BUFFER_STATE *cb_state, CMD_TYPE cmd_type,
const VkPipelineBindPoint bind_point) const {
bool skip = false;
const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
const auto lv_bind_point = ConvertToLvlBindPoint(bind_point);
const auto &state = cb_state->lastBound[lv_bind_point];
const auto *pipe = state.pipeline_state;
bool ray_query_shader = false;
if (nullptr != pipe) {
if (bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR) {
ray_query_shader = true;
} else {
// TODO - Loop through shader for RayQueryKHR for draw/dispatch commands
}
}
if (cb_state->unprotected == false && ray_query_shader) {
skip |= LogError(cb_state->commandBuffer(), vuid.ray_query_protected_cb,
"%s(): can't use in protected command buffers for RayQuery operations.", CommandTypeString(cmd_type));
}
return skip;
}
bool CoreChecks::ValidateGraphicsPipelineBlendEnable(const PIPELINE_STATE *pPipeline) const {
bool skip = false;
const auto& create_info = pPipeline->create_info.graphics;
if (create_info.pColorBlendState) {
const auto *subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[create_info.subpass];
for (uint32_t i = 0; i < pPipeline->attachments.size() && i < subpass_desc->colorAttachmentCount; ++i) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
const auto attachment_desc = pPipeline->rp_state->createInfo.pAttachments[attachment];
const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_desc.format);
if (create_info.pRasterizationState &&
!create_info.pRasterizationState->rasterizerDiscardEnable &&
pPipeline->attachments[i].blendEnable && !(format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT)) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-blendEnable-04717",
"vkCreateGraphicsPipelines(): pipeline.pColorBlendState.pAttachments[%" PRIu32
"].blendEnable is VK_TRUE but format %s of the corresponding attachment description (subpass %" PRIu32
", attachment %" PRIu32 ") does not support VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT.",
i, string_VkFormat(attachment_desc.format), create_info.subpass, attachment);
}
}
}
return skip;
}
bool CoreChecks::ValidatePipelineLocked(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) const {
bool skip = false;
const PIPELINE_STATE *pipeline = pPipelines[pipelineIndex].get();
const auto &create_info = pipeline->create_info.graphics;
// If create derivative bit is set, check that we've specified a base
// pipeline correctly, and that the base pipeline was created to allow
// derivatives.
if (create_info.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
const PIPELINE_STATE *base_pipeline = nullptr;
if (!((create_info.basePipelineHandle != VK_NULL_HANDLE) ^ (create_info.basePipelineIndex != -1))) {
// TODO: This check is a superset of VUID-VkGraphicsPipelineCreateInfo-flags-00724 and
// TODO: VUID-VkGraphicsPipelineCreateInfo-flags-00725
skip |= LogError(device, kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo[%d]: exactly one of base pipeline index and handle must be specified",
pipelineIndex);
} else if (create_info.basePipelineIndex != -1) {
if (create_info.basePipelineIndex >= pipelineIndex) {
skip |=
LogError(device, "VUID-vkCreateGraphicsPipelines-flags-00720",
"Invalid Pipeline CreateInfo[%d]: base pipeline must occur earlier in array than derivative pipeline.",
pipelineIndex);
} else {
base_pipeline = pPipelines[create_info.basePipelineIndex].get();
}
} else if (create_info.basePipelineHandle != VK_NULL_HANDLE) {
base_pipeline = GetPipelineState(create_info.basePipelineHandle);
}
if (base_pipeline && !(base_pipeline->create_info.graphics.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
skip |= LogError(device, "VUID-vkCreateGraphicsPipelines-flags-00721",
"Invalid Pipeline CreateInfo[%d]: base pipeline does not allow derivatives.", pipelineIndex);
}
}
// Check for portability errors
if (IsExtEnabled(device_extensions.vk_khr_portability_subset)) {
if ((VK_FALSE == enabled_features.portability_subset_features.triangleFans) &&
(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN == pipeline->topology_at_rasterizer)) {
skip |=
LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-triangleFans-04452",
"Invalid Pipeline CreateInfo[%d] (portability error): VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN is not supported",
pipelineIndex);
}
// Validate vertex inputs
for (const auto &desc : pipeline->vertex_binding_descriptions_) {
const auto min_alignment = phys_dev_ext_props.portability_props.minVertexInputBindingStrideAlignment;
if ((desc.stride < min_alignment) || (min_alignment == 0) || ((desc.stride % min_alignment) != 0)) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDescription-stride-04456",
"Invalid Pipeline CreateInfo[%d] (portability error): Vertex input stride must be at least as large as and a "
"multiple of VkPhysicalDevicePortabilitySubsetPropertiesKHR::minVertexInputBindingStrideAlignment.",
pipelineIndex);
}
}
// Validate vertex attributes
if (VK_FALSE == enabled_features.portability_subset_features.vertexAttributeAccessBeyondStride) {
for (const auto &attrib : pipeline->vertex_attribute_descriptions_) {
const auto vertex_binding_map_it = pipeline->vertex_binding_to_index_map_.find(attrib.binding);
if (vertex_binding_map_it != pipeline->vertex_binding_to_index_map_.cend()) {
const auto& desc = pipeline->vertex_binding_descriptions_[vertex_binding_map_it->second];
if ((attrib.offset + FormatElementSize(attrib.format)) > desc.stride) {
skip |= LogError(device, "VUID-VkVertexInputAttributeDescription-vertexAttributeAccessBeyondStride-04457",
"Invalid Pipeline CreateInfo[%d] (portability error): (attribute.offset + "
"sizeof(vertex_description.format)) is larger than the vertex stride",
pipelineIndex);
}
}
}
}
// Validate polygon mode
auto raster_state_ci = create_info.pRasterizationState;
if ((VK_FALSE == enabled_features.portability_subset_features.pointPolygons) && raster_state_ci &&
(VK_FALSE == raster_state_ci->rasterizerDiscardEnable) && (VK_POLYGON_MODE_POINT == raster_state_ci->polygonMode)) {
skip |=
LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-pointPolygons-04458",
"Invalid Pipeline CreateInfo[%d] (portability error): point polygons are not supported", pipelineIndex);
}
}
return skip;
}
// UNLOCKED pipeline validation. DO NOT lookup objects in the CoreChecks->* maps in this function.
bool CoreChecks::ValidatePipelineUnlocked(const PIPELINE_STATE *pPipeline, uint32_t pipelineIndex) const {
bool skip = false;
const auto &create_info = pPipeline->create_info.graphics;
// Ensure the subpass index is valid. If not, then ValidateGraphicsPipelineShaderState
// produces nonsense errors that confuse users. Other layers should already
// emit errors for renderpass being invalid.
auto subpass_desc = (create_info.subpass < pPipeline->rp_state->createInfo.subpassCount)
? &pPipeline->rp_state->createInfo.pSubpasses[create_info.subpass]
: nullptr;
if (create_info.subpass >= pPipeline->rp_state->createInfo.subpassCount) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-00759",
"Invalid Pipeline CreateInfo[%" PRIu32
"] State: Subpass index %u is out of range for this renderpass (0..%u).",
pipelineIndex, create_info.subpass, pPipeline->rp_state->createInfo.subpassCount - 1);
}
if (create_info.pColorBlendState != NULL) {
const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = create_info.pColorBlendState;
if (subpass_desc && color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: %s subpass %u has colorAttachmentCount of %u which doesn't "
"match the pColorBlendState->attachmentCount of %u.",
pipelineIndex, report_data->FormatHandle(pPipeline->rp_state->renderPass()).c_str(),
create_info.subpass, subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount);
}
if (!enabled_features.core.independentBlend) {
if (pPipeline->attachments.size() > 1) {
const VkPipelineColorBlendAttachmentState *const attachments = &pPipeline->attachments[0];
for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
// Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
// settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
// only attachment state, so memcmp is best suited for the comparison
if (memcmp(static_cast<const void *>(attachments), static_cast<const void *>(&attachments[i]),
sizeof(attachments[0]))) {
skip |= LogError(device, "VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605",
"Invalid Pipeline CreateInfo[%" PRIu32
"]: If independent blend feature not enabled, all elements of "
"pAttachments must be identical.",
pipelineIndex);
break;
}
}
}
}
if (!enabled_features.core.logicOp && (create_info.pColorBlendState->logicOpEnable != VK_FALSE)) {
skip |= LogError(device, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606",
"Invalid Pipeline CreateInfo[%" PRIu32
"]: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.",
pipelineIndex);
}
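// Dual-source blend factors (VK_BLEND_FACTOR_*SRC1*) require the dualSrcBlend feature.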
for (size_t i = 0; i < pPipeline->attachments.size(); i++) {
if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(device, "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608",
"vkCreateGraphicsPipelines(): pPipelines[%" PRIu32
"].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor);
}
}
if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(device, "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609",
"vkCreateGraphicsPipelines(): pPipelines[%" PRIu32
"].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor);
}
}
if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(device, "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610",
"vkCreateGraphicsPipelines(): pPipelines[%" PRIu32
"].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor);
}
}
if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(device, "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611",
"vkCreateGraphicsPipelines(): pPipelines[%" PRIu32
"].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor);
}
}
}
auto color_write = lvl_find_in_chain<VkPipelineColorWriteCreateInfoEXT>(create_info.pColorBlendState->pNext);
if (color_write) {
if (color_write->attachmentCount != color_blend_state->attachmentCount) {
skip |= LogError(
device, "VUID-VkPipelineColorWriteCreateInfoEXT-attachmentCount-04802",
"vkCreateGraphicsPipelines(): VkPipelineColorWriteCreateInfoEXT in the pNext chain of pPipelines[%" PRIu32
"].pColorBlendState has different attachmentCount (%" PRIu32 ") than pColorBlendState.attachmentCount (%" PRIu32
").",
pipelineIndex, color_write->attachmentCount, color_blend_state->attachmentCount);
}
if (!enabled_features.color_write_features.colorWriteEnable) {
for (uint32_t i = 0; i < color_write->attachmentCount; ++i) {
if (color_write->pColorWriteEnables[i] != VK_TRUE) {
skip |= LogError(device, "VUID-VkPipelineColorWriteCreateInfoEXT-pAttachments-04801",
"vkCreateGraphicsPipelines(): pPipelines[%" PRIu32
"].pColorBlendState pNext chain includes VkPipelineColorWriteCreateInfoEXT with "
"pColorWriteEnables[%" PRIu32 "] = VK_FALSE, but colorWriteEnable is not enabled.",
pipelineIndex, i);
}
}
}
}
const auto *color_blend_advanced =
LvlFindInChain<VkPipelineColorBlendAdvancedStateCreateInfoEXT>(create_info.pColorBlendState->pNext);
if (color_blend_advanced) {
if (!phys_dev_ext_props.blend_operation_advanced_props.advancedBlendCorrelatedOverlap &&
color_blend_advanced->blendOverlap != VK_BLEND_OVERLAP_UNCORRELATED_EXT) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAdvancedStateCreateInfoEXT-blendOverlap-01426",
"vkCreateGraphicsPipelines(): pPipelines[%" PRIu32
"].pColorBlendState pNext chain contains VkPipelineColorBlendAdvancedStateCreateInfoEXT structure with "
"blendOverlap equal to %s, but "
"VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT::advancedBlendCorrelatedOverlap is not supported.",
pipelineIndex, string_VkBlendOverlapEXT(color_blend_advanced->blendOverlap));
}
if (!phys_dev_ext_props.blend_operation_advanced_props.advancedBlendNonPremultipliedDstColor &&
color_blend_advanced->dstPremultiplied != VK_TRUE) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAdvancedStateCreateInfoEXT-dstPremultiplied-01425",
"vkCreateGraphicsPipelines(): pPipelines[%" PRIu32
"].pColorBlendState pNext chain contains VkPipelineColorBlendAdvancedStateCreateInfoEXT structure with "
"dstPremultiplied equal to VK_FALSE, but "
"VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT::advancedBlendNonPremultipliedDstColor is not supported.",
pipelineIndex);
}
if (!phys_dev_ext_props.blend_operation_advanced_props.advancedBlendNonPremultipliedSrcColor &&
color_blend_advanced->srcPremultiplied != VK_TRUE) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAdvancedStateCreateInfoEXT-srcPremultiplied-01424",
"vkCreateGraphicsPipelines(): pPipelines[%" PRIu32
"].pColorBlendState pNext chain contains VkPipelineColorBlendAdvancedStateCreateInfoEXT structure with "
"srcPremultiplied equal to VK_FALSE, but "
"VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT::advancedBlendNonPremultipliedSrcColor is not supported.",
pipelineIndex);
}
}
}
if (ValidateGraphicsPipelineShaderState(pPipeline)) {
skip = true;
}
skip |= ValidateGraphicsPipelineBlendEnable(pPipeline);
// Each shader's stage must be unique
for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
if (pPipeline->active_shaders & stage) {
const auto &states = pPipeline->stage_state;
if (std::count_if(states.begin(), states.end(),
[stage](const PipelineStageState &pss) { return stage == pss.stage_flag; }) > 1) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00726",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: Multiple shaders provided for stage %s",
pipelineIndex, string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
}
}
}
if (!enabled_features.core.geometryShader && (pPipeline->active_shaders & VK_SHADER_STAGE_GEOMETRY_BIT)) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-00704",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: Geometry Shader not supported.", pipelineIndex);
}
if (!enabled_features.core.tessellationShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ||
pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-00705",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: Tessellation Shader not supported.", pipelineIndex);
}
if (IsExtEnabled(device_extensions.vk_nv_mesh_shader)) {
// VS or mesh is required
if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-02096",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: Vertex Shader or Mesh Shader required.", pipelineIndex);
}
// Can't mix mesh and VTG
if ((pPipeline->active_shaders & (VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV)) &&
(pPipeline->active_shaders &
(VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02095",
"Invalid Pipeline CreateInfo[%" PRIu32
"] State: Geometric shader stages must either be all mesh (mesh | task) "
"or all VTG (vertex, tess control, tess eval, geom).",
pipelineIndex);
}
} else {
// VS is required
if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00727",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: Vertex Shader required.", pipelineIndex);
}
}
if (!enabled_features.mesh_shader_features.meshShader && (pPipeline->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02091",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: Mesh Shader not supported.", pipelineIndex);
}
if (!enabled_features.mesh_shader_features.taskShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TASK_BIT_NV)) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02092",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: Task Shader not supported.", pipelineIndex);
}
// Either both or neither TC/TE shaders should be defined
bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
if (has_control && !has_eval) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00729",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: TE and TC shaders must be included or excluded as a pair.",
pipelineIndex);
}
if (!has_control && has_eval) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00730",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: TE and TC shaders must be included or excluded as a pair.",
pipelineIndex);
}
// Compute shaders should be specified independently of Gfx shaders
if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00728",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: Do not specify Compute Shader for Gfx Pipeline.",
pipelineIndex);
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !create_info.pInputAssemblyState) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02098",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: Missing pInputAssemblyState.", pipelineIndex);
}
// VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
// Mismatching primitive topology and tessellation fails graphics pipeline creation.
if (has_control && has_eval &&
(!create_info.pInputAssemblyState || create_info.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00736",
"Invalid Pipeline CreateInfo[%" PRIu32
"] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
"tessellation pipelines.",
pipelineIndex);
}
if (create_info.pInputAssemblyState) {
if (create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
if (!has_control || !has_eval) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-topology-00737",
"Invalid Pipeline CreateInfo[%" PRIu32
"] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
"for tessellation pipelines.",
pipelineIndex);
}
}
if ((create_info.pInputAssemblyState->primitiveRestartEnable == VK_TRUE) &&
(create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST ||
create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST ||
create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY ||
create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY ||
create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
if (IsExtEnabled(device_extensions.vk_ext_primitive_topology_list_restart)) {
if (create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
if (!enabled_features.primitive_topology_list_restart_features.primitiveTopologyPatchListRestart) {
skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-06253",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: topology is %s and primitiveRestartEnable is VK_TRUE and the "
"primitiveTopologyPatchListRestart feature is not enabled.",
pipelineIndex,
string_VkPrimitiveTopology(create_info.pInputAssemblyState->topology));
}
} else if (!enabled_features.primitive_topology_list_restart_features.primitiveTopologyListRestart) {
skip |= LogError(
device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-06252",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: topology is %s and primitiveRestartEnable is VK_TRUE and the primitiveTopologyListRestart feature "
"is not enabled.",
pipelineIndex, string_VkPrimitiveTopology(create_info.pInputAssemblyState->topology));
}
} else {
skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: topology is %s and primitiveRestartEnable is VK_TRUE. It is invalid.",
pipelineIndex,
string_VkPrimitiveTopology(create_info.pInputAssemblyState->topology));
}
}
if ((enabled_features.core.geometryShader == VK_FALSE) &&
(create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY ||
create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY ||
create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY ||
create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY)) {
skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: topology is %s and geometry shaders feature is not enabled. "
"It is invalid.",
pipelineIndex, string_VkPrimitiveTopology(create_info.pInputAssemblyState->topology));
}
if ((enabled_features.core.tessellationShader == VK_FALSE) &&
(create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00430",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: topology is %s and tessellation shaders feature is not "
"enabled. It is invalid.",
pipelineIndex, string_VkPrimitiveTopology(create_info.pInputAssemblyState->topology));
}
}
// If a rasterization state is provided...
if (create_info.pRasterizationState) {
if ((create_info.pRasterizationState->depthClampEnable == VK_TRUE) && (!enabled_features.core.depthClamp)) {
skip |= LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: the depthClamp device feature is disabled: the "
"depthClampEnable member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.",
pipelineIndex);
}
if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) && (create_info.pRasterizationState->depthBiasClamp != 0.0) &&
(!enabled_features.core.depthBiasClamp)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00754",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: the depthBiasClamp device feature is disabled: the "
"depthBiasClamp member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
"VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled",
pipelineIndex);
}
// If rasterization is enabled...
if (create_info.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
if ((create_info.pMultisampleState->alphaToOneEnable == VK_TRUE) && (!enabled_features.core.alphaToOne)) {
skip |= LogError(device, "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: the alphaToOne device feature is disabled: the alphaToOneEnable "
"member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.",
pipelineIndex);
}
// If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (!create_info.pDepthStencilState) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00752",
"Invalid Pipeline CreateInfo[%" PRIu32
"] State: pDepthStencilState is NULL when rasterization is enabled "
"and subpass uses a depth/stencil attachment.",
pipelineIndex);
} else if (create_info.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) {
if (!enabled_features.core.depthBounds) {
skip |=
LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: the depthBounds device feature is disabled: the "
"depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
"set to VK_FALSE.",
pipelineIndex);
}
// The extension was not created with a feature bit, which prevents displaying the 2 variations of the VUIDs
if (!IsExtEnabled(device_extensions.vk_ext_depth_range_unrestricted) &&
!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
const float minDepthBounds = create_info.pDepthStencilState->minDepthBounds;
const float maxDepthBounds = create_info.pDepthStencilState->maxDepthBounds;
// Also VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00755
if (!(minDepthBounds >= 0.0) || !(minDepthBounds <= 1.0)) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: VK_EXT_depth_range_unrestricted extension "
"is not enabled, VK_DYNAMIC_STATE_DEPTH_BOUNDS is not used, depthBoundsTestEnable is "
"true, and pDepthStencilState::minDepthBounds (=%f) is not within the [0.0, 1.0] range.",
pipelineIndex, minDepthBounds);
}
if (!(maxDepthBounds >= 0.0) || !(maxDepthBounds <= 1.0)) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: VK_EXT_depth_range_unrestricted extension "
"is not enabled, VK_DYNAMIC_STATE_DEPTH_BOUNDS is not used, depthBoundsTestEnable is "
"true, and pDepthStencilState::maxDepthBounds (=%f) is not within the [0.0, 1.0] range.",
pipelineIndex, maxDepthBounds);
}
}
}
}
// If subpass uses color attachments, pColorBlendState must be valid pointer
if (subpass_desc) {
uint32_t color_attachment_count = 0;
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
++color_attachment_count;
}
}
if (color_attachment_count > 0 && create_info.pColorBlendState == nullptr) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753",
"Invalid Pipeline CreateInfo[%" PRIu32
"] State: pColorBlendState is NULL when rasterization is enabled and "
"subpass uses color attachments.",
pipelineIndex);
}
}
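// Multiview: when the subpass has more than one view bit set, tessellation and geometry stages are
// only allowed if the corresponding multiview*Shader features are enabled.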
constexpr int num_bits = sizeof(subpass_desc->viewMask) * CHAR_BIT;
std::bitset<num_bits> view_bits(subpass_desc ? subpass_desc->viewMask : 0);
uint32_t view_bits_count = static_cast<uint32_t>(view_bits.count());
if (view_bits_count > 1) {
if (!enabled_features.multiview_features.multiviewTessellationShader &&
(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ||
pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-00760",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: subpass has %" PRIu32
" bits set in viewMask and pStages includes tessellation shaders, but the "
"VkPhysicalDeviceMultiviewFeatures::multiviewTessellationShader features is not enabled.",
pipelineIndex, view_bits_count);
}
if (!enabled_features.multiview_features.multiviewGeometryShader &&
pPipeline->active_shaders & VK_SHADER_STAGE_GEOMETRY_BIT) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-00761",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: subpass has %" PRIu32
" bits set in viewMask and pStages includes geometry shader, but the "
"VkPhysicalDeviceMultiviewFeatures::multiviewGeometryShader features is not enabled.",
pipelineIndex, view_bits_count);
}
}
}
auto provoking_vertex_state_ci =
lvl_find_in_chain<VkPipelineRasterizationProvokingVertexStateCreateInfoEXT>(create_info.pRasterizationState->pNext);
if (provoking_vertex_state_ci &&
provoking_vertex_state_ci->provokingVertexMode == VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT &&
!enabled_features.provoking_vertex_features.provokingVertexLast) {
skip |= LogError(
device, "VUID-VkPipelineRasterizationProvokingVertexStateCreateInfoEXT-provokingVertexMode-04883",
"provokingVertexLast feature is not enabled.");
}
const auto rasterization_state_stream_ci = LvlFindInChain<VkPipelineRasterizationStateStreamCreateInfoEXT>(
pPipeline->create_info.graphics.pRasterizationState->pNext);
if (rasterization_state_stream_ci) {
if (!enabled_features.transform_feedback_features.geometryStreams) {
skip |= LogError(device, "VUID-VkPipelineRasterizationStateStreamCreateInfoEXT-geometryStreams-02324",
"pCreateInfos[%" PRIu32
"].pRasterizationState pNext chain includes VkPipelineRasterizationStateStreamCreateInfoEXT, but "
"geometryStreams feature is not enabled.",
pipelineIndex);
} else if (phys_dev_ext_props.transform_feedback_props.transformFeedbackRasterizationStreamSelect == VK_FALSE &&
rasterization_state_stream_ci->rasterizationStream != 0) {
skip |= LogError(device, "VUID-VkPipelineRasterizationStateStreamCreateInfoEXT-rasterizationStream-02326",
"VkPhysicalDeviceTransformFeedbackPropertiesEXT::transformFeedbackRasterizationStreamSelect is "
"VK_FALSE, but pCreateInfos[%" PRIu32
"].pRasterizationState pNext chain includes VkPipelineRasterizationStateStreamCreateInfoEXT with "
"rasterizationStream (%" PRIu32 ") not equal to 0.",
pipelineIndex, rasterization_state_stream_ci->rasterizationStream);
} else if (rasterization_state_stream_ci->rasterizationStream >=
phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams) {
skip |= LogError(
device, "VUID-VkPipelineRasterizationStateStreamCreateInfoEXT-rasterizationStream-02325",
"pCreateInfos[%" PRIu32
"].pRasterizationState pNext chain includes VkPipelineRasterizationStateStreamCreateInfoEXT with "
"rasterizationStream (%" PRIu32
") not less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams (%" PRIu32 ").",
pipelineIndex, rasterization_state_stream_ci->rasterizationStream,
phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams);
}
}
const auto rasterization_conservative_state_ci =
LvlFindInChain<VkPipelineRasterizationConservativeStateCreateInfoEXT>(create_info.pRasterizationState->pNext);
if (rasterization_conservative_state_ci) {
if (rasterization_conservative_state_ci->extraPrimitiveOverestimationSize < 0.0f ||
rasterization_conservative_state_ci->extraPrimitiveOverestimationSize >
phys_dev_ext_props.conservative_rasterization_props.maxExtraPrimitiveOverestimationSize) {
skip |= LogError(
device, "VUID-VkPipelineRasterizationConservativeStateCreateInfoEXT-extraPrimitiveOverestimationSize-01769",
"pCreateInfos[%" PRIu32
"].pRasterizationState pNext chain includes VkPipelineRasterizationConservativeStateCreateInfoEXT with "
"extraPrimitiveOverestimationSize (%f), which is not between 0.0 and "
"VkPipelineRasterizationConservativeStateCreateInfoEXT::maxExtraPrimitiveOverestimationSize (%f).",
pipelineIndex, rasterization_conservative_state_ci->extraPrimitiveOverestimationSize,
phys_dev_ext_props.conservative_rasterization_props.maxExtraPrimitiveOverestimationSize);
}
}
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !create_info.pVertexInputState &&
!IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02097",
"Invalid Pipeline CreateInfo[%" PRIu32 "] State: Missing pVertexInputState.", pipelineIndex);
}
auto vi = create_info.pVertexInputState;
if (vi != NULL) {
for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
VkFormat format = vi->pVertexAttributeDescriptions[j].format;
// Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
VkFormatProperties properties;
DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &properties);
if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
skip |= LogError(device, "VUID-VkVertexInputAttributeDescription-format-00623",
"vkCreateGraphicsPipelines: pCreateInfo[%" PRIu32
"].pVertexInputState->vertexAttributeDescriptions[%d].format "
"(%s) is not a supported vertex buffer format.",
pipelineIndex, j, string_VkFormat(format));
}
}
}
if (subpass_desc && create_info.pMultisampleState) {
const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = create_info.pMultisampleState;
auto accum_color_samples = [subpass_desc, pPipeline](uint32_t &samples) {
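// Accumulate the sample-count flag bits of all used color attachments in the subpass.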
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
}
};
if (!(IsExtEnabled(device_extensions.vk_amd_mixed_attachment_samples) ||
IsExtEnabled(device_extensions.vk_nv_framebuffer_mixed_samples))) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_num_samples = 0;
accum_color_samples(subpass_num_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_num_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
// subpass_num_samples is 0 when the subpass has no attachments or if all attachments are VK_ATTACHMENT_UNUSED.
// Only validate the value of subpass_num_samples if the subpass has attachments that are not VK_ATTACHMENT_UNUSED.
if (subpass_num_samples && (!IsPowerOfTwo(subpass_num_samples) || (subpass_num_samples != raster_samples))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757",
"vkCreateGraphicsPipelines: pCreateInfo[%" PRIu32
"].pMultisampleState->rasterizationSamples (%u) "
"does not match the number of samples of the RenderPass color and/or depth attachment.",
pipelineIndex, raster_samples);
}
}
if (IsExtEnabled(device_extensions.vk_amd_mixed_attachment_samples)) {
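// With VK_AMD_mixed_attachment_samples, rasterizationSamples must equal the maximum sample count
// of the subpass attachments when rasterization is enabled.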
VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count = std::max(
max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
}
}
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count = std::max(
max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
}
if ((create_info.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) &&
(max_sample_count != static_cast<VkSampleCountFlagBits>(0)) &&
(multisample_state->rasterizationSamples != max_sample_count)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01505",
"vkCreateGraphicsPipelines: pCreateInfo[%" PRIu32
"].pMultisampleState->rasterizationSamples (%s) != max "
"attachment samples (%s) used in subpass %u.",
pipelineIndex, string_VkSampleCountFlagBits(multisample_state->rasterizationSamples),
string_VkSampleCountFlagBits(max_sample_count), create_info.subpass);
}
}
if (IsExtEnabled(device_extensions.vk_nv_framebuffer_mixed_samples)) {
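// With VK_NV_framebuffer_mixed_samples, rasterizationSamples must match the depth/stencil sample count
// when any depth/stencil test is enabled, and must be >= the color attachment sample count.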
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_color_samples = 0;
accum_color_samples(subpass_color_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
const uint32_t subpass_depth_samples =
static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
if (create_info.pDepthStencilState) {
const bool ds_test_enabled = (create_info.pDepthStencilState->depthTestEnable == VK_TRUE) ||
(create_info.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) ||
(create_info.pDepthStencilState->stencilTestEnable == VK_TRUE);
if (ds_test_enabled && (!IsPowerOfTwo(subpass_depth_samples) || (raster_samples != subpass_depth_samples))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01411",
"vkCreateGraphicsPipelines: pCreateInfo[%" PRIu32
"].pMultisampleState->rasterizationSamples (%u) "
"does not match the number of samples of the RenderPass depth attachment (%u).",
pipelineIndex, raster_samples, subpass_depth_samples);
}
}
}
if (IsPowerOfTwo(subpass_color_samples)) {
if (raster_samples < subpass_color_samples) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01412",
"vkCreateGraphicsPipelines: pCreateInfo[%" PRIu32
"].pMultisampleState->rasterizationSamples (%u) "
"is not greater or equal to the number of samples of the RenderPass color attachment (%u).",
pipelineIndex, raster_samples, subpass_color_samples);
}
if (multisample_state) {
if ((raster_samples > subpass_color_samples) && (multisample_state->sampleShadingEnable == VK_TRUE)) {
skip |= LogError(device, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415",
"vkCreateGraphicsPipelines: pCreateInfo[%" PRIu32
"].pMultisampleState->sampleShadingEnable must be "
"VK_FALSE when "
"pCreateInfo[%" PRIu32
"].pMultisampleState->rasterizationSamples (%u) is greater than the number of "
"samples of the "
"subpass color attachment (%u).",
pipelineIndex, pipelineIndex, raster_samples, subpass_color_samples);
}
const auto *coverage_modulation_state =
LvlFindInChain<VkPipelineCoverageModulationStateCreateInfoNV>(multisample_state->pNext);
if (coverage_modulation_state && (coverage_modulation_state->coverageModulationTableEnable == VK_TRUE)) {
if (coverage_modulation_state->coverageModulationTableCount != (raster_samples / subpass_color_samples)) {
skip |= LogError(
device, "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32
"] VkPipelineCoverageModulationStateCreateInfoNV "
"coverageModulationTableCount of %u is invalid.",
pipelineIndex, coverage_modulation_state->coverageModulationTableCount);
}
}
}
}
}
if (IsExtEnabled(device_extensions.vk_nv_coverage_reduction_mode)) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_color_samples = 0;
uint32_t subpass_depth_samples = 0;
accum_color_samples(subpass_color_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_depth_samples = static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
if (multisample_state && IsPowerOfTwo(subpass_color_samples) &&
(subpass_depth_samples == 0 || IsPowerOfTwo(subpass_depth_samples))) {
const auto *coverage_reduction_state =
LvlFindInChain<VkPipelineCoverageReductionStateCreateInfoNV>(multisample_state->pNext);
if (coverage_reduction_state) {
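// The combination of coverage reduction mode, rasterizationSamples, and attachment sample counts must
// be one of the combinations returned by vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV.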
const VkCoverageReductionModeNV coverage_reduction_mode = coverage_reduction_state->coverageReductionMode;
uint32_t combination_count = 0;
std::vector<VkFramebufferMixedSamplesCombinationNV> combinations;
DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count,
nullptr);
combinations.resize(combination_count);
DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count,
&combinations[0]);
bool combination_found = false;
for (const auto &combination : combinations) {
if (coverage_reduction_mode == combination.coverageReductionMode &&
raster_samples == combination.rasterizationSamples &&
subpass_depth_samples == combination.depthStencilSamples &&
subpass_color_samples == combination.colorSamples) {
combination_found = true;
break;
}
}
if (!combination_found) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-coverageReductionMode-02722",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32
"] the specified combination of coverage "
"reduction mode (%s), pMultisampleState->rasterizationSamples (%u), sample counts for "
"the subpass color and depth/stencil attachments is not a valid combination returned by "
"vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV.",
pipelineIndex, string_VkCoverageReductionModeNV(coverage_reduction_mode), raster_samples);
}
}
}
}
if (IsExtEnabled(device_extensions.vk_nv_fragment_coverage_to_color)) {
const auto coverage_to_color_state = LvlFindInChain<VkPipelineCoverageToColorStateCreateInfoNV>(multisample_state);
if (coverage_to_color_state && coverage_to_color_state->coverageToColorEnable == VK_TRUE) {
bool attachment_is_valid = false;
std::string error_detail;
if (coverage_to_color_state->coverageToColorLocation < subpass_desc->colorAttachmentCount) {
const auto& color_attachment_ref =
subpass_desc->pColorAttachments[coverage_to_color_state->coverageToColorLocation];
if (color_attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
const auto& color_attachment = pPipeline->rp_state->createInfo.pAttachments[color_attachment_ref.attachment];
switch (color_attachment.format) {
case VK_FORMAT_R8_UINT:
case VK_FORMAT_R8_SINT:
case VK_FORMAT_R16_UINT:
case VK_FORMAT_R16_SINT:
case VK_FORMAT_R32_UINT:
case VK_FORMAT_R32_SINT:
attachment_is_valid = true;
break;
default:
std::ostringstream str;
str << "references an attachment with an invalid format ("
<< string_VkFormat(color_attachment.format) << ").";
error_detail = str.str();
break;
}
} else {
std::ostringstream str;
str << "references an invalid attachment. The subpass pColorAttachments["
<< coverage_to_color_state->coverageToColorLocation
<< "].attachment has the value VK_ATTACHMENT_UNUSED.";
error_detail = str.str();
}
} else {
std::ostringstream str;
str << "references an non-existing attachment since the subpass colorAttachmentCount is "
<< subpass_desc->colorAttachmentCount << ".";
error_detail = str.str();
}
if (!attachment_is_valid) {
skip |= LogError(device, "VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRId32
"].pMultisampleState VkPipelineCoverageToColorStateCreateInfoNV "
"coverageToColorLocation = %" PRIu32 " %s",
pipelineIndex, coverage_to_color_state->coverageToColorLocation, error_detail.c_str());
}
}
}
if (IsExtEnabled(device_extensions.vk_ext_sample_locations)) {
const VkPipelineSampleLocationsStateCreateInfoEXT *sample_location_state =
LvlFindInChain<VkPipelineSampleLocationsStateCreateInfoEXT>(multisample_state->pNext);
if (sample_location_state != nullptr) {
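// With non-dynamic sample locations enabled, the sample location grid size must evenly divide the
// device's maximum grid size and sampleLocationsPerPixel must equal rasterizationSamples.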
if ((sample_location_state->sampleLocationsEnable == VK_TRUE) &&
(IsDynamic(pPipeline, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT) == false)) {
const VkSampleLocationsInfoEXT sample_location_info = sample_location_state->sampleLocationsInfo;
skip |= ValidateSampleLocationsInfo(&sample_location_info, "vkCreateGraphicsPipelines");
const VkExtent2D grid_size = sample_location_info.sampleLocationGridSize;
auto multisample_prop = LvlInitStruct<VkMultisamplePropertiesEXT>();
DispatchGetPhysicalDeviceMultisamplePropertiesEXT(physical_device, multisample_state->rasterizationSamples,
&multisample_prop);
const VkExtent2D max_grid_size = multisample_prop.maxSampleLocationGridSize;
// Note order or "divide" in "sampleLocationsInfo must evenly divide VkMultisamplePropertiesEXT"
if (SafeModulo(max_grid_size.width, grid_size.width) != 0) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: Because there is no dynamic state for Sample Location "
"and sampleLocationEnable is true, the "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.width (%u) "
"must be evenly divided by VkMultisamplePropertiesEXT::sampleLocationGridSize.width (%u).",
pipelineIndex, grid_size.width, max_grid_size.width);
}
if (SafeModulo(max_grid_size.height, grid_size.height) != 0) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: Because there is no dynamic state for Sample Location "
"and sampleLocationEnable is true, the "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.height (%u) "
"must be evenly divided by VkMultisamplePropertiesEXT::sampleLocationGridSize.height (%u).",
pipelineIndex, grid_size.height, max_grid_size.height);
}
if (sample_location_info.sampleLocationsPerPixel != multisample_state->rasterizationSamples) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01523",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: Because there is no dynamic state for Sample Location "
"and sampleLocationEnable is true, the "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationsPerPixel (%s) must "
"be the same as the VkPipelineMultisampleStateCreateInfo::rasterizationSamples (%s).",
pipelineIndex, string_VkSampleCountFlagBits(sample_location_info.sampleLocationsPerPixel),
string_VkSampleCountFlagBits(multisample_state->rasterizationSamples));
}
}
}
}
if (IsExtEnabled(device_extensions.vk_qcom_render_pass_shader_resolve)) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_input_attachment_samples = 0;
for (uint32_t i = 0; i < subpass_desc->inputAttachmentCount; i++) {
const auto attachment = subpass_desc->pInputAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
subpass_input_attachment_samples |=
static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
}
if ((subpass_desc->flags & VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM) != 0) {
if (raster_samples != subpass_input_attachment_samples) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-rasterizationSamples-04899",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: The subpass includes "
"VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM "
"but the input attachment VkSampleCountFlagBits (%u) does not match the "
"VkPipelineMultisampleStateCreateInfo::rasterizationSamples (%u) VkSampleCountFlagBits.",
pipelineIndex, subpass_input_attachment_samples, multisample_state->rasterizationSamples);
}
if (multisample_state->sampleShadingEnable == VK_TRUE) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-sampleShadingEnable-04900",
"vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
"]: The subpass includes "
"VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM "
"which requires sample shading is disabled, but "
"VkPipelineMultisampleStateCreateInfo::sampleShadingEnable is true. ",
pipelineIndex);
}
}
}
}
skip |= ValidatePipelineCacheControlFlags(create_info.flags, pipelineIndex, "vkCreateGraphicsPipelines",
"VUID-VkGraphicsPipelineCreateInfo-pipelineCreationCacheControl-02878");
// VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState &&
(IsDynamic(pPipeline, VK_DYNAMIC_STATE_CULL_MODE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_FRONT_FACE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_OP_EXT))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: Extended dynamic state used by the extendedDynamicState "
"feature is not enabled",
pipelineIndex);
}
// VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04868
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2 &&
(IsDynamic(pPipeline, VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04868",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: Extended dynamic state used by the extendedDynamicState2 "
"feature is not enabled",
pipelineIndex);
}
// VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04869
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2LogicOp &&
IsDynamic(pPipeline, VK_DYNAMIC_STATE_LOGIC_OP_EXT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04869",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: Extended dynamic state used by the "
"extendedDynamicState2LogicOp feature is not enabled",
pipelineIndex);
}
// VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04870
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2PatchControlPoints &&
IsDynamic(pPipeline, VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04870",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: Extended dynamic state used by the "
"extendedDynamicState2PatchControlPoints "
"feature is not enabled",
pipelineIndex);
}
const VkPipelineFragmentShadingRateStateCreateInfoKHR *fragment_shading_rate_state =
LvlFindInChain<VkPipelineFragmentShadingRateStateCreateInfoKHR>(create_info.pNext);
if (fragment_shading_rate_state && !IsDynamic(pPipeline, VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR)) {
const char *struct_name = "VkPipelineFragmentShadingRateStateCreateInfoKHR";
if (fragment_shading_rate_state->fragmentSize.width == 0) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04494",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Fragment width of %u has been specified in %s.",
pipelineIndex, fragment_shading_rate_state->fragmentSize.width, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.height == 0) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04495",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Fragment height of %u has been specified in %s.",
pipelineIndex, fragment_shading_rate_state->fragmentSize.height, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.width != 0 &&
!IsPowerOfTwo(fragment_shading_rate_state->fragmentSize.width)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04496",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: Non-power-of-two fragment width of %u has been specified in %s.",
pipelineIndex, fragment_shading_rate_state->fragmentSize.width, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.height != 0 &&
!IsPowerOfTwo(fragment_shading_rate_state->fragmentSize.height)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04497",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: Non-power-of-two fragment height of %u has been specified in %s.",
pipelineIndex, fragment_shading_rate_state->fragmentSize.height, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.width > 4) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04498",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: Fragment width of %u specified in %s is too large.",
pipelineIndex, fragment_shading_rate_state->fragmentSize.width, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.height > 4) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04499",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: Fragment height of %u specified in %s is too large",
pipelineIndex, fragment_shading_rate_state->fragmentSize.height, struct_name);
}
if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate &&
fragment_shading_rate_state->fragmentSize.width != 1) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: Pipeline fragment width of %u has been specified in %s, but "
"pipelineFragmentShadingRate is not enabled",
pipelineIndex, fragment_shading_rate_state->fragmentSize.width, struct_name);
}
if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate &&
fragment_shading_rate_state->fragmentSize.height != 1) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: Pipeline fragment height of %u has been specified in %s, but "
"pipelineFragmentShadingRate is not enabled",
pipelineIndex, fragment_shading_rate_state->fragmentSize.height, struct_name);
}
if (!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate &&
fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04501",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: First combiner operation of %s has been specified in %s, but "
"primitiveFragmentShadingRate is not enabled",
pipelineIndex, string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[0]),
struct_name);
}
if (!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate &&
fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04502",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: Second combiner operation of %s has been specified in %s, but "
"attachmentFragmentShadingRate is not enabled",
pipelineIndex, string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[1]),
struct_name);
}
if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps &&
(fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR &&
fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: First combiner operation of %s has been specified in %s, but "
"fragmentShadingRateNonTrivialCombinerOps is not supported",
pipelineIndex, string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[0]),
struct_name);
}
if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps &&
(fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR &&
fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: Second combiner operation of %s has been specified in %s, but "
"fragmentShadingRateNonTrivialCombinerOps is not supported",
pipelineIndex, string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[1]),
struct_name);
}
}
const auto *discard_rectangle_state = LvlFindInChain<VkPipelineDiscardRectangleStateCreateInfoEXT>(create_info.pNext);
if (discard_rectangle_state) {
if (discard_rectangle_state->discardRectangleCount > phys_dev_ext_props.discard_rectangle_props.maxDiscardRectangles) {
skip |= LogError(
device, "VUID-VkPipelineDiscardRectangleStateCreateInfoEXT-discardRectangleCount-00582",
"vkCreateGraphicsPipelines(): VkPipelineDiscardRectangleStateCreateInfoEXT::discardRectangleCount (%" PRIu32
") in pNext chain of pCreateInfo[%" PRIu32
"] is not less than VkPhysicalDeviceDiscardRectanglePropertiesEXT::maxDiscardRectangles (%" PRIu32 ".",
discard_rectangle_state->discardRectangleCount, pipelineIndex,
phys_dev_ext_props.discard_rectangle_props.maxDiscardRectangles);
}
}
// VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04807
if (!enabled_features.vertex_input_dynamic_state_features.vertexInputDynamicState &&
IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04807",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: The vertexInputDynamicState feature must be enabled to use "
"the VK_DYNAMIC_STATE_VERTEX_INPUT_EXT dynamic state",
pipelineIndex);
}
if (!enabled_features.color_write_features.colorWriteEnable && IsDynamic(pPipeline, VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04800",
"vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
"]: The colorWriteEnable feature must be enabled to use the "
"VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT dynamic state",
pipelineIndex);
}
return skip;
}
// Block of code at start here specifically for managing/tracking DSs
// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
bool CoreChecks::ValidateIdleDescriptorSet(VkDescriptorSet set, const char *func_str) const {
if (disabled[object_in_use]) return false;
bool skip = false;
auto set_node = setMap.find(set);
if (set_node != setMap.end()) {
// TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
if (set_node->second->InUse()) {
skip |= LogError(set, "VUID-vkFreeDescriptorSets-pDescriptorSets-00309",
"Cannot call %s() on %s that is in use by a command buffer.", func_str,
report_data->FormatHandle(set).c_str());
}
}
return skip;
}
// If a renderpass is active, verify that the given command type is appropriate for current subpass state
bool CoreChecks::ValidateCmdSubpassState(const CMD_BUFFER_STATE *pCB, const CMD_TYPE cmd_type) const {
if (!pCB->activeRenderPass) return false;
bool skip = false;
if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
(cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS &&
cmd_type != CMD_NEXTSUBPASS2 && cmd_type != CMD_NEXTSUBPASS2KHR && cmd_type != CMD_ENDRENDERPASS2 &&
cmd_type != CMD_ENDRENDERPASS2KHR)) {
skip |= LogError(pCB->commandBuffer(), kVUID_Core_DrawState_InvalidCommandBuffer,
"Commands cannot be called in a subpass using secondary command buffers.");
} else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
skip |= LogError(pCB->commandBuffer(), kVUID_Core_DrawState_InvalidCommandBuffer,
"vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
}
return skip;
}
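// Verify that the command buffer was allocated from a pool whose queue family supports all of the required queue
// capability flags (e.g. graphics, compute, transfer).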
bool CoreChecks::ValidateCmdQueueFlags(const CMD_BUFFER_STATE *cb_node, const char *caller_name, VkQueueFlags required_flags,
const char *error_code) const {
auto pool = cb_node->command_pool.get();
if (pool) {
const uint32_t queue_family_index = pool->queueFamilyIndex;
const VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_family_index].queueFlags;
if (!(required_flags & queue_flags)) {
string required_flags_string;
for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_SPARSE_BINDING_BIT,
VK_QUEUE_PROTECTED_BIT}) {
if (flag & required_flags) {
if (required_flags_string.size()) {
required_flags_string += " or ";
}
required_flags_string += string_VkQueueFlagBits(flag);
}
}
return LogError(cb_node->commandBuffer(), error_code,
"%s(): Called in command buffer %s which was allocated from the command pool %s which was created with "
"queueFamilyIndex %u which doesn't contain the required %s capability flags.",
caller_name, report_data->FormatHandle(cb_node->commandBuffer()).c_str(),
report_data->FormatHandle(pool->commandPool()).c_str(), queue_family_index,
required_flags_string.c_str());
}
}
return false;
}
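// Validate a VkSampleLocationsInfoEXT structure: sampleLocationsCount must equal grid width * grid height * samples
// per pixel, and the requested sampleLocationsPerPixel must be a sample count supported by the device.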
bool CoreChecks::ValidateSampleLocationsInfo(const VkSampleLocationsInfoEXT *pSampleLocationsInfo, const char *apiName) const {
bool skip = false;
const VkSampleCountFlagBits sample_count = pSampleLocationsInfo->sampleLocationsPerPixel;
const uint32_t sample_total_size = pSampleLocationsInfo->sampleLocationGridSize.width *
pSampleLocationsInfo->sampleLocationGridSize.height * SampleCountSize(sample_count);
if (pSampleLocationsInfo->sampleLocationsCount != sample_total_size) {
skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsCount-01527",
"%s: VkSampleLocationsInfoEXT::sampleLocationsCount (%u) must equal grid width * grid height * pixel "
"sample rate which currently is (%u * %u * %u).",
apiName, pSampleLocationsInfo->sampleLocationsCount, pSampleLocationsInfo->sampleLocationGridSize.width,
pSampleLocationsInfo->sampleLocationGridSize.height, SampleCountSize(sample_count));
}
if ((phys_dev_ext_props.sample_locations_props.sampleLocationSampleCounts & sample_count) == 0) {
skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsPerPixel-01526",
"%s: VkSampleLocationsInfoEXT::sampleLocationsPerPixel of %s is not supported by the device, please check "
"VkPhysicalDeviceSampleLocationsPropertiesEXT::sampleLocationSampleCounts for valid sample counts.",
apiName, string_VkSampleCountFlagBits(sample_count));
}
return skip;
}
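// Return true only if the two VkSampleLocationsInfoEXT structures describe identical sample location layouts.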
bool CoreChecks::MatchSampleLocationsInfo(const VkSampleLocationsInfoEXT *pSampleLocationsInfo1,
const VkSampleLocationsInfoEXT *pSampleLocationsInfo2) const {
if (pSampleLocationsInfo1->sampleLocationsPerPixel != pSampleLocationsInfo2->sampleLocationsPerPixel ||
pSampleLocationsInfo1->sampleLocationGridSize.width != pSampleLocationsInfo2->sampleLocationGridSize.width ||
pSampleLocationsInfo1->sampleLocationGridSize.height != pSampleLocationsInfo2->sampleLocationGridSize.height ||
pSampleLocationsInfo1->sampleLocationsCount != pSampleLocationsInfo2->sampleLocationsCount) {
return false;
}
for (uint32_t i = 0; i < pSampleLocationsInfo1->sampleLocationsCount; ++i) {
if (pSampleLocationsInfo1->pSampleLocations[i].x != pSampleLocationsInfo2->pSampleLocations[i].x ||
pSampleLocationsInfo1->pSampleLocations[i].y != pSampleLocationsInfo2->pSampleLocations[i].y) {
return false;
}
}
return true;
}
static char const *GetCauseStr(VulkanTypedHandle obj) {
if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
return "destroyed";
}
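// Report one error per broken binding recorded on the command buffer, naming the object whose destruction or update
// invalidated it.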
bool CoreChecks::ReportInvalidCommandBuffer(const CMD_BUFFER_STATE *cb_state, const char *call_source) const {
bool skip = false;
for (const auto& entry: cb_state->broken_bindings) {
const auto& obj = entry.first;
const char *cause_str = GetCauseStr(obj);
string vuid;
std::ostringstream str;
str << kVUID_Core_DrawState_InvalidCommandBuffer << "-" << object_string[obj.type];
vuid = str.str();
auto objlist = entry.second; //intentional copy
objlist.add(cb_state->commandBuffer());
skip |=
LogError(objlist, vuid, "You are adding %s to %s that is invalid because bound %s was %s.", call_source,
report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(obj).c_str(), cause_str);
}
return skip;
}
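// Shared checks for indirect draw/dispatch commands: the indirect buffer must be bound to memory, must have been
// created with VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT, and must not be used from a protected command buffer.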
bool CoreChecks::ValidateIndirectCmd(VkCommandBuffer command_buffer, VkBuffer buffer, CMD_TYPE cmd_type) const {
bool skip = false;
const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
const char *caller_name = CommandTypeString(cmd_type);
const CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
const BUFFER_STATE *buffer_state = GetBufferState(buffer);
if ((cb_state != nullptr) && (buffer_state != nullptr)) {
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, caller_name, vuid.indirect_contiguous_memory);
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT, true, vuid.indirect_buffer_bit,
caller_name, "VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT");
if (cb_state->unprotected == false) {
skip |= LogError(cb_state->commandBuffer(), vuid.indirect_protected_cb,
"%s: Indirect commands can't be used in protected command buffers.", caller_name);
}
}
return skip;
}
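// A device mask is invalid if it sets any bit at or above the number of physical devices in the device group.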
template <typename T1>
bool CoreChecks::ValidateDeviceMaskToPhysicalDeviceCount(uint32_t deviceMask, const T1 object, const char *VUID) const {
bool skip = false;
uint32_t count = 1 << physical_device_count;
if (count <= deviceMask) {
skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is invalid. Physical device count is %" PRIu32 ".", deviceMask,
physical_device_count);
}
return skip;
}
template <typename T1>
bool CoreChecks::ValidateDeviceMaskToZero(uint32_t deviceMask, const T1 object, const char *VUID) const {
bool skip = false;
if (deviceMask == 0) {
skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") must be non-zero.", deviceMask);
}
return skip;
}
template <typename T1>
bool CoreChecks::ValidateDeviceMaskToCommandBuffer(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const T1 object,
const char *VUID) const {
bool skip = false;
if ((deviceMask & pCB->initial_device_mask) != deviceMask) {
skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s initial device mask(0x%" PRIx32 ").",
deviceMask, report_data->FormatHandle(pCB->commandBuffer()).c_str(), pCB->initial_device_mask);
}
return skip;
}
bool CoreChecks::ValidateDeviceMaskToRenderPass(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const char *VUID) const {
bool skip = false;
if ((deviceMask & pCB->active_render_pass_device_mask) != deviceMask) {
skip |= LogError(pCB->commandBuffer(), VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s device mask(0x%" PRIx32 ").",
deviceMask, report_data->FormatHandle(pCB->activeRenderPass->renderPass()).c_str(),
pCB->active_render_pass_device_mask);
}
return skip;
}
// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// render pass.
bool CoreChecks::InsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
bool inside = false;
if (pCB->activeRenderPass) {
inside = LogError(pCB->commandBuffer(), msgCode, "%s: It is invalid to issue this call inside an active %s.", apiName,
report_data->FormatHandle(pCB->activeRenderPass->renderPass()).c_str());
}
return inside;
}
// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
bool CoreChecks::OutsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
bool outside = false;
if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
outside = LogError(pCB->commandBuffer(), msgCode, "%s: This call must be issued inside an active render pass.", apiName);
}
return outside;
}
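// Verify that the requested queue family index is less than the queue family count previously returned by
// vkGetPhysicalDeviceQueueFamilyProperties (or the 2/KHR variants).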
bool CoreChecks::ValidateQueueFamilyIndex(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t requested_queue_family,
const char *err_code, const char *cmd_name, const char *queue_family_var_name) const {
bool skip = false;
if (requested_queue_family >= pd_state->queue_family_known_count) {
const char *conditional_ext_cmd =
instance_extensions.vk_khr_get_physical_device_properties2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]" : "";
skip |= LogError(pd_state->Handle(), err_code,
"%s: %s (= %" PRIu32
") is not less than any previously obtained pQueueFamilyPropertyCount from "
"vkGetPhysicalDeviceQueueFamilyProperties%s (i.e. is not less than %s).",
cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd,
std::to_string(pd_state->queue_family_known_count).c_str());
}
return skip;
}
// Verify VkDeviceQueueCreateInfos
bool CoreChecks::ValidateDeviceQueueCreateInfos(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t info_count,
const VkDeviceQueueCreateInfo *infos) const {
bool skip = false;
const uint32_t not_used = std::numeric_limits<uint32_t>::max();
struct create_flags {
// uint32_t is to represent the queue family index to allow for better error messages
        uint32_t unprotected_index;
uint32_t protected_index;
        create_flags(uint32_t a, uint32_t b) : unprotected_index(a), protected_index(b) {}
};
layer_data::unordered_map<uint32_t, create_flags> queue_family_map;
for (uint32_t i = 0; i < info_count; ++i) {
const auto requested_queue_family = infos[i].queueFamilyIndex;
std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
skip |= ValidateQueueFamilyIndex(pd_state, requested_queue_family, "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381",
"vkCreateDevice", queue_family_var_name.c_str());
if (api_version == VK_API_VERSION_1_0) {
// Vulkan 1.0 didn't have protected memory so always needed unique info
create_flags flags = {requested_queue_family, not_used};
if (queue_family_map.emplace(requested_queue_family, flags).second == false) {
skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372",
"CreateDevice(): %s (=%" PRIu32
") is not unique and was also used in pCreateInfo->pQueueCreateInfos[%d].",
queue_family_var_name.c_str(), requested_queue_family,
                                 queue_family_map.at(requested_queue_family).unprotected_index);
}
} else {
// Vulkan 1.1 and up can have 2 queues be same family index if one is protected and one isn't
auto it = queue_family_map.find(requested_queue_family);
if (it == queue_family_map.end()) {
// Add first time seeing queue family index and what the create flags were
create_flags new_flags = {not_used, not_used};
if ((infos[i].flags & VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT) != 0) {
new_flags.protected_index = requested_queue_family;
} else {
                    new_flags.unprotected_index = requested_queue_family;
}
queue_family_map.emplace(requested_queue_family, new_flags);
} else {
// The queue family was seen, so now need to make sure the flags were different
if ((infos[i].flags & VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT) != 0) {
if (it->second.protected_index != not_used) {
skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-queueFamilyIndex-02802",
"CreateDevice(): %s (=%" PRIu32
") is not unique and was also used in pCreateInfo->pQueueCreateInfos[%d] which both have "
"VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT.",
queue_family_var_name.c_str(), requested_queue_family,
queue_family_map.at(requested_queue_family).protected_index);
} else {
it->second.protected_index = requested_queue_family;
}
} else {
                    if (it->second.unprotected_index != not_used) {
skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-queueFamilyIndex-02802",
"CreateDevice(): %s (=%" PRIu32
") is not unique and was also used in pCreateInfo->pQueueCreateInfos[%d].",
queue_family_var_name.c_str(), requested_queue_family,
                                         queue_family_map.at(requested_queue_family).unprotected_index);
} else {
                        it->second.unprotected_index = requested_queue_family;
}
}
}
}
// Verify that requested queue count of queue family is known to be valid at this point in time
if (requested_queue_family < pd_state->queue_family_known_count) {
const auto requested_queue_count = infos[i].queueCount;
const bool queue_family_has_props = requested_queue_family < pd_state->queue_family_properties.size();
// spec guarantees at least one queue for each queue family
const uint32_t available_queue_count =
queue_family_has_props ? pd_state->queue_family_properties[requested_queue_family].queueCount : 1;
const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties2
? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
: "";
if (requested_queue_count > available_queue_count) {
const std::string count_note =
queue_family_has_props
? "i.e. is not less than or equal to " +
std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount)
: "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained";
skip |= LogError(
pd_state->Handle(), "VUID-VkDeviceQueueCreateInfo-queueCount-00382",
"vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
"].queueFamilyIndex} (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
}
}
}
return skip;
}
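// Validate vkCreateDevice parameters that depend on physical-device state: the queue create infos, mutually
// exclusive fragment shading rate / shading rate image / fragment density map features, image atomic feature
// dependencies, and duplicate physical devices in VkDeviceGroupDeviceCreateInfo.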
bool CoreChecks::PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) const {
bool skip = false;
auto pd_state = GetPhysicalDeviceState(gpu);
// TODO: object_tracker should perhaps do this instead
// and it does not seem to currently work anyway -- the loader just crashes before this point
if (!pd_state) {
skip |= LogError(device, kVUID_Core_DevLimit_MustQueryCount,
"Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
} else {
skip |= ValidateDeviceQueueCreateInfos(pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
const VkPhysicalDeviceFragmentShadingRateFeaturesKHR *fragment_shading_rate_features =
LvlFindInChain<VkPhysicalDeviceFragmentShadingRateFeaturesKHR>(pCreateInfo->pNext);
if (fragment_shading_rate_features) {
const VkPhysicalDeviceShadingRateImageFeaturesNV *shading_rate_image_features =
LvlFindInChain<VkPhysicalDeviceShadingRateImageFeaturesNV>(pCreateInfo->pNext);
if (shading_rate_image_features && shading_rate_image_features->shadingRateImage) {
if (fragment_shading_rate_features->pipelineFragmentShadingRate) {
skip |= LogError(
pd_state->Handle(), "VUID-VkDeviceCreateInfo-shadingRateImage-04478",
"vkCreateDevice: Cannot enable shadingRateImage and pipelineFragmentShadingRate features simultaneously.");
}
if (fragment_shading_rate_features->primitiveFragmentShadingRate) {
skip |= LogError(
pd_state->Handle(), "VUID-VkDeviceCreateInfo-shadingRateImage-04479",
"vkCreateDevice: Cannot enable shadingRateImage and primitiveFragmentShadingRate features simultaneously.");
}
if (fragment_shading_rate_features->attachmentFragmentShadingRate) {
skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-shadingRateImage-04480",
"vkCreateDevice: Cannot enable shadingRateImage and attachmentFragmentShadingRate features "
"simultaneously.");
}
}
const VkPhysicalDeviceFragmentDensityMapFeaturesEXT *fragment_density_map_features =
LvlFindInChain<VkPhysicalDeviceFragmentDensityMapFeaturesEXT>(pCreateInfo->pNext);
if (fragment_density_map_features && fragment_density_map_features->fragmentDensityMap) {
if (fragment_shading_rate_features->pipelineFragmentShadingRate) {
skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-fragmentDensityMap-04481",
"vkCreateDevice: Cannot enable fragmentDensityMap and pipelineFragmentShadingRate features "
"simultaneously.");
}
if (fragment_shading_rate_features->primitiveFragmentShadingRate) {
skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-fragmentDensityMap-04482",
"vkCreateDevice: Cannot enable fragmentDensityMap and primitiveFragmentShadingRate features "
"simultaneously.");
}
if (fragment_shading_rate_features->attachmentFragmentShadingRate) {
skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-fragmentDensityMap-04483",
"vkCreateDevice: Cannot enable fragmentDensityMap and attachmentFragmentShadingRate features "
"simultaneously.");
}
}
}
const auto *shader_image_atomic_int64_features =
LvlFindInChain<VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT>(pCreateInfo->pNext);
if (shader_image_atomic_int64_features) {
if (shader_image_atomic_int64_features->sparseImageInt64Atomics &&
!shader_image_atomic_int64_features->shaderImageInt64Atomics) {
skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-None-04896",
"vkCreateDevice: if shaderImageInt64Atomics feature is enabled then sparseImageInt64Atomics "
"feature must also be enabled.");
}
}
const auto *shader_atomic_float_features = LvlFindInChain<VkPhysicalDeviceShaderAtomicFloatFeaturesEXT>(pCreateInfo->pNext);
if (shader_atomic_float_features) {
if (shader_atomic_float_features->sparseImageFloat32Atomics &&
!shader_atomic_float_features->shaderImageFloat32Atomics) {
skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-None-04897",
"vkCreateDevice: if sparseImageFloat32Atomics feature is enabled then shaderImageFloat32Atomics "
"feature must also be enabled.");
}
if (shader_atomic_float_features->sparseImageFloat32AtomicAdd &&
!shader_atomic_float_features->shaderImageFloat32AtomicAdd) {
skip |=
LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-None-04898",
"vkCreateDevice: if sparseImageFloat32AtomicAdd feature is enabled then shaderImageFloat32AtomicAdd "
"feature must also be enabled.");
}
}
const auto *shader_atomic_float2_features =
LvlFindInChain<VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT>(pCreateInfo->pNext);
if (shader_atomic_float2_features) {
if (shader_atomic_float2_features->sparseImageFloat32AtomicMinMax &&
!shader_atomic_float2_features->shaderImageFloat32AtomicMinMax) {
skip |= LogError(
pd_state->Handle(), "VUID-VkDeviceCreateInfo-sparseImageFloat32AtomicMinMax-04975",
"vkCreateDevice: if sparseImageFloat32AtomicMinMax feature is enabled then shaderImageFloat32AtomicMinMax "
"feature must also be enabled.");
}
}
const auto *device_group_ci = LvlFindInChain<VkDeviceGroupDeviceCreateInfo>(pCreateInfo->pNext);
if (device_group_ci) {
for (uint32_t i = 0; i < device_group_ci->physicalDeviceCount - 1; ++i) {
for (uint32_t j = i + 1; j < device_group_ci->physicalDeviceCount; ++j) {
if (device_group_ci->pPhysicalDevices[i] == device_group_ci->pPhysicalDevices[j]) {
skip |= LogError(pd_state->Handle(), "VUID-VkDeviceGroupDeviceCreateInfo-pPhysicalDevices-00375",
"vkCreateDevice: VkDeviceGroupDeviceCreateInfo has a duplicated physical device "
"in pPhysicalDevices [%" PRIu32 "] and [%" PRIu32 "].",
i, j);
}
}
}
}
}
return skip;
}
void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
// The state tracker sets up the device state
StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
// Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker refactor
// would be messier without.
// TODO: Find a good way to do this hooklessly.
ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation);
CoreChecks *core_checks = static_cast<CoreChecks *>(validation_data);
core_checks->SetSetImageViewInitialLayoutCallback(
[](CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &iv_state, VkImageLayout layout) -> void {
cb_node->SetImageViewInitialLayout(iv_state, layout);
});
// Allocate shader validation cache
if (!disabled[shader_validation_caching] && !disabled[shader_validation] && !core_checks->core_validation_cache) {
std::string validation_cache_path;
auto tmp_path = GetEnvironment("TMPDIR");
if (!tmp_path.size()) tmp_path = GetEnvironment("TMP");
if (!tmp_path.size()) tmp_path = GetEnvironment("TEMP");
if (!tmp_path.size()) tmp_path = "//tmp";
core_checks->validation_cache_path = tmp_path + "//shader_validation_cache";
#if defined(__linux__) || defined(__FreeBSD__)
core_checks->validation_cache_path += "-" + std::to_string(getuid());
#endif
core_checks->validation_cache_path += ".bin";
std::vector<char> validation_cache_data;
std::ifstream read_file(core_checks->validation_cache_path.c_str(), std::ios::in | std::ios::binary);
if (read_file) {
std::copy(std::istreambuf_iterator<char>(read_file), {}, std::back_inserter(validation_cache_data));
read_file.close();
} else {
LogInfo(core_checks->device, "VUID-NONE",
"Cannot open shader validation cache at %s for reading (it may not exist yet)",
core_checks->validation_cache_path.c_str());
}
VkValidationCacheCreateInfoEXT cacheCreateInfo = {};
cacheCreateInfo.sType = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT;
cacheCreateInfo.pNext = NULL;
cacheCreateInfo.initialDataSize = validation_cache_data.size();
cacheCreateInfo.pInitialData = validation_cache_data.data();
cacheCreateInfo.flags = 0;
CoreLayerCreateValidationCacheEXT(*pDevice, &cacheCreateInfo, nullptr, &core_checks->core_validation_cache);
}
}
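// On device destruction, clear the tracked image layouts and, if a shader validation cache was created, write its
// contents back to disk before destroying it.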
void CoreChecks::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
if (!device) return;
imageLayoutMap.clear();
StateTracker::PreCallRecordDestroyDevice(device, pAllocator);
if (core_validation_cache) {
size_t validation_cache_size = 0;
void *validation_cache_data = nullptr;
CoreLayerGetValidationCacheDataEXT(device, core_validation_cache, &validation_cache_size, nullptr);
validation_cache_data = (char *)malloc(sizeof(char) * validation_cache_size);
if (!validation_cache_data) {
LogInfo(device, "VUID-NONE", "Validation Cache Memory Error");
return;
}
VkResult result =
CoreLayerGetValidationCacheDataEXT(device, core_validation_cache, &validation_cache_size, validation_cache_data);
if (result != VK_SUCCESS) {
LogInfo(device, "VUID-NONE", "Validation Cache Retrieval Error");
return;
}
FILE *write_file = fopen(validation_cache_path.c_str(), "wb");
if (write_file) {
fwrite(validation_cache_data, sizeof(char), validation_cache_size, write_file);
fclose(write_file);
} else {
LogInfo(device, "VUID-NONE", "Cannot open shader validation cache at %s for writing", validation_cache_path.c_str());
}
free(validation_cache_data);
CoreLayerDestroyValidationCacheEXT(device, core_validation_cache, NULL);
}
}
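// Stage masks used at queue submission time must not include VK_PIPELINE_STAGE_HOST_BIT.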
bool CoreChecks::ValidateStageMaskHost(const Location &loc, VkPipelineStageFlags2KHR stageMask) const {
bool skip = false;
if ((stageMask & VK_PIPELINE_STAGE_HOST_BIT) != 0) {
const auto &vuid = sync_vuid_maps::GetQueueSubmitVUID(loc, sync_vuid_maps::SubmitError::kHostStageMask);
skip |= LogError(
device, vuid,
"%s stage mask must not include VK_PIPELINE_STAGE_HOST_BIT as the stage can't be invoked inside a command buffer.",
loc.Message().c_str());
}
return skip;
}
// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is to make sure that if there are events to be waited on prior to
// a QueryReset, make sure that all such events have been signalled.
bool CoreChecks::VerifyQueueStateToSeq(const QUEUE_STATE *initial_queue, uint64_t initial_seq) const {
bool skip = false;
// sequence number we want to validate up to, per queue
layer_data::unordered_map<const QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
// sequence number we've completed validation for, per queue
layer_data::unordered_map<const QUEUE_STATE *, uint64_t> done_seqs;
std::vector<const QUEUE_STATE *> worklist{initial_queue};
while (worklist.size()) {
auto queue = worklist.back();
worklist.pop_back();
auto target_seq = target_seqs[queue];
auto seq = std::max(done_seqs[queue], queue->seq);
auto sub_it = queue->submissions.begin() + int(seq - queue->seq); // seq >= queue->seq
for (; seq < target_seq; ++sub_it, ++seq) {
for (auto &wait : sub_it->waitSemaphores) {
auto other_queue = GetQueueState(wait.queue);
if (other_queue == queue) continue; // semaphores /always/ point backwards, so no point here.
auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
// if this wait is for another queue, and covers new sequence
// numbers beyond what we've already validated, mark the new
// target seq and (possibly-re)add the queue to the worklist.
if (other_done_seq < other_target_seq) {
target_seqs[other_queue] = other_target_seq;
worklist.push_back(other_queue);
}
}
}
// finally mark the point we've now validated this queue to.
done_seqs[queue] = seq;
}
return skip;
}
// When the given fence is retired, verify outstanding queue operations through the point of the fence
bool CoreChecks::VerifyQueueStateToFence(VkFence fence) const {
auto fence_state = GetFenceState(fence);
if (fence_state && fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) {
return VerifyQueueStateToSeq(GetQueueState(fence_state->signaler.first), fence_state->signaler.second);
}
return false;
}
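// A command buffer that is already in flight, or that appears more than once in the current submission, must have
// been begun with VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT.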
bool CoreChecks::ValidateCommandBufferSimultaneousUse(const Location &loc, const CMD_BUFFER_STATE *pCB,
int current_submit_count) const {
using sync_vuid_maps::GetQueueSubmitVUID;
using sync_vuid_maps::SubmitError;
bool skip = false;
if ((pCB->InUse() || current_submit_count > 1) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
const auto &vuid = sync_vuid_maps::GetQueueSubmitVUID(loc, SubmitError::kCmdNotSimultaneous);
skip |= LogError(device, vuid, "%s %s is already in use and is not marked for simultaneous use.", loc.Message().c_str(),
report_data->FormatHandle(pCB->commandBuffer()).c_str());
}
return skip;
}
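// Check the recording state of a command buffer being submitted: one-time-submit buffers may only be submitted once,
// invalidated buffers are reported, and the buffer must have been fully recorded with vkEndCommandBuffer().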
bool CoreChecks::ValidateCommandBufferState(const CMD_BUFFER_STATE *cb_state, const char *call_source, int current_submit_count,
const char *vu_id) const {
bool skip = false;
if (disabled[command_buffer_state]) return skip;
// Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
(cb_state->submitCount + current_submit_count > 1)) {
skip |= LogError(cb_state->commandBuffer(), kVUID_Core_DrawState_CommandBufferSingleSubmitViolation,
"%s was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
"times.",
report_data->FormatHandle(cb_state->commandBuffer()).c_str(), cb_state->submitCount + current_submit_count);
}
// Validate that cmd buffers have been updated
switch (cb_state->state) {
case CB_INVALID_INCOMPLETE:
case CB_INVALID_COMPLETE:
skip |= ReportInvalidCommandBuffer(cb_state, call_source);
break;
case CB_NEW:
skip |= LogError(cb_state->commandBuffer(), vu_id, "%s used in the call to %s is unrecorded and contains no commands.",
report_data->FormatHandle(cb_state->commandBuffer()).c_str(), call_source);
break;
case CB_RECORDING:
skip |= LogError(cb_state->commandBuffer(), kVUID_Core_DrawState_NoEndCommandBuffer,
"You must call vkEndCommandBuffer() on %s before this call to %s!",
report_data->FormatHandle(cb_state->commandBuffer()).c_str(), call_source);
break;
default: /* recorded */
break;
}
return skip;
}
// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
bool CoreChecks::ValidImageBufferQueue(const CMD_BUFFER_STATE *cb_node, const VulkanTypedHandle &object, uint32_t queueFamilyIndex,
uint32_t count, const uint32_t *indices) const {
bool found = false;
bool skip = false;
for (uint32_t i = 0; i < count; i++) {
if (indices[i] == queueFamilyIndex) {
found = true;
break;
}
}
if (!found) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(object);
skip = LogError(objlist, "VUID-vkQueueSubmit-pSubmits-04626",
"vkQueueSubmit: %s contains %s which was not created allowing concurrent access to "
"this queue family %d.",
report_data->FormatHandle(cb_node->commandBuffer()).c_str(), report_data->FormatHandle(object).c_str(),
queueFamilyIndex);
}
return skip;
}
// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
bool CoreChecks::ValidateQueueFamilyIndices(const Location &loc, const CMD_BUFFER_STATE *pCB, VkQueue queue) const {
using sync_vuid_maps::GetQueueSubmitVUID;
using sync_vuid_maps::SubmitError;
bool skip = false;
auto pool = pCB->command_pool.get();
auto queue_state = GetQueueState(queue);
if (pool && queue_state) {
if (pool->queueFamilyIndex != queue_state->queueFamilyIndex) {
LogObjectList objlist(pCB->commandBuffer());
objlist.add(queue);
const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kCmdWrongQueueFamily);
skip |= LogError(objlist, vuid,
"%s Primary %s created in queue family %d is being submitted on %s "
"from queue family %d.",
loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer()).c_str(), pool->queueFamilyIndex,
report_data->FormatHandle(queue).c_str(), queue_state->queueFamilyIndex);
}
// Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
for (const auto &object : pCB->object_bindings) {
if (object.type == kVulkanObjectTypeImage) {
auto image_state = object.node ? (IMAGE_STATE *)object.node : GetImageState(object.Cast<VkImage>());
if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex,
image_state->createInfo.queueFamilyIndexCount,
image_state->createInfo.pQueueFamilyIndices);
}
} else if (object.type == kVulkanObjectTypeBuffer) {
auto buffer_state = object.node ? (BUFFER_STATE *)object.node : GetBufferState(object.Cast<VkBuffer>());
if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex,
buffer_state->createInfo.queueFamilyIndexCount,
buffer_state->createInfo.pQueueFamilyIndices);
}
}
}
}
return skip;
}
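// Validate a primary command buffer at submission time, including its linked secondary command buffers and any
// queued queue-family-ownership transfer barriers.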
bool CoreChecks::ValidatePrimaryCommandBufferState(
const Location &loc, const CMD_BUFFER_STATE *pCB, int current_submit_count,
QFOTransferCBScoreboards<QFOImageTransferBarrier> *qfo_image_scoreboards,
QFOTransferCBScoreboards<QFOBufferTransferBarrier> *qfo_buffer_scoreboards) const {
using sync_vuid_maps::GetQueueSubmitVUID;
using sync_vuid_maps::SubmitError;
// Track in-use for resources off of primary and any secondary CBs
bool skip = false;
if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kSecondaryCmdInSubmit);
skip |= LogError(pCB->commandBuffer(), vuid, "%s Command buffer %s must be allocated with VK_COMMAND_BUFFER_LEVEL_PRIMARY.",
loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer()).c_str());
} else {
for (const auto *sub_cb : pCB->linkedCommandBuffers) {
skip |= ValidateQueuedQFOTransfers(sub_cb, qfo_image_scoreboards, qfo_buffer_scoreboards);
// TODO: replace with InvalidateCommandBuffers() at recording.
if ((sub_cb->primaryCommandBuffer != pCB->commandBuffer()) &&
!(sub_cb->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
LogObjectList objlist(device);
objlist.add(pCB->commandBuffer());
objlist.add(sub_cb->commandBuffer());
objlist.add(sub_cb->primaryCommandBuffer);
const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kSecondaryCmdNotSimultaneous);
skip |= LogError(objlist, vuid,
"%s %s was submitted with secondary %s but that buffer has subsequently been bound to "
"primary %s and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer()).c_str(),
report_data->FormatHandle(sub_cb->commandBuffer()).c_str(),
report_data->FormatHandle(sub_cb->primaryCommandBuffer).c_str());
}
}
}
// If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing on device
skip |= ValidateCommandBufferSimultaneousUse(loc, pCB, current_submit_count);
skip |= ValidateQueuedQFOTransfers(pCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
const char *vuid = loc.function == Func::vkQueueSubmit ? "VUID-vkQueueSubmit-pCommandBuffers-00072"
: "VUID-vkQueueSubmit2KHR-commandBuffer-03876";
skip |= ValidateCommandBufferState(pCB, loc.StringFunc().c_str(), current_submit_count, vuid);
return skip;
}
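// A fence passed to a queue submission must not already be in flight and must not already be signaled.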
bool CoreChecks::ValidateFenceForSubmit(const FENCE_STATE *pFence, const char *inflight_vuid, const char *retired_vuid,
const char *func_name) const {
bool skip = false;
if (pFence && pFence->scope == kSyncScopeInternal) {
if (pFence->state == FENCE_INFLIGHT) {
skip |= LogError(pFence->fence(), inflight_vuid, "%s: %s is already in use by another submission.", func_name,
report_data->FormatHandle(pFence->fence()).c_str());
}
else if (pFence->state == FENCE_RETIRED) {
skip |= LogError(pFence->fence(), retired_vuid,
"%s: %s submitted in SIGNALED state. Fences must be reset before being submitted", func_name,
report_data->FormatHandle(pFence->fence()).c_str());
}
}
return skip;
}
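// After a successful submit, propagate recorded image layouts and queued QFO transfers from each submitted command
// buffer (and its linked secondary command buffers) into the global state.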
void CoreChecks::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence,
VkResult result) {
StateTracker::PostCallRecordQueueSubmit(queue, submitCount, pSubmits, fence, result);
if (result != VK_SUCCESS) return;
    // The triply nested loop below duplicates the one in the StateTracker, but avoids the need for two additional callbacks.
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
auto cb_node = GetCBState(submit->pCommandBuffers[i]);
if (cb_node) {
for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
UpdateCmdBufImageLayouts(secondary_cmd_buffer);
RecordQueuedQFOTransfers(secondary_cmd_buffer);
}
UpdateCmdBufImageLayouts(cb_node);
RecordQueuedQFOTransfers(cb_node);
}
}
}
}
void CoreChecks::PostCallRecordQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits, VkFence fence,
VkResult result) {
StateTracker::PostCallRecordQueueSubmit2KHR(queue, submitCount, pSubmits, fence, result);
if (result != VK_SUCCESS) return;
    // The triply nested loop below duplicates the one in the StateTracker, but avoids the need for two additional callbacks.
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx];
for (uint32_t i = 0; i < submit->commandBufferInfoCount; i++) {
auto cb_node = GetCBState(submit->pCommandBufferInfos[i].commandBuffer);
if (cb_node) {
for (auto *secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
UpdateCmdBufImageLayouts(secondaryCmdBuffer);
RecordQueuedQFOTransfers(secondaryCmdBuffer);
}
UpdateCmdBufImageLayouts(cb_node);
RecordQueuedQFOTransfers(cb_node);
}
}
}
}
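// Return true if any pending submission on any queue signals the given semaphore.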
bool CoreChecks::SemaphoreWasSignaled(VkSemaphore semaphore) const {
for (auto &pair : queueMap) {
const auto &queue_state = pair.second;
for (const auto &submission : queue_state->submissions) {
for (const auto &signal_semaphore : submission.signalSemaphores) {
if (signal_semaphore.semaphore == semaphore) {
return true;
}
}
}
}
return false;
}
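// Helper that tracks binary semaphore signal/wait state across the batches of a single queue submission so that
// double-signals and waits with no pending signal can be detected.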
struct SemaphoreSubmitState {
const CoreChecks *core;
VkQueueFlags queue_flags;
layer_data::unordered_set<VkSemaphore> signaled_semaphores;
layer_data::unordered_set<VkSemaphore> unsignaled_semaphores;
layer_data::unordered_set<VkSemaphore> internal_semaphores;
SemaphoreSubmitState(const CoreChecks *core_, VkQueueFlags queue_flags_) : core(core_), queue_flags(queue_flags_) {}
bool ValidateWaitSemaphore(const core_error::Location &loc, VkQueue queue, VkSemaphore semaphore, uint64_t value,
                               uint32_t device_index) {
using sync_vuid_maps::GetQueueSubmitVUID;
using sync_vuid_maps::SubmitError;
bool skip = false;
LogObjectList objlist(semaphore);
objlist.add(queue);
const auto *pSemaphore = core->GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
(pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (unsignaled_semaphores.count(semaphore) ||
(!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled) && !core->SemaphoreWasSignaled(semaphore))) {
auto error = IsExtEnabled(core->device_extensions.vk_khr_timeline_semaphore)
? SubmitError::kTimelineCannotBeSignalled
: SubmitError::kBinaryCannotBeSignalled;
const auto &vuid = GetQueueSubmitVUID(loc, error);
skip |= core->LogError(
objlist, pSemaphore->scope == kSyncScopeInternal ? vuid : kVUID_Core_DrawState_QueueForwardProgress,
"%s Queue %s is waiting on semaphore (%s) that has no way to be signaled.", loc.Message().c_str(),
core->report_data->FormatHandle(queue).c_str(), core->report_data->FormatHandle(semaphore).c_str());
} else {
signaled_semaphores.erase(semaphore);
unsignaled_semaphores.insert(semaphore);
}
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR && pSemaphore->scope == kSyncScopeExternalTemporary) {
internal_semaphores.insert(semaphore);
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR) {
for (const auto &q : core->queueMap) {
if (q.first != queue) {
for (const auto &cb : q.second->submissions) {
for (const auto &wait_semaphore : cb.waitSemaphores) {
if (wait_semaphore.semaphore == semaphore) {
const char *vuid = loc.function == core_error::Func::vkQueueSubmit
? "VUID-vkQueueSubmit-pWaitSemaphores-00068"
: "VUID-vkQueueSubmit2KHR-semaphore-03871";
skip |= core->LogError(objlist, vuid, "%s Queue %s is already waiting on semaphore (%s).",
loc.Message().c_str(), core->report_data->FormatHandle(q.first).c_str(),
core->report_data->FormatHandle(semaphore).c_str());
}
}
}
}
}
}
return skip;
}
bool ValidateSignalSemaphore(const core_error::Location &loc, VkQueue queue, VkSemaphore semaphore, uint64_t value,
uint32_t deviceIndex) {
using sync_vuid_maps::GetQueueSubmitVUID;
using sync_vuid_maps::SubmitError;
bool skip = false;
LogObjectList objlist(semaphore);
objlist.add(queue);
const auto *pSemaphore = core->GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && value <= pSemaphore->payload) {
const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemSmallValue);
skip |= core->LogError(objlist, vuid,
"%s signal value (0x%" PRIx64
") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64 ")",
                                  loc.Message().c_str(), value, core->report_data->FormatHandle(queue).c_str(),
                                  core->report_data->FormatHandle(semaphore).c_str(), pSemaphore->payload);
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
(pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
objlist.add(pSemaphore->signaler.first);
skip |= core->LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress,
"%s is signaling %s (%s) that was previously "
"signaled by %s but has not since been waited on by any queue.",
loc.Message().c_str(), core->report_data->FormatHandle(queue).c_str(),
core->report_data->FormatHandle(semaphore).c_str(),
core->report_data->FormatHandle(pSemaphore->signaler.first).c_str());
} else {
unsignaled_semaphores.erase(semaphore);
signaled_semaphores.insert(semaphore);
}
}
return skip;
}
};
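// Validate the wait and signal semaphores of a VkSubmitInfo batch, including consistency with any chained
// VkTimelineSemaphoreSubmitInfo.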
bool CoreChecks::ValidateSemaphoresForSubmit(SemaphoreSubmitState &state, VkQueue queue, const VkSubmitInfo *submit,
const Location &outer_loc) const {
bool skip = false;
auto *timeline_semaphore_submit_info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(submit->pNext);
for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
uint64_t value = 0;
uint32_t device_index = 0; // TODO:
VkSemaphore semaphore = submit->pWaitSemaphores[i];
LogObjectList objlist(semaphore);
objlist.add(queue);
if (submit->pWaitDstStageMask) {
auto loc = outer_loc.dot(Field::pWaitDstStageMask, i);
skip |= ValidatePipelineStage(objlist, loc, state.queue_flags, submit->pWaitDstStageMask[i]);
skip |= ValidateStageMaskHost(loc, submit->pWaitDstStageMask[i]);
}
const auto *semaphore_state = GetSemaphoreState(semaphore);
if (!semaphore_state) {
continue;
}
auto loc = outer_loc.dot(Field::pWaitSemaphores, i);
if (semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE) {
if (timeline_semaphore_submit_info == nullptr) {
skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239",
"%s (%s) is a timeline semaphore, but VkSubmitInfo does "
"not include an instance of VkTimelineSemaphoreSubmitInfo",
loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
continue;
} else if (submit->waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03240",
"%s (%s) is a timeline semaphore, it contains an "
"instance of VkTimelineSemaphoreSubmitInfo, but waitSemaphoreValueCount (%u) is different than "
"waitSemaphoreCount (%u)",
loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str(),
timeline_semaphore_submit_info->waitSemaphoreValueCount, submit->waitSemaphoreCount);
continue;
}
value = timeline_semaphore_submit_info->pWaitSemaphoreValues[i];
}
skip |= state.ValidateWaitSemaphore(outer_loc.dot(Field::pWaitSemaphores, i), queue, semaphore, value, device_index);
}
for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pSignalSemaphores[i];
uint64_t value = 0;
uint32_t device_index = 0;
const auto *semaphore_state = GetSemaphoreState(semaphore);
if (!semaphore_state) {
continue;
}
auto loc = outer_loc.dot(Field::pSignalSemaphores, i);
if (semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE) {
if (timeline_semaphore_submit_info == nullptr) {
skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239",
"%s (%s) is a timeline semaphore, but VkSubmitInfo"
"does not include an instance of VkTimelineSemaphoreSubmitInfo",
loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
continue;
} else if (submit->signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03241",
"%s (%s) is a timeline semaphore, it contains an "
"instance of VkTimelineSemaphoreSubmitInfo, but signalSemaphoreValueCount (%u) is different than "
"signalSemaphoreCount (%u)",
loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str(),
timeline_semaphore_submit_info->signalSemaphoreValueCount, submit->signalSemaphoreCount);
continue;
}
value = timeline_semaphore_submit_info->pSignalSemaphoreValues[i];
}
skip |= state.ValidateSignalSemaphore(loc, queue, semaphore, value, device_index);
}
return skip;
}
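// Validate the wait and signal semaphore infos of a VkSubmitInfo2KHR, including their stage masks and
// per-semaphore values.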
bool CoreChecks::ValidateSemaphoresForSubmit(SemaphoreSubmitState &state, VkQueue queue, const VkSubmitInfo2KHR *submit,
const Location &outer_loc) const {
bool skip = false;
for (uint32_t i = 0; i < submit->waitSemaphoreInfoCount; ++i) {
const auto &sem_info = submit->pWaitSemaphoreInfos[i];
Location loc = outer_loc.dot(Field::pWaitSemaphoreInfos, i);
skip |= ValidatePipelineStage(LogObjectList(sem_info.semaphore), loc.dot(Field::stageMask), state.queue_flags,
sem_info.stageMask);
skip |= ValidateStageMaskHost(loc.dot(Field::stageMask), sem_info.stageMask);
skip |= state.ValidateWaitSemaphore(loc, queue, sem_info.semaphore, sem_info.value, sem_info.deviceIndex);
}
for (uint32_t i = 0; i < submit->signalSemaphoreInfoCount; ++i) {
const auto &sem_info = submit->pSignalSemaphoreInfos[i];
auto loc = outer_loc.dot(Field::pSignalSemaphoreInfos, i);
skip |= ValidatePipelineStage(LogObjectList(sem_info.semaphore), loc.dot(Field::stageMask), state.queue_flags,
sem_info.stageMask);
skip |= ValidateStageMaskHost(loc.dot(Field::stageMask), sem_info.stageMask);
skip |= state.ValidateSignalSemaphore(loc, queue, sem_info.semaphore, sem_info.value, sem_info.deviceIndex);
}
return skip;
}
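// Check that a timeline semaphore value stays within maxTimelineSemaphoreValueDifference of the current
// payload and of every signal/wait value already pending on any queue for that semaphore.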
bool CoreChecks::ValidateMaxTimelineSemaphoreValueDifference(const Location &loc, VkSemaphore semaphore, uint64_t value) const {
using sync_vuid_maps::GetQueueSubmitVUID;
using sync_vuid_maps::SubmitError;
bool skip = false;
const auto semaphore_state = GetSemaphoreState(semaphore);
if (semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) return false;
uint64_t diff = value > semaphore_state->payload ? value - semaphore_state->payload : semaphore_state->payload - value;
if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff);
skip |= LogError(semaphore, vuid, "%s value exceeds limit regarding current semaphore %s payload", loc.Message().c_str(),
report_data->FormatHandle(semaphore).c_str());
}
for (auto &pair : queueMap) {
const auto &queue_state = pair.second;
for (const auto &submission : queue_state->submissions) {
for (const auto &signal_semaphore : submission.signalSemaphores) {
if (signal_semaphore.semaphore == semaphore) {
diff = value > signal_semaphore.payload ? value - signal_semaphore.payload : signal_semaphore.payload - value;
if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff);
skip |= LogError(semaphore, vuid, "%s value exceeds limit regarding pending semaphore %s signal value",
loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
}
}
}
for (const auto &wait_semaphore : submission.waitSemaphores) {
if (wait_semaphore.semaphore == semaphore) {
diff = value > wait_semaphore.payload ? value - wait_semaphore.payload : wait_semaphore.payload - value;
if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff);
skip |= LogError(semaphore, vuid, "%s value exceeds limit regarding pending semaphore %s wait value",
loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
}
}
}
}
}
return skip;
}
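// Helper that accumulates per-submit command buffer validation state (image layouts, QFO transfer
// scoreboards, query and event state) across all command buffers of a queue submission.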
struct CommandBufferSubmitState {
const CoreChecks *core;
const QUEUE_STATE *queue_state;
QFOTransferCBScoreboards<QFOImageTransferBarrier> qfo_image_scoreboards;
QFOTransferCBScoreboards<QFOBufferTransferBarrier> qfo_buffer_scoreboards;
vector<VkCommandBuffer> current_cmds;
GlobalImageLayoutMap overlay_image_layout_map;
QueryMap local_query_to_state_map;
EventToStageMap local_event_to_stage_map;
CommandBufferSubmitState(const CoreChecks *c, const char *func, const QUEUE_STATE *q) : core(c), queue_state(q) {}
bool Validate(const core_error::Location &loc, VkCommandBuffer cmd, uint32_t perf_pass) {
bool skip = false;
const auto *cb_node = core->GetCBState(cmd);
if (cb_node == nullptr) {
return skip;
}
skip |= core->ValidateCmdBufImageLayouts(loc, cb_node, core->imageLayoutMap, overlay_image_layout_map);
current_cmds.push_back(cmd);
skip |= core->ValidatePrimaryCommandBufferState(loc, cb_node,
static_cast<int>(std::count(current_cmds.begin(), current_cmds.end(), cmd)),
&qfo_image_scoreboards, &qfo_buffer_scoreboards);
skip |= core->ValidateQueueFamilyIndices(loc, cb_node, queue_state->Queue());
for (const auto &descriptor_set : cb_node->validate_descriptorsets_in_queuesubmit) {
const cvdescriptorset::DescriptorSet *set_node = core->GetSetNode(descriptor_set.first);
if (!set_node) {
continue;
}
for (const auto &cmd_info : descriptor_set.second) {
std::string function = loc.StringFunc();
function += ", ";
function += CommandTypeString(cmd_info.cmd_type);
for (const auto &binding_info : cmd_info.binding_infos) {
std::string error;
std::vector<uint32_t> dynamic_offsets;
// dynamic data isn't allowed in UPDATE_AFTER_BIND, so dynamicOffsets is always empty.
// This is submit time, not record time...
const bool record_time_validate = false;
layer_data::optional<layer_data::unordered_map<VkImageView, VkImageLayout>> checked_layouts;
if (set_node->GetTotalDescriptorCount() > cvdescriptorset::PrefilterBindRequestMap::kManyDescriptors_) {
checked_layouts.emplace();
}
skip |= core->ValidateDescriptorSetBindingData(
cb_node, set_node, dynamic_offsets, binding_info, cmd_info.framebuffer, cmd_info.attachments.get(),
cmd_info.subpasses.get(), record_time_validate, function.c_str(),
core->GetDrawDispatchVuid(cmd_info.cmd_type), checked_layouts);
}
}
}
// Potential early exit here as bad object state may crash in delayed function calls
if (skip) {
return true;
}
// Call submit-time functions to validate or update local mirrors of state (to preserve const-ness at validate time)
for (auto &function : cb_node->queue_submit_functions) {
skip |= function(core, queue_state);
}
for (auto &function : cb_node->eventUpdates) {
skip |= function(core, /*do_validate*/ true, &local_event_to_stage_map);
}
VkQueryPool first_perf_query_pool = VK_NULL_HANDLE;
for (auto &function : cb_node->queryUpdates) {
skip |= function(core, /*do_validate*/ true, first_perf_query_pool, perf_pass, &local_query_to_state_map);
}
return skip;
}
};
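// Validate vkQueueSubmit: fence state, command buffer state, semaphores, device group and protected
// submit information for each element of pSubmits.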
bool CoreChecks::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
VkFence fence) const {
const auto *fence_state = GetFenceState(fence);
bool skip =
ValidateFenceForSubmit(fence_state, "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueSubmit-fence-00063", "vkQueueSubmit()");
if (skip) {
return true;
}
const auto queue_state = GetQueueState(queue);
CommandBufferSubmitState cb_submit_state(this, "vkQueueSubmit()", queue_state);
SemaphoreSubmitState sem_submit_state(
this, GetPhysicalDeviceState()->queue_family_properties[queue_state->queueFamilyIndex].queueFlags);
// Now verify each individual submit
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
const auto perf_submit = LvlFindInChain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext);
uint32_t perf_pass = perf_submit ? perf_submit->counterPassIndex : 0;
Location loc(Func::vkQueueSubmit, Struct::VkSubmitInfo, Field::pSubmits, submit_idx);
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
skip |= cb_submit_state.Validate(loc.dot(Field::pCommandBuffers, i), submit->pCommandBuffers[i], perf_pass);
}
skip |= ValidateSemaphoresForSubmit(sem_submit_state, queue, submit, loc);
auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupSubmitInfo>(submit->pNext);
if (chained_device_group_struct && chained_device_group_struct->commandBufferCount > 0) {
for (uint32_t i = 0; i < chained_device_group_struct->commandBufferCount; ++i) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->pCommandBufferDeviceMasks[i], queue,
"VUID-VkDeviceGroupSubmitInfo-pCommandBufferDeviceMasks-00086");
}
if (chained_device_group_struct->signalSemaphoreCount != submit->signalSemaphoreCount) {
skip |= LogError(queue, "VUID-VkDeviceGroupSubmitInfo-signalSemaphoreCount-00084",
"pSubmits[%" PRIu32 "] signalSemaphoreCount (%" PRIu32
") is different than signalSemaphoreCount (%" PRIu32
") of the VkDeviceGroupSubmitInfo in its pNext chain",
submit_idx, submit->signalSemaphoreCount, chained_device_group_struct->signalSemaphoreCount);
}
if (chained_device_group_struct->waitSemaphoreCount != submit->waitSemaphoreCount) {
skip |=
LogError(queue, "VUID-VkDeviceGroupSubmitInfo-waitSemaphoreCount-00082",
"pSubmits[%" PRIu32 "] waitSemaphoreCount (%" PRIu32 ") is different than waitSemaphoreCount (%" PRIu32
") of the VkDeviceGroupSubmitInfo in its pNext chain",
submit_idx, submit->waitSemaphoreCount, chained_device_group_struct->waitSemaphoreCount);
}
if (chained_device_group_struct->commandBufferCount != submit->commandBufferCount) {
skip |=
LogError(queue, "VUID-VkDeviceGroupSubmitInfo-commandBufferCount-00083",
"pSubmits[%" PRIu32 "] commandBufferCount (%" PRIu32 ") is different than commandBufferCount (%" PRIu32
") of the VkDeviceGroupSubmitInfo in its pNext chain",
submit_idx, submit->commandBufferCount, chained_device_group_struct->commandBufferCount);
}
}
auto protected_submit_info = LvlFindInChain<VkProtectedSubmitInfo>(submit->pNext);
if (protected_submit_info) {
const bool protected_submit = protected_submit_info->protectedSubmit == VK_TRUE;
// Only check feature once for submit
if ((protected_submit == true) && (enabled_features.core11.protectedMemory == VK_FALSE)) {
skip |= LogError(queue, "VUID-VkProtectedSubmitInfo-protectedSubmit-01816",
"vkQueueSubmit(): The protectedMemory device feature is disabled, can't submit a protected queue "
"to %s pSubmits[%u]",
report_data->FormatHandle(queue).c_str(), submit_idx);
}
// Make sure command buffers are all protected or unprotected
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
const CMD_BUFFER_STATE *cb_state = GetCBState(submit->pCommandBuffers[i]);
if (cb_state != nullptr) {
if ((cb_state->unprotected == true) && (protected_submit == true)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(queue);
skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04148",
"vkQueueSubmit(): command buffer %s is unprotected while queue %s pSubmits[%u] has "
"VkProtectedSubmitInfo:protectedSubmit set to VK_TRUE",
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(queue).c_str(), submit_idx);
}
if ((cb_state->unprotected == false) && (protected_submit == false)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(queue);
skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04120",
"vkQueueSubmit(): command buffer %s is protected while queue %s pSubmits[%u] has "
"VkProtectedSubmitInfo:protectedSubmit set to VK_FALSE",
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(queue).c_str(), submit_idx);
}
}
}
}
}
if (skip) return skip;
// Now verify maxTimelineSemaphoreValueDifference
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
Location loc(Func::vkQueueSubmit, Struct::VkSubmitInfo, Field::pSubmits, submit_idx);
const VkSubmitInfo *submit = &pSubmits[submit_idx];
auto *info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(submit->pNext);
if (info) {
// If there are any timeline semaphores, this condition gets checked before the early return above
if (info->waitSemaphoreValueCount) {
for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pWaitSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::pWaitSemaphores, i), semaphore,
info->pWaitSemaphoreValues[i]);
}
}
// If there are any timeline semaphores, this condition gets checked before the early return above
if (info->signalSemaphoreValueCount) {
for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pSignalSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::pSignalSemaphores, i), semaphore,
info->pSignalSemaphoreValues[i]);
}
}
}
}
return skip;
}
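// Validate vkQueueSubmit2KHR: fence state, the synchronization2 feature, command buffer infos, semaphore
// infos and protected submit flags for each element of pSubmits.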
bool CoreChecks::PreCallValidateQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits,
VkFence fence) const {
const auto *pFence = GetFenceState(fence);
bool skip = ValidateFenceForSubmit(pFence, "VUID-vkQueueSubmit2KHR-fence-04895", "VUID-vkQueueSubmit2KHR-fence-04894",
"vkQueueSubmit2KHR()");
if (skip) {
return true;
}
if (!enabled_features.synchronization2_features.synchronization2) {
skip |= LogError(queue, "VUID-vkQueueSubmit2KHR-synchronization2-03866",
"vkQueueSubmit2KHR(): Synchronization2 feature is not enabled");
}
const auto queue_state = GetQueueState(queue);
CommandBufferSubmitState cb_submit_state(this, "vkQueueSubmit2KHR()", queue_state);
SemaphoreSubmitState sem_submit_state(
this, GetPhysicalDeviceState()->queue_family_properties[queue_state->queueFamilyIndex].queueFlags);
// Now verify each individual submit
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx];
const auto perf_submit = LvlFindInChain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext);
uint32_t perf_pass = perf_submit ? perf_submit->counterPassIndex : 0;
Location loc(Func::vkQueueSubmit2KHR, Struct::VkSubmitInfo2KHR, Field::pSubmits, submit_idx);
skip |= ValidateSemaphoresForSubmit(sem_submit_state, queue, submit, loc);
bool protectedSubmit = (submit->flags & VK_SUBMIT_PROTECTED_BIT_KHR) != 0;
// Only check feature once for submit
if ((protectedSubmit == true) && (enabled_features.core11.protectedMemory == VK_FALSE)) {
skip |= LogError(queue, "VUID-VkSubmitInfo2KHR-flags-03885",
"vkQueueSubmit2KHR(): The protectedMemory device feature is disabled, can't submit a protected queue "
"to %s pSubmits[%u]",
report_data->FormatHandle(queue).c_str(), submit_idx);
}
for (uint32_t i = 0; i < submit->commandBufferInfoCount; i++) {
auto info_loc = loc.dot(Field::pCommandBufferInfos, i);
info_loc.structure = Struct::VkCommandBufferSubmitInfoKHR;
skip |= cb_submit_state.Validate(info_loc.dot(Field::commandBuffer), submit->pCommandBufferInfos[i].commandBuffer,
perf_pass);
skip |= ValidateDeviceMaskToPhysicalDeviceCount(submit->pCommandBufferInfos[i].deviceMask, queue,
"VUID-VkCommandBufferSubmitInfoKHR-deviceMask-03891");
// Make sure command buffers are all protected or unprotected
const CMD_BUFFER_STATE *cb_state = GetCBState(submit->pCommandBufferInfos[i].commandBuffer);
if (cb_state != nullptr) {
if ((cb_state->unprotected == true) && (protectedSubmit == true)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(queue);
skip |= LogError(objlist, "VUID-VkSubmitInfo2KHR-flags-03886",
"vkQueueSubmit2KHR(): command buffer %s is unprotected while queue %s pSubmits[%u] has "
"VK_SUBMIT_PROTECTED_BIT_KHR set",
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(queue).c_str(), submit_idx);
}
if ((cb_state->unprotected == false) && (protectedSubmit == false)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(queue);
skip |= LogError(objlist, "VUID-VkSubmitInfo2KHR-flags-03887",
"vkQueueSubmit2KHR(): command buffer %s is protected while queue %s pSubmitInfos[%u] has "
"VK_SUBMIT_PROTECTED_BIT_KHR not set",
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(queue).c_str(), submit_idx);
}
}
}
}
if (skip) return skip;
// Now verify maxTimelineSemaphoreValueDifference
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx];
Location outer_loc(Func::vkQueueSubmit2KHR, Struct::VkSubmitInfo2KHR, Field::pSubmits, submit_idx);
// If there are any timeline semaphores, this condition gets checked before the early return above
for (uint32_t i = 0; i < submit->waitSemaphoreInfoCount; ++i) {
const auto *sem_info = &submit->pWaitSemaphoreInfos[i];
auto loc = outer_loc.dot(Field::pWaitSemaphoreInfos, i);
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::semaphore), sem_info->semaphore, sem_info->value);
}
// If there are any timeline semaphores, this condition gets checked before the early return above
for (uint32_t i = 0; i < submit->signalSemaphoreInfoCount; ++i) {
const auto *sem_info = &submit->pSignalSemaphoreInfos[i];
auto loc = outer_loc.dot(Field::pSignalSemaphoreInfos, i);
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::semaphore), sem_info->semaphore, sem_info->value);
}
}
return skip;
}
#ifdef AHB_VALIDATION_SUPPORT
// Android-specific validation that uses types defined only on Android and only for NDK versions
// that support the VK_ANDROID_external_memory_android_hardware_buffer extension.
// This chunk could move into a separate core_validation_android.cpp file... ?
// clang-format off
// Map external format and usage flags to/from equivalent Vulkan flags
// (Tables as of v1.1.92)
// AHardwareBuffer Format Vulkan Format
// ====================== =============
// AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM VK_FORMAT_R8G8B8_UNORM
// AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM VK_FORMAT_R5G6B5_UNORM_PACK16
// AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT VK_FORMAT_R16G16B16A16_SFLOAT
// AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM VK_FORMAT_A2B10G10R10_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D16_UNORM VK_FORMAT_D16_UNORM
// AHARDWAREBUFFER_FORMAT_D24_UNORM VK_FORMAT_X8_D24_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT VK_FORMAT_D24_UNORM_S8_UINT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT VK_FORMAT_D32_SFLOAT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT VK_FORMAT_D32_SFLOAT_S8_UINT
// AHARDWAREBUFFER_FORMAT_S8_UINT VK_FORMAT_S8_UINT
// The AHARDWAREBUFFER_FORMAT_* values are an enum in the NDK headers, but get passed into Vulkan
// as uint32_t. Casting the enums here avoids scattering casts around in the code.
std::map<uint32_t, VkFormat> ahb_format_map_a2v = {
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM, VK_FORMAT_R8G8B8A8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM, VK_FORMAT_R8G8B8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM, VK_FORMAT_R5G6B5_UNORM_PACK16 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT, VK_FORMAT_R16G16B16A16_SFLOAT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM, VK_FORMAT_A2B10G10R10_UNORM_PACK32 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D16_UNORM, VK_FORMAT_D16_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM, VK_FORMAT_X8_D24_UNORM_PACK32 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT, VK_FORMAT_D32_SFLOAT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_S8_UINT, VK_FORMAT_S8_UINT }
};
// AHardwareBuffer Usage Vulkan Usage or Creation Flag (Intermixed - Aargh!)
// ===================== ===================================================
// None VK_IMAGE_USAGE_TRANSFER_SRC_BIT
// None VK_IMAGE_USAGE_TRANSFER_DST_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_SAMPLED_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT
// AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE None
// AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT VK_IMAGE_CREATE_PROTECTED_BIT
// None VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
// None VK_IMAGE_CREATE_EXTENDED_USAGE_BIT
// Same casting rationale. De-mixing the table to prevent type confusion and aliasing
std::map<uint64_t, VkImageUsageFlags> ahb_usage_map_a2v = {
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE, (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent
};
std::map<uint64_t, VkImageCreateFlags> ahb_create_map_a2v = {
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP, VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT },
{ (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT, VK_IMAGE_CREATE_PROTECTED_BIT },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent
};
std::map<VkImageUsageFlags, uint64_t> ahb_usage_map_v2a = {
{ VK_IMAGE_USAGE_SAMPLED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
{ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
{ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER },
{ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER },
};
std::map<VkImageCreateFlags, uint64_t> ahb_create_map_v2a = {
{ VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP },
{ VK_IMAGE_CREATE_PROTECTED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT },
};
// clang-format on
//
// AHB-extension new APIs
//
bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID(
VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const {
bool skip = false;
// buffer must be a valid Android hardware buffer object with at least one of the AHARDWAREBUFFER_USAGE_GPU_* usage flags.
AHardwareBuffer_Desc ahb_desc;
AHardwareBuffer_describe(buffer, &ahb_desc);
uint32_t required_flags = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER |
AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
if (0 == (ahb_desc.usage & required_flags)) {
skip |= LogError(device, "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884",
"vkGetAndroidHardwareBufferPropertiesANDROID: The AHardwareBuffer's AHardwareBuffer_Desc.usage (0x%" PRIx64
") does not have any AHARDWAREBUFFER_USAGE_GPU_* flags set.",
ahb_desc.usage);
}
return skip;
}
bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device,
const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
struct AHardwareBuffer **pBuffer) const {
bool skip = false;
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory);
// VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID must have been included in
// VkExportMemoryAllocateInfo::handleTypes when memory was created.
if (!mem_info->IsExport() ||
(0 == (mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
skip |= LogError(device, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882",
"vkGetMemoryAndroidHardwareBufferANDROID: %s was not allocated for export, or the "
"export handleTypes (0x%" PRIx32
") did not contain VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.",
report_data->FormatHandle(pInfo->memory).c_str(), mem_info->export_handle_type_flags);
}
// If the pNext chain of the VkMemoryAllocateInfo used to allocate memory included a VkMemoryDedicatedAllocateInfo
// with non-NULL image member, then that image must already be bound to memory.
if (mem_info->IsDedicatedImage()) {
const auto image_state = GetImageState(mem_info->dedicated->handle.Cast<VkImage>());
if ((nullptr == image_state) || (0 == (image_state->GetBoundMemory().count(mem_info->mem())))) {
LogObjectList objlist(device);
objlist.add(pInfo->memory);
objlist.add(mem_info->dedicated->handle);
skip |= LogError(objlist, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883",
"vkGetMemoryAndroidHardwareBufferANDROID: %s was allocated using a dedicated "
"%s, but that image is not bound to the VkDeviceMemory object.",
report_data->FormatHandle(pInfo->memory).c_str(),
report_data->FormatHandle(mem_info->dedicated->handle).c_str());
}
}
return skip;
}
//
// AHB-specific validation within non-AHB APIs
//
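// Validate the Android hardware buffer related aspects of vkAllocateMemory, covering both import
// (VkImportAndroidHardwareBufferInfoANDROID) and dedicated export allocations.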
bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const {
bool skip = false;
auto import_ahb_info = LvlFindInChain<VkImportAndroidHardwareBufferInfoANDROID>(alloc_info->pNext);
auto exp_mem_alloc_info = LvlFindInChain<VkExportMemoryAllocateInfo>(alloc_info->pNext);
auto mem_ded_alloc_info = LvlFindInChain<VkMemoryDedicatedAllocateInfo>(alloc_info->pNext);
if ((import_ahb_info) && (NULL != import_ahb_info->buffer)) {
// This is an import with handleType of VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID
AHardwareBuffer_Desc ahb_desc = {};
AHardwareBuffer_describe(import_ahb_info->buffer, &ahb_desc);
// Validate AHardwareBuffer_Desc::usage is a valid usage for imported AHB
//
// BLOB & GPU_DATA_BUFFER combo specifically allowed
if ((AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
// Otherwise, must be a combination from the AHardwareBuffer Format and Usage Equivalence tables
// Usage must have at least one bit from the table. It may have additional bits not in the table
uint64_t ahb_equiv_usage_bits = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER |
AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
if (0 == (ahb_desc.usage & ahb_equiv_usage_bits)) {
skip |=
LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881",
"vkAllocateMemory: The AHardwareBuffer_Desc's usage (0x%" PRIx64 ") is not compatible with Vulkan.",
ahb_desc.usage);
}
}
// Collect external buffer info
auto pdebi = LvlInitStruct<VkPhysicalDeviceExternalBufferInfo>();
pdebi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
}
if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) {
pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER];
}
auto ext_buf_props = LvlInitStruct<VkExternalBufferProperties>();
DispatchGetPhysicalDeviceExternalBufferProperties(physical_device, &pdebi, &ext_buf_props);
// If buffer is not NULL, Android hardware buffers must be supported for import, as reported by
// VkExternalImageFormatProperties or VkExternalBufferProperties.
if (0 == (ext_buf_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) {
// Collect external format info
auto pdeifi = LvlInitStruct<VkPhysicalDeviceExternalImageFormatInfo>();
pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
auto pdifi2 = LvlInitStruct<VkPhysicalDeviceImageFormatInfo2>(&pdeifi);
if (0 < ahb_format_map_a2v.count(ahb_desc.format)) pdifi2.format = ahb_format_map_a2v[ahb_desc.format];
pdifi2.type = VK_IMAGE_TYPE_2D; // Seems likely
pdifi2.tiling = VK_IMAGE_TILING_OPTIMAL; // Ditto
if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
}
if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) {
pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER];
}
if (AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP & ahb_desc.usage) {
pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP];
}
if (AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT & ahb_desc.usage) {
pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT];
}
auto ext_img_fmt_props = LvlInitStruct<VkExternalImageFormatProperties>();
auto ifp2 = LvlInitStruct<VkImageFormatProperties2>(&ext_img_fmt_props);
VkResult fmt_lookup_result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &pdifi2, &ifp2);
if ((VK_SUCCESS != fmt_lookup_result) || (0 == (ext_img_fmt_props.externalMemoryProperties.externalMemoryFeatures &
VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT))) {
skip |= LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01880",
"vkAllocateMemory: Neither the VkExternalImageFormatProperties nor the VkExternalBufferProperties "
"structs for the AHardwareBuffer include the VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT flag.");
}
}
// Retrieve buffer and format properties of the provided AHardwareBuffer
auto ahb_format_props = LvlInitStruct<VkAndroidHardwareBufferFormatPropertiesANDROID>();
auto ahb_props = LvlInitStruct<VkAndroidHardwareBufferPropertiesANDROID>(&ahb_format_props);
DispatchGetAndroidHardwareBufferPropertiesANDROID(device, import_ahb_info->buffer, &ahb_props);
// allocationSize must be the size returned by vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware buffer
if (alloc_info->allocationSize != ahb_props.allocationSize) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-02383",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct, allocationSize (%" PRId64
") does not match the AHardwareBuffer's reported allocationSize (%" PRId64 ").",
alloc_info->allocationSize, ahb_props.allocationSize);
}
// memoryTypeIndex must be one of those returned by vkGetAndroidHardwareBufferPropertiesANDROID for the AHardwareBuffer
// Note: memoryTypeIndex is an index, memoryTypeBits is a bitmask
uint32_t mem_type_bitmask = 1 << alloc_info->memoryTypeIndex;
if (0 == (mem_type_bitmask & ahb_props.memoryTypeBits)) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct, memoryTypeIndex (%" PRId32
") does not correspond to a bit set in AHardwareBuffer's reported "
"memoryTypeBits bitmask (0x%" PRIx32 ").",
alloc_info->memoryTypeIndex, ahb_props.memoryTypeBits);
}
// Checks for allocations without a dedicated allocation requirement
if ((nullptr == mem_ded_alloc_info) || (VK_NULL_HANDLE == mem_ded_alloc_info->image)) {
// the Android hardware buffer must have a format of AHARDWAREBUFFER_FORMAT_BLOB and a usage that includes
// AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER
if (((uint64_t)AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) ||
(0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
skip |= LogError(
device, "VUID-VkMemoryAllocateInfo-pNext-02384",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct without a dedicated allocation requirement, while the AHardwareBuffer_Desc's format ( %u ) is not "
"AHARDWAREBUFFER_FORMAT_BLOB or usage (0x%" PRIx64 ") does not include AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER.",
ahb_desc.format, ahb_desc.usage);
}
} else { // Checks specific to import with a dedicated allocation requirement
const VkImageCreateInfo *ici = &(GetImageState(mem_ded_alloc_info->image)->createInfo);
// The Android hardware buffer's usage must include at least one of AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER or
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE
if (0 == (ahb_desc.usage & (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE))) {
skip |= LogError(
device, "VUID-VkMemoryAllocateInfo-pNext-02386",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID and a "
"dedicated allocation requirement, while the AHardwareBuffer's usage (0x%" PRIx64
") contains neither AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER nor AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE.",
ahb_desc.usage);
}
// the format of image must be VK_FORMAT_UNDEFINED or the format returned by
// vkGetAndroidHardwareBufferPropertiesANDROID
if ((ici->format != ahb_format_props.format) && (VK_FORMAT_UNDEFINED != ici->format)) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02387",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
"VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
"format (%s) is not VK_FORMAT_UNDEFINED and does not match the AHardwareBuffer's format (%s).",
string_VkFormat(ici->format), string_VkFormat(ahb_format_props.format));
}
// The width, height, and array layer dimensions of image and the Android hardwarebuffer must be identical
if ((ici->extent.width != ahb_desc.width) || (ici->extent.height != ahb_desc.height) ||
(ici->arrayLayers != ahb_desc.layers)) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02388",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
"VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
"width, height, and arrayLayers (%" PRId32 " %" PRId32 " %" PRId32
") do not match those of the AHardwareBuffer (%" PRId32 " %" PRId32 " %" PRId32 ").",
ici->extent.width, ici->extent.height, ici->arrayLayers, ahb_desc.width, ahb_desc.height,
ahb_desc.layers);
}
// If the Android hardware buffer's usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, the image must
// have either a full mipmap chain or exactly 1 mip level.
//
// NOTE! The language of this VUID contradicts the language in the spec (1.1.93), which says "The
// AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE flag does not correspond to a Vulkan image usage or creation flag. Instead,
// its presence indicates that the Android hardware buffer contains a complete mipmap chain, and its absence indicates
// that the Android hardware buffer contains only a single mip level."
//
// TODO: This code implements the VUID's meaning, but it seems likely that the spec text is actually correct.
// Clarification requested.
if ((ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE) && (ici->mipLevels != 1) &&
(ici->mipLevels != FullMipChainLevels(ici->extent))) {
skip |=
LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02389",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
"usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE but mipLevels (%" PRId32
") is neither 1 nor full mip "
"chain levels (%" PRId32 ").",
ici->mipLevels, FullMipChainLevels(ici->extent));
}
// each bit set in the usage of image must be listed in AHardwareBuffer Usage Equivalence, and if there is a
// corresponding AHARDWAREBUFFER_USAGE bit listed that bit must be included in the Android hardware buffer's
// AHardwareBuffer_Desc::usage
if (ici->usage & ~(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
skip |=
LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02390",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
"dedicated image usage bits (0x%" PRIx32
") include an issue not listed in the AHardwareBuffer Usage Equivalence table.",
ici->usage);
}
std::vector<VkImageUsageFlags> usages = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT};
for (VkImageUsageFlags ubit : usages) {
if (ici->usage & ubit) {
uint64_t ahb_usage = ahb_usage_map_v2a[ubit];
if (0 == (ahb_usage & ahb_desc.usage)) {
skip |= LogError(
device, "VUID-VkMemoryAllocateInfo-pNext-02390",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
"The dedicated image usage bit %s equivalent is not in AHardwareBuffer_Desc.usage (0x%" PRIx64 ") ",
string_VkImageUsageFlags(ubit).c_str(), ahb_desc.usage);
}
}
}
}
} else { // Not an import
if ((exp_mem_alloc_info) && (mem_ded_alloc_info) &&
(0 != (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID & exp_mem_alloc_info->handleTypes)) &&
(VK_NULL_HANDLE != mem_ded_alloc_info->image)) {
// This is an Android HW Buffer export
if (0 != alloc_info->allocationSize) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-01874",
"vkAllocateMemory: pNext chain indicates a dedicated Android Hardware Buffer export allocation, "
"but allocationSize is non-zero.");
}
} else {
if (0 == alloc_info->allocationSize) {
skip |= LogError(
device, "VUID-VkMemoryAllocateInfo-pNext-01874",
"vkAllocateMemory: pNext chain does not indicate a dedicated export allocation, but allocationSize is 0.");
};
}
}
return skip;
}
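// An image created with an Android hardware buffer external handle type must be bound to memory before
// its memory requirements can be queried.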
bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state != nullptr) {
if (image_state->IsExternalAHB() && (0 == image_state->GetBoundMemory().size())) {
const char *vuid = strcmp(func_name, "vkGetImageMemoryRequirements()") == 0
? "VUID-vkGetImageMemoryRequirements-image-04004"
: "VUID-VkImageMemoryRequirementsInfo2-image-01897";
skip |=
LogError(image, vuid,
"%s: Attempt get image memory requirements for an image created with a "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType, which has not yet been "
"bound to memory.",
func_name);
}
}
return skip;
}
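// A chained VkAndroidHardwareBufferUsageANDROID output struct requires a matching
// VkPhysicalDeviceExternalImageFormatInfo input struct with the AHB handle type.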
bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const {
bool skip = false;
const VkAndroidHardwareBufferUsageANDROID *ahb_usage =
LvlFindInChain<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties->pNext);
if (nullptr != ahb_usage) {
const VkPhysicalDeviceExternalImageFormatInfo *pdeifi =
LvlFindInChain<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo->pNext);
if ((nullptr == pdeifi) || (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID != pdeifi->handleType)) {
skip |= LogError(device, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868",
"vkGetPhysicalDeviceImageFormatProperties2: pImageFormatProperties includes a chained "
"VkAndroidHardwareBufferUsageANDROID struct, but pImageFormatInfo does not include a chained "
"VkPhysicalDeviceExternalImageFormatInfo struct with handleType "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.");
}
}
return skip;
}
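// When memory was imported from an Android hardware buffer, the buffer it is bound to must have been
// created with the matching external memory handle type.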
bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
VkDeviceMemory memory, VkBuffer buffer) const {
bool skip = false;
if ((handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0) {
const char *vuid = (strcmp(func_name, "vkBindBufferMemory()") == 0) ? "VUID-vkBindBufferMemory-memory-02986"
: "VUID-VkBindBufferMemoryInfo-memory-02986";
LogObjectList objlist(buffer);
objlist.add(memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with an AHB import operation which is not set "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID in the VkBuffer (%s) "
"VkExternalMemoryBufferreateInfo::handleType (%s)",
func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(buffer).c_str(),
string_VkExternalMemoryHandleTypeFlags(handleType).c_str());
}
return skip;
}
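// When memory was imported from an Android hardware buffer, the image it is bound to must have been
// created with the matching external memory handle type.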
bool CoreChecks::ValidateImageImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
VkDeviceMemory memory, VkImage image) const {
bool skip = false;
if ((handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0) {
const char *vuid = (strcmp(func_name, "vkBindImageMemory()") == 0) ? "VUID-vkBindImageMemory-memory-02990"
: "VUID-VkBindImageMemoryInfo-memory-02990";
LogObjectList objlist(image);
objlist.add(memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with an AHB import operation which is not set "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID in the VkImage (%s) "
"VkExternalMemoryImageCreateInfo::handleType (%s)",
func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(image).c_str(),
string_VkExternalMemoryHandleTypeFlags(handleType).c_str());
}
return skip;
}
#else // !AHB_VALIDATION_SUPPORT
// Case building for Android without AHB Validation
#ifdef VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID(
VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const {
return false;
}
bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device,
const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
struct AHardwareBuffer **pBuffer) const {
return false;
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const { return false; }
bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const {
return false;
}
bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const { return false; }
bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
VkDeviceMemory memory, VkBuffer buffer) const {
return false;
}
bool CoreChecks::ValidateImageImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
VkDeviceMemory memory, VkImage image) const {
return false;
}
#endif // AHB_VALIDATION_SUPPORT
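// Validate vkAllocateMemory: allocation count and size limits, memory type index, allocation flags and
// any dedicated or Android hardware buffer import information.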
bool CoreChecks::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) const {
bool skip = false;
if (memObjMap.size() >= phys_dev_props.limits.maxMemoryAllocationCount) {
skip |= LogError(device, "VUID-vkAllocateMemory-maxMemoryAllocationCount-04101",
"vkAllocateMemory: Number of currently valid memory objects is not less than the maximum allowed (%u).",
phys_dev_props.limits.maxMemoryAllocationCount);
}
if (IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) {
skip |= ValidateAllocateMemoryANDROID(pAllocateInfo);
} else {
if (0 == pAllocateInfo->allocationSize) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-00638", "vkAllocateMemory: allocationSize is 0.");
};
}
auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(pAllocateInfo->pNext);
if (chained_flags_struct && chained_flags_struct->flags == VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_flags_struct->deviceMask, device,
"VUID-VkMemoryAllocateFlagsInfo-deviceMask-00675");
skip |=
ValidateDeviceMaskToZero(chained_flags_struct->deviceMask, device, "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00676");
}
if (pAllocateInfo->memoryTypeIndex >= phys_dev_mem_props.memoryTypeCount) {
skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01714",
"vkAllocateMemory: attempting to allocate memory type %u, which is not a valid index. Device only "
"advertises %u memory types.",
pAllocateInfo->memoryTypeIndex, phys_dev_mem_props.memoryTypeCount);
} else {
const VkMemoryType memory_type = phys_dev_mem_props.memoryTypes[pAllocateInfo->memoryTypeIndex];
if (pAllocateInfo->allocationSize > phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size) {
skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01713",
"vkAllocateMemory: attempting to allocate %" PRIu64
" bytes from heap %u,"
"but size of that heap is only %" PRIu64 " bytes.",
pAllocateInfo->allocationSize, memory_type.heapIndex,
phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size);
}
if (!enabled_features.device_coherent_memory_features.deviceCoherentMemory &&
((memory_type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) != 0)) {
skip |= LogError(device, "VUID-vkAllocateMemory-deviceCoherentMemory-02790",
"vkAllocateMemory: attempting to allocate memory type %u, which includes the "
"VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD memory property, but the deviceCoherentMemory feature "
"is not enabled.",
pAllocateInfo->memoryTypeIndex);
}
if ((enabled_features.core11.protectedMemory == VK_FALSE) &&
((memory_type.propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-01872",
"vkAllocateMemory(): attempting to allocate memory type %u, which includes the "
"VK_MEMORY_PROPERTY_PROTECTED_BIT memory property, but the protectedMemory feature "
"is not enabled.",
pAllocateInfo->memoryTypeIndex);
}
}
bool imported_ahb = false;
#ifdef AHB_VALIDATION_SUPPORT
// "memory is not an imported Android Hardware Buffer" refers to VkImportAndroidHardwareBufferInfoANDROID with a non-NULL
// buffer value. Imported memory has a separate VUID that checks the size and allocationSize match up.
auto imported_ahb_info = LvlFindInChain<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo->pNext);
if (imported_ahb_info != nullptr) {
imported_ahb = imported_ahb_info->buffer != nullptr;
}
#endif // AHB_VALIDATION_SUPPORT
auto dedicated_allocate_info = LvlFindInChain<VkMemoryDedicatedAllocateInfo>(pAllocateInfo->pNext);
if (dedicated_allocate_info) {
if ((dedicated_allocate_info->buffer != VK_NULL_HANDLE) && (dedicated_allocate_info->image != VK_NULL_HANDLE)) {
skip |= LogError(device, "VUID-VkMemoryDedicatedAllocateInfo-image-01432",
"vkAllocateMemory: Either buffer or image has to be VK_NULL_HANDLE in VkMemoryDedicatedAllocateInfo");
} else if (dedicated_allocate_info->image != VK_NULL_HANDLE) {
// Dedicated VkImage
const IMAGE_STATE *image_state = GetImageState(dedicated_allocate_info->image);
if (image_state->disjoint == true) {
skip |= LogError(
device, "VUID-VkMemoryDedicatedAllocateInfo-image-01797",
"vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with "
"VK_IMAGE_CREATE_DISJOINT_BIT",
report_data->FormatHandle(dedicated_allocate_info->image).c_str());
} else {
if ((pAllocateInfo->allocationSize != image_state->requirements[0].size) && (imported_ahb == false)) {
const char *vuid = IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)
? "VUID-VkMemoryDedicatedAllocateInfo-image-02964"
: "VUID-VkMemoryDedicatedAllocateInfo-image-01433";
skip |=
LogError(device, vuid,
"vkAllocateMemory: Allocation Size (%" PRIu64
") needs to be equal to VkImage %s VkMemoryRequirements::size (%" PRIu64 ")",
pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->image).c_str(),
image_state->requirements[0].size);
}
if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) != 0) {
skip |= LogError(
device, "VUID-VkMemoryDedicatedAllocateInfo-image-01434",
"vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with "
"VK_IMAGE_CREATE_SPARSE_BINDING_BIT",
report_data->FormatHandle(dedicated_allocate_info->image).c_str());
}
}
} else if (dedicated_allocate_info->buffer != VK_NULL_HANDLE) {
// Dedicated VkBuffer
const BUFFER_STATE *buffer_state = GetBufferState(dedicated_allocate_info->buffer);
if ((pAllocateInfo->allocationSize != buffer_state->requirements.size) && (imported_ahb == false)) {
const char *vuid = IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)
? "VUID-VkMemoryDedicatedAllocateInfo-buffer-02965"
: "VUID-VkMemoryDedicatedAllocateInfo-buffer-01435";
skip |= LogError(
device, vuid,
"vkAllocateMemory: Allocation Size (%" PRIu64 ") needs to be equal to VkBuffer %s VkMemoryRequirements::size (%" PRIu64 ")",
pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->buffer).c_str(),
buffer_state->requirements.size);
}
if ((buffer_state->createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) != 0) {
skip |= LogError(
device, "VUID-VkMemoryDedicatedAllocateInfo-buffer-01436",
"vkAllocateMemory: VkBuffer %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with "
"VK_BUFFER_CREATE_SPARSE_BINDING_BIT",
report_data->FormatHandle(dedicated_allocate_info->buffer).c_str());
}
}
}
// TODO: VUIDs ending in 00643, 00644, 00646, 00647, 01742, 01743, 01745, 00645, 00648, 01744
return skip;
}
// For the given object node, if it is in use, flag a validation error and return the callback result, else return false
bool CoreChecks::ValidateObjectNotInUse(const BASE_NODE *obj_node, const char *caller_name, const char *error_code) const {
if (disabled[object_in_use]) return false;
auto obj_struct = obj_node->Handle();
bool skip = false;
if (obj_node->InUse()) {
skip |= LogError(device, error_code, "Cannot call %s on %s that is currently in use by a command buffer.", caller_name,
report_data->FormatHandle(obj_struct).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) const {
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
bool skip = false;
if (mem_info) {
skip |= ValidateObjectNotInUse(mem_info, "vkFreeMemory", "VUID-vkFreeMemory-memory-00677");
}
return skip;
}
// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
// and that the size of the map range should be:
// 1. Not zero
// 2. Within the size of the memory allocation
bool CoreChecks::ValidateMapMemRange(const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize offset, VkDeviceSize size) const {
bool skip = false;
assert(mem_info);
const auto mem = mem_info->mem();
if (size == 0) {
skip = LogError(mem, "VUID-vkMapMemory-size-00680", "VkMapMemory: Attempting to map memory range of size zero");
}
// It is an application error to call VkMapMemory on an object that is already mapped
if (mem_info->mapped_range.size != 0) {
skip = LogError(mem, "VUID-vkMapMemory-memory-00678", "VkMapMemory: Attempting to map memory on an already-mapped %s.",
report_data->FormatHandle(mem).c_str());
}
// Validate offset is not over allocation size
if (offset >= mem_info->alloc_info.allocationSize) {
skip = LogError(mem, "VUID-vkMapMemory-offset-00679",
"VkMapMemory: Attempting to map memory with an offset of 0x%" PRIx64
" which is larger than the total array size 0x%" PRIx64,
offset, mem_info->alloc_info.allocationSize);
}
// Validate that offset + size is within object's allocationSize
if (size != VK_WHOLE_SIZE) {
if ((offset + size) > mem_info->alloc_info.allocationSize) {
skip = LogError(mem, "VUID-vkMapMemory-size-00681",
"VkMapMemory: Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64
".",
offset, size + offset, mem_info->alloc_info.allocationSize);
}
}
return skip;
}
bool CoreChecks::PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
uint64_t timeout) const {
// Verify fence status of submitted fences
bool skip = false;
for (uint32_t i = 0; i < fenceCount; i++) {
skip |= VerifyQueueStateToFence(pFences[i]);
}
return skip;
}
bool CoreChecks::PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
VkQueue *pQueue) const {
bool skip = false;
skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex",
"VUID-vkGetDeviceQueue-queueFamilyIndex-00384");
for (size_t i = 0; i < device_queue_info_list.size(); i++) {
const auto device_queue_info = device_queue_info_list.at(i);
if (device_queue_info.queue_family_index != queueFamilyIndex) {
continue;
}
// flag must be zero
if (device_queue_info.flags != 0) {
skip |= LogError(
device, "VUID-vkGetDeviceQueue-flags-01841",
"vkGetDeviceQueue: queueIndex (=%" PRIu32
") was created with a non-zero VkDeviceQueueCreateFlags in vkCreateDevice::pCreateInfo->pQueueCreateInfos[%" PRIu32
"]. Need to use vkGetDeviceQueue2 instead.",
queueIndex, device_queue_info.index);
}
if (device_queue_info.queue_count <= queueIndex) {
skip |= LogError(device, "VUID-vkGetDeviceQueue-queueIndex-00385",
"vkGetDeviceQueue: queueIndex (=%" PRIu32
") is not less than the number of queues requested from queueFamilyIndex (=%" PRIu32
") when the device was created vkCreateDevice::pCreateInfo->pQueueCreateInfos[%" PRIu32
"] (i.e. is not less than %" PRIu32 ").",
queueIndex, queueFamilyIndex, device_queue_info.index, device_queue_info.queue_count);
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) const {
bool skip = false;
if (pQueueInfo) {
const uint32_t queueFamilyIndex = pQueueInfo->queueFamilyIndex;
const uint32_t queueIndex = pQueueInfo->queueIndex;
const VkDeviceQueueCreateFlags flags = pQueueInfo->flags;
skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue2", "pQueueInfo->queueFamilyIndex",
"VUID-VkDeviceQueueInfo2-queueFamilyIndex-01842");
// ValidateDeviceQueueFamily() already checks if queueFamilyIndex is valid, but we still need to make sure the flags match it
bool valid_flags = false;
for (size_t i = 0; i < device_queue_info_list.size(); i++) {
const auto device_queue_info = device_queue_info_list.at(i);
// vkGetDeviceQueue2 only checks if both family index AND flags are same as device creation
// this handles the case where the same queueFamilyIndex is used with/without the protected flag
if ((device_queue_info.queue_family_index != queueFamilyIndex) || (device_queue_info.flags != flags)) {
continue;
}
valid_flags = true;
if (device_queue_info.queue_count <= queueIndex) {
skip |= LogError(
device, "VUID-VkDeviceQueueInfo2-queueIndex-01843",
"vkGetDeviceQueue2: queueIndex (=%" PRIu32
") is not less than the number of queues requested from [queueFamilyIndex (=%" PRIu32
"), flags (%s)] combination when the device was created vkCreateDevice::pCreateInfo->pQueueCreateInfos[%" PRIu32
"] (i.e. is not less than %" PRIu32 ").",
queueIndex, queueFamilyIndex, string_VkDeviceQueueCreateFlags(flags).c_str(), device_queue_info.index,
device_queue_info.queue_count);
}
}
// Don't double error message if already skipping from ValidateDeviceQueueFamily
if (!valid_flags && !skip) {
skip |= LogError(device, "VUID-VkDeviceQueueInfo2-flags-06225",
"vkGetDeviceQueue2: The combination of queueFamilyIndex (=%" PRIu32
") and flags (%s) were never both set together in any element of "
"vkCreateDevice::pCreateInfo->pQueueCreateInfos at device creation time.",
queueFamilyIndex, string_VkDeviceQueueCreateFlags(flags).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateQueueWaitIdle(VkQueue queue) const {
const QUEUE_STATE *queue_state = GetQueueState(queue);
return VerifyQueueStateToSeq(queue_state, queue_state->seq + queue_state->submissions.size());
}
bool CoreChecks::PreCallValidateDeviceWaitIdle(VkDevice device) const {
bool skip = false;
for (const auto &queue : queueMap) {
skip |= VerifyQueueStateToSeq(queue.second.get(), queue.second->seq + queue.second->submissions.size());
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) const {
bool skip = false;
auto *sem_type_create_info = LvlFindInChain<VkSemaphoreTypeCreateInfo>(pCreateInfo->pNext);
if (sem_type_create_info) {
if (sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE && !enabled_features.core12.timelineSemaphore) {
skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-timelineSemaphore-03252",
"VkCreateSemaphore: timelineSemaphore feature is not enabled, can not create timeline semaphores");
}
if (sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_BINARY && sem_type_create_info->initialValue != 0) {
skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-semaphoreType-03279",
"vkCreateSemaphore: if semaphoreType is VK_SEMAPHORE_TYPE_BINARY, initialValue must be zero");
}
}
return skip;
}
bool CoreChecks::PreCallValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout) const {
    return ValidateWaitSemaphores(device, pWaitInfo, timeout, "vkWaitSemaphores");
}
bool CoreChecks::PreCallValidateWaitSemaphoresKHR(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout) const {
    return ValidateWaitSemaphores(device, pWaitInfo, timeout, "vkWaitSemaphoresKHR");
}
bool CoreChecks::ValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout,
const char *apiName) const {
bool skip = false;
for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; i++) {
auto *semaphore_state = GetSemaphoreState(pWaitInfo->pSemaphores[i]);
if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) {
skip |= LogError(pWaitInfo->pSemaphores[i], "VUID-VkSemaphoreWaitInfo-pSemaphores-03256",
"%s(): all semaphores in pWaitInfo must be timeline semaphores, but %s is not", apiName,
report_data->FormatHandle(pWaitInfo->pSemaphores[i]).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) const {
const FENCE_STATE *fence_node = GetFenceState(fence);
bool skip = false;
if (fence_node) {
if (fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
skip |= LogError(fence, "VUID-vkDestroyFence-fence-01120", "%s is in use.", report_data->FormatHandle(fence).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore,
const VkAllocationCallbacks *pAllocator) const {
const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
bool skip = false;
if (sema_node) {
skip |= ValidateObjectNotInUse(sema_node, "vkDestroySemaphore", "VUID-vkDestroySemaphore-semaphore-01137");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) const {
const EVENT_STATE *event_state = GetEventState(event);
bool skip = false;
if (event_state) {
skip |= ValidateObjectNotInUse(event_state, "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool,
const VkAllocationCallbacks *pAllocator) const {
if (disabled[query_validation]) return false;
const QUERY_POOL_STATE *qp_state = GetQueryPoolState(queryPool);
bool skip = false;
if (qp_state) {
bool completed_by_get_results = true;
for (uint32_t i = 0; i < qp_state->createInfo.queryCount; ++i) {
QueryObject obj(qp_state->pool(), i);
auto query_pass_iter = queryToStateMap.find(obj);
if (query_pass_iter != queryToStateMap.end() && query_pass_iter->second != QUERYSTATE_AVAILABLE) {
completed_by_get_results = false;
break;
}
}
if (!completed_by_get_results) {
skip |= ValidateObjectNotInUse(qp_state, "vkDestroyQueryPool", "VUID-vkDestroyQueryPool-queryPool-00793");
}
}
return skip;
}
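// Shared helper for vkGetQueryPoolResults and vkCmdCopyQueryPoolResults on performance query pools:
// the AVAILABILITY/PARTIAL/64_BIT result flags are disallowed and every counter pass must have been submitted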
bool CoreChecks::ValidatePerformanceQueryResults(const char *cmd_name, const QUERY_POOL_STATE *query_pool_state,
uint32_t firstQuery, uint32_t queryCount, VkQueryResultFlags flags) const {
bool skip = false;
if (flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT | VK_QUERY_RESULT_PARTIAL_BIT | VK_QUERY_RESULT_64_BIT)) {
string invalid_flags_string;
for (auto flag : {VK_QUERY_RESULT_WITH_AVAILABILITY_BIT, VK_QUERY_RESULT_PARTIAL_BIT, VK_QUERY_RESULT_64_BIT}) {
if (flag & flags) {
if (invalid_flags_string.size()) {
invalid_flags_string += " and ";
}
invalid_flags_string += string_VkQueryResultFlagBits(flag);
}
}
skip |= LogError(query_pool_state->pool(),
strcmp(cmd_name, "vkGetQueryPoolResults") == 0 ? "VUID-vkGetQueryPoolResults-queryType-03230"
: "VUID-vkCmdCopyQueryPoolResults-queryType-03233",
"%s: QueryPool %s was created with a queryType of"
"VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but flags contains %s.",
cmd_name, report_data->FormatHandle(query_pool_state->pool()).c_str(), invalid_flags_string.c_str());
}
for (uint32_t query_index = firstQuery; query_index < queryCount; query_index++) {
uint32_t submitted = 0;
for (uint32_t pass_index = 0; pass_index < query_pool_state->n_performance_passes; pass_index++) {
QueryObject obj(QueryObject(query_pool_state->pool(), query_index), pass_index);
auto query_pass_iter = queryToStateMap.find(obj);
if (query_pass_iter != queryToStateMap.end() && query_pass_iter->second == QUERYSTATE_AVAILABLE) submitted++;
}
if (submitted < query_pool_state->n_performance_passes) {
skip |= LogError(query_pool_state->pool(), "VUID-vkGetQueryPoolResults-queryType-03231",
"%s: QueryPool %s has %u performance query passes, but the query has only been "
"submitted for %u of the passes.",
cmd_name, report_data->FormatHandle(query_pool_state->pool()).c_str(),
query_pool_state->n_performance_passes, submitted);
}
}
return skip;
}
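// Additional vkGetQueryPoolResults checks for VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR pools:
// pData and stride must be multiples of sizeof(VkPerformanceCounterResultKHR)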
bool CoreChecks::ValidateGetQueryPoolPerformanceResults(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
void *pData, VkDeviceSize stride, VkQueryResultFlags flags,
const char *apiName) const {
bool skip = false;
const auto query_pool_state = GetQueryPoolState(queryPool);
if (!query_pool_state || query_pool_state->createInfo.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return skip;
if (((((uintptr_t)pData) % sizeof(VkPerformanceCounterResultKHR)) != 0 ||
(stride % sizeof(VkPerformanceCounterResultKHR)) != 0)) {
skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-03229",
"%s(): QueryPool %s was created with a queryType of "
"VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but pData & stride are not multiple of the "
"size of VkPerformanceCounterResultKHR.",
apiName, report_data->FormatHandle(queryPool).c_str());
}
skip |= ValidatePerformanceQueryResults(apiName, query_pool_state, firstQuery, queryCount, flags);
return skip;
}
bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
VkQueryResultFlags flags) const {
if (disabled[query_validation]) return false;
bool skip = false;
skip |= ValidateQueryPoolStride("VUID-vkGetQueryPoolResults-flags-02827", "VUID-vkGetQueryPoolResults-flags-00815", stride,
"dataSize", dataSize, flags);
skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkGetQueryPoolResults()",
"VUID-vkGetQueryPoolResults-firstQuery-00813", "VUID-vkGetQueryPoolResults-firstQuery-00816");
skip |=
ValidateGetQueryPoolPerformanceResults(queryPool, firstQuery, queryCount, pData, stride, flags, "vkGetQueryPoolResults");
const auto query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
skip |= LogError(
queryPool, "VUID-vkGetQueryPoolResults-queryType-00818",
"%s was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.",
report_data->FormatHandle(queryPool).c_str());
}
if (!skip) {
uint32_t query_avail_data = (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) ? 1 : 0;
uint32_t query_size_in_bytes = (flags & VK_QUERY_RESULT_64_BIT) ? sizeof(uint64_t) : sizeof(uint32_t);
uint32_t query_items = 0;
uint32_t query_size = 0;
switch (query_pool_state->createInfo.queryType) {
case VK_QUERY_TYPE_OCCLUSION:
// Occlusion queries write one integer value - the number of samples passed.
query_items = 1;
query_size = query_size_in_bytes * (query_items + query_avail_data);
break;
case VK_QUERY_TYPE_PIPELINE_STATISTICS:
// Pipeline statistics queries write one integer value for each bit that is enabled in the pipelineStatistics
// when the pool is created
{
const int num_bits = sizeof(VkFlags) * CHAR_BIT;
std::bitset<num_bits> pipe_stats_bits(query_pool_state->createInfo.pipelineStatistics);
query_items = static_cast<uint32_t>(pipe_stats_bits.count());
query_size = query_size_in_bytes * (query_items + query_avail_data);
}
break;
case VK_QUERY_TYPE_TIMESTAMP:
// Timestamp queries write one integer
query_items = 1;
query_size = query_size_in_bytes * (query_items + query_avail_data);
break;
case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
// Transform feedback queries write two integers
query_items = 2;
query_size = query_size_in_bytes * (query_items + query_avail_data);
break;
case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
                // Performance queries store results in a tightly packed array of VkPerformanceCounterResultKHR
query_items = query_pool_state->perf_counter_index_count;
query_size = sizeof(VkPerformanceCounterResultKHR) * query_items;
if (query_size > stride) {
skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-04519",
"vkGetQueryPoolResults() on querypool %s specified stride %" PRIu64
" which must be at least counterIndexCount (%d) "
"multiplied by sizeof(VkPerformanceCounterResultKHR) (%zu).",
report_data->FormatHandle(queryPool).c_str(), stride, query_items,
sizeof(VkPerformanceCounterResultKHR));
}
break;
// These cases intentionally fall through to the default
case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR: // VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV
case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL:
default:
query_size = 0;
break;
}
if (query_size && (((queryCount - 1) * stride + query_size) > dataSize)) {
skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-dataSize-00817",
"vkGetQueryPoolResults() on querypool %s specified dataSize %zu which is "
"incompatible with the specified query type and options.",
report_data->FormatHandle(queryPool).c_str(), dataSize);
}
}
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR &&
(flags & VK_QUERY_RESULT_WITH_STATUS_BIT_KHR) == 0) {
skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-04810",
"vkGetQueryPoolResults(): querypool %s was created with VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR "
"queryType, but flags do not contain VK_QUERY_RESULT_WITH_STATUS_BIT_KHR bit.",
report_data->FormatHandle(queryPool).c_str());
}
}
return skip;
}
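// Common check that memoryOffset lies within the allocation size when binding memory to a buffer, image,
// or NV acceleration structure; the VUID is selected based on the handle type and calling API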
bool CoreChecks::ValidateInsertMemoryRange(const VulkanTypedHandle &typed_handle, const DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize memoryOffset, const char *api_name) const {
bool skip = false;
if (memoryOffset >= mem_info->alloc_info.allocationSize) {
const char *error_code = nullptr;
if (typed_handle.type == kVulkanObjectTypeBuffer) {
if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
error_code = "VUID-vkBindBufferMemory-memoryOffset-01031";
} else {
error_code = "VUID-VkBindBufferMemoryInfo-memoryOffset-01031";
}
} else if (typed_handle.type == kVulkanObjectTypeImage) {
if (strcmp(api_name, "vkBindImageMemory()") == 0) {
error_code = "VUID-vkBindImageMemory-memoryOffset-01046";
} else {
error_code = "VUID-VkBindImageMemoryInfo-memoryOffset-01046";
}
} else if (typed_handle.type == kVulkanObjectTypeAccelerationStructureNV) {
error_code = "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-03621";
} else {
// Unsupported object type
assert(false);
}
LogObjectList objlist(mem_info->mem());
objlist.add(typed_handle);
skip = LogError(objlist, error_code,
"In %s, attempting to bind %s to %s, memoryOffset=0x%" PRIxLEAST64
" must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
api_name, report_data->FormatHandle(mem_info->mem()).c_str(), report_data->FormatHandle(typed_handle).c_str(),
memoryOffset, mem_info->alloc_info.allocationSize);
}
return skip;
}
bool CoreChecks::ValidateInsertImageMemoryRange(VkImage image, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(image, kVulkanObjectTypeImage), mem_info, mem_offset, api_name);
}
bool CoreChecks::ValidateInsertBufferMemoryRange(VkBuffer buffer, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer), mem_info, mem_offset, api_name);
}
bool CoreChecks::ValidateInsertAccelerationStructureMemoryRange(VkAccelerationStructureNV as, const DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize mem_offset, const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(as, kVulkanObjectTypeAccelerationStructureNV), mem_info, mem_offset,
api_name);
}
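// Verify that the memory type the allocation came from is allowed by the object's memoryTypeBits requirements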
bool CoreChecks::ValidateMemoryTypes(const DEVICE_MEMORY_STATE *mem_info, const uint32_t memory_type_bits, const char *funcName,
const char *msgCode) const {
bool skip = false;
if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
skip = LogError(mem_info->mem(), msgCode,
"%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
"type (0x%X) of %s.",
funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
report_data->FormatHandle(mem_info->mem()).c_str());
}
return skip;
}
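// Shared validation for vkBindBufferMemory and vkBindBufferMemory2(KHR): checks alignment, size, dedicated
// allocation, device address flags, external/imported handle types, and protected memory compatibility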
bool CoreChecks::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
const char *api_name) const {
const BUFFER_STATE *buffer_state = GetBufferState(buffer);
bool bind_buffer_mem_2 = strcmp(api_name, "vkBindBufferMemory()") != 0;
bool skip = false;
if (buffer_state) {
// Track objects tied to memory
skip = ValidateSetMemBinding(mem, buffer_state->Handle(), api_name);
const auto mem_info = GetDevMemState(mem);
// Validate memory requirements alignment
if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memoryOffset-01036" : "VUID-vkBindBufferMemory-memoryOffset-01036";
skip |= LogError(buffer, vuid,
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetBufferMemoryRequirements with buffer.",
api_name, memoryOffset, buffer_state->requirements.alignment);
}
if (mem_info) {
// Validate bound memory range information
skip |= ValidateInsertBufferMemoryRange(buffer, mem_info, memoryOffset, api_name);
const char *mem_type_vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-01035" : "VUID-vkBindBufferMemory-memory-01035";
skip |= ValidateMemoryTypes(mem_info, buffer_state->requirements.memoryTypeBits, api_name, mem_type_vuid);
// Validate memory requirements size
if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-size-01037" : "VUID-vkBindBufferMemory-size-01037";
skip |= LogError(buffer, vuid,
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetBufferMemoryRequirements with buffer.",
api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
}
// Validate dedicated allocation
if (mem_info->IsDedicatedBuffer() &&
((mem_info->dedicated->handle.Cast<VkBuffer>() != buffer) || (memoryOffset != 0))) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-01508" : "VUID-vkBindBufferMemory-memory-01508";
LogObjectList objlist(buffer);
objlist.add(mem);
objlist.add(mem_info->dedicated->handle);
skip |= LogError(objlist, vuid,
"%s: for dedicated %s, VkMemoryDedicatedAllocateInfo::buffer %s must be equal "
"to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
api_name, report_data->FormatHandle(mem).c_str(),
report_data->FormatHandle(mem_info->dedicated->handle).c_str(),
report_data->FormatHandle(buffer).c_str(), memoryOffset);
}
auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext);
if (enabled_features.core12.bufferDeviceAddress &&
(buffer_state->createInfo.usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT) &&
(!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT))) {
skip |= LogError(buffer, "VUID-vkBindBufferMemory-bufferDeviceAddress-03339",
"%s: If buffer was created with the VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT bit set, "
"memory must have been allocated with the VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT bit set.",
api_name);
}
// Validate export memory handles
if ((mem_info->export_handle_type_flags != 0) &&
((mem_info->export_handle_type_flags & buffer_state->external_memory_handle) == 0)) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-02726" : "VUID-vkBindBufferMemory-memory-02726";
LogObjectList objlist(buffer);
objlist.add(mem);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least one "
"handle from VkBuffer (%s) handleType %s.",
api_name, report_data->FormatHandle(mem).c_str(),
string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(),
report_data->FormatHandle(buffer).c_str(),
string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str());
}
// Validate import memory handles
if (mem_info->IsImportAHB() == true) {
skip |= ValidateBufferImportedHandleANDROID(api_name, buffer_state->external_memory_handle, mem, buffer);
} else if (mem_info->IsImport() == true) {
if ((mem_info->import_handle_type_flags & buffer_state->external_memory_handle) == 0) {
const char *vuid = nullptr;
if ((bind_buffer_mem_2) && IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-VkBindBufferMemoryInfo-memory-02985";
} else if ((!bind_buffer_mem_2) &&
IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-vkBindBufferMemory-memory-02985";
} else if ((bind_buffer_mem_2) &&
!IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-VkBindBufferMemoryInfo-memory-02727";
} else if ((!bind_buffer_mem_2) &&
!IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-vkBindBufferMemory-memory-02727";
}
LogObjectList objlist(buffer);
objlist.add(mem);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s which "
"is not set in the VkBuffer (%s) VkExternalMemoryBufferCreateInfo::handleType (%s)",
api_name, report_data->FormatHandle(mem).c_str(),
string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(),
report_data->FormatHandle(buffer).c_str(),
string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str());
}
}
// Validate mix of protected buffer and memory
if ((buffer_state->unprotected == false) && (mem_info->unprotected == true)) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-None-01898" : "VUID-vkBindBufferMemory-None-01898";
LogObjectList objlist(buffer);
objlist.add(mem);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was not created with protected memory but the VkBuffer (%s) was set "
"to use protected memory.",
api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str());
} else if ((buffer_state->unprotected == true) && (mem_info->unprotected == false)) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-None-01899" : "VUID-vkBindBufferMemory-None-01899";
LogObjectList objlist(buffer);
objlist.add(mem);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with protected memory but the VkBuffer (%s) was not set "
"to use protected memory.",
api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem,
VkDeviceSize memoryOffset) const {
const char *api_name = "vkBindBufferMemory()";
return ValidateBindBufferMemory(buffer, mem, memoryOffset, api_name);
}
bool CoreChecks::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfo *pBindInfos) const {
char api_name[64];
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
sprintf(api_name, "vkBindBufferMemory2() pBindInfos[%u]", i);
skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
}
return skip;
}
bool CoreChecks::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfo *pBindInfos) const {
char api_name[64];
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
sprintf(api_name, "vkBindBufferMemory2KHR() pBindInfos[%u]", i);
skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
}
return skip;
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements(VkDevice device, VkImage image,
VkMemoryRequirements *pMemoryRequirements) const {
bool skip = false;
if (IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) {
skip |= ValidateGetImageMemoryRequirementsANDROID(image, "vkGetImageMemoryRequirements()");
}
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state) {
// Checks for no disjoint bit
if (image_state->disjoint == true) {
skip |= LogError(image, "VUID-vkGetImageMemoryRequirements-image-01588",
"vkGetImageMemoryRequirements(): %s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT "
"(need to use vkGetImageMemoryRequirements2).",
report_data->FormatHandle(image).c_str());
}
}
return skip;
}
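// Shared validation for vkGetImageMemoryRequirements2(KHR): disjoint multi-planar images require a
// VkImagePlaneMemoryRequirementsInfo in the pNext chain, and its planeAspect must be a single valid plane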
bool CoreChecks::ValidateGetImageMemoryRequirements2(const VkImageMemoryRequirementsInfo2 *pInfo, const char *func_name) const {
bool skip = false;
if (IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) {
skip |= ValidateGetImageMemoryRequirementsANDROID(pInfo->image, func_name);
}
const IMAGE_STATE *image_state = GetImageState(pInfo->image);
const VkFormat image_format = image_state->createInfo.format;
const VkImageTiling image_tiling = image_state->createInfo.tiling;
const VkImagePlaneMemoryRequirementsInfo *image_plane_info = LvlFindInChain<VkImagePlaneMemoryRequirementsInfo>(pInfo->pNext);
if ((FormatIsMultiplane(image_format)) && (image_state->disjoint == true) && (image_plane_info == nullptr)) {
skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01589",
"%s: %s image was created with a multi-planar format (%s) and "
"VK_IMAGE_CREATE_DISJOINT_BIT, but the current pNext doesn't include a "
"VkImagePlaneMemoryRequirementsInfo struct",
func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format));
}
if ((image_state->disjoint == false) && (image_plane_info != nullptr)) {
skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01590",
"%s: %s image was not created with VK_IMAGE_CREATE_DISJOINT_BIT,"
"but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct",
func_name, report_data->FormatHandle(pInfo->image).c_str());
}
if ((FormatIsMultiplane(image_format) == false) && (image_tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) &&
(image_plane_info != nullptr)) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_image_drm_format_modifier)
? "VUID-VkImageMemoryRequirementsInfo2-image-02280"
: "VUID-VkImageMemoryRequirementsInfo2-image-01591";
skip |= LogError(pInfo->image, vuid,
"%s: %s image is a single-plane format (%s) and does not have tiling of "
"VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT,"
"but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct",
func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format));
}
if (image_plane_info != nullptr) {
if ((image_tiling == VK_IMAGE_TILING_LINEAR) || (image_tiling == VK_IMAGE_TILING_OPTIMAL)) {
// Make sure planeAspect is only a single, valid plane
uint32_t planes = FormatPlaneCount(image_format);
VkImageAspectFlags aspect = image_plane_info->planeAspect;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) {
skip |= LogError(
pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281",
"%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
"or VK_IMAGE_ASPECT_PLANE_1_BIT.",
func_name, report_data->FormatHandle(image_state->image()).c_str(), string_VkImageAspectFlags(aspect).c_str());
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) {
skip |= LogError(
pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281",
"%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
"or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.",
func_name, report_data->FormatHandle(image_state->image()).c_str(), string_VkImageAspectFlags(aspect).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) const {
return ValidateGetImageMemoryRequirements2(pInfo, "vkGetImageMemoryRequirements2()");
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) const {
return ValidateGetImageMemoryRequirements2(pInfo, "vkGetImageMemoryRequirements2KHR()");
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkImageFormatProperties2 *pImageFormatProperties) const {
// Can't wrap AHB-specific validation in a device extension check here, but no harm
bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties);
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkImageFormatProperties2 *pImageFormatProperties) const {
// Can't wrap AHB-specific validation in a device extension check here, but no harm
bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties);
return skip;
}
bool CoreChecks::PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline,
const VkAllocationCallbacks *pAllocator) const {
const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
bool skip = false;
if (pipeline_state) {
skip |= ValidateObjectNotInUse(pipeline_state, "vkDestroyPipeline", "VUID-vkDestroyPipeline-pipeline-00765");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) const {
const SAMPLER_STATE *sampler_state = GetSamplerState(sampler);
bool skip = false;
if (sampler_state) {
skip |= ValidateObjectNotInUse(sampler_state, "vkDestroySampler", "VUID-vkDestroySampler-sampler-01082");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
const VkAllocationCallbacks *pAllocator) const {
const DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool);
bool skip = false;
if (desc_pool_state) {
skip |= ValidateObjectNotInUse(desc_pool_state, "vkDestroyDescriptorPool",
"VUID-vkDestroyDescriptorPool-descriptorPool-00303");
}
return skip;
}
// Verify cmdBuffer in given cb_node is not in-flight (in use), and return skip result
// This check is only valid at a point when cmdBuffer is being reset or freed
bool CoreChecks::CheckCommandBufferInFlight(const CMD_BUFFER_STATE *cb_node, const char *action, const char *error_code) const {
bool skip = false;
if (cb_node->InUse()) {
skip |= LogError(cb_node->commandBuffer(), error_code, "Attempt to %s %s which is in use.", action,
report_data->FormatHandle(cb_node->commandBuffer()).c_str());
}
return skip;
}
// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
bool CoreChecks::CheckCommandBuffersInFlight(const COMMAND_POOL_STATE *pPool, const char *action, const char *error_code) const {
bool skip = false;
for (auto cmd_buffer : pPool->commandBuffers) {
skip |= CheckCommandBufferInFlight(GetCBState(cmd_buffer), action, error_code);
}
return skip;
}
bool CoreChecks::PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers) const {
bool skip = false;
for (uint32_t i = 0; i < commandBufferCount; i++) {
const auto *cb_node = GetCBState(pCommandBuffers[i]);
        // A command buffer cannot be freed while it is in use (in-flight)
if (cb_node) {
skip |= CheckCommandBufferInFlight(cb_node, "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) const {
bool skip = false;
skip |= ValidateDeviceQueueFamily(pCreateInfo->queueFamilyIndex, "vkCreateCommandPool", "pCreateInfo->queueFamilyIndex",
"VUID-vkCreateCommandPool-queueFamilyIndex-01937");
if ((enabled_features.core11.protectedMemory == VK_FALSE) &&
((pCreateInfo->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT) != 0)) {
skip |= LogError(device, "VUID-VkCommandPoolCreateInfo-flags-02860",
"vkCreateCommandPool(): the protectedMemory device feature is disabled: CommandPools cannot be created "
"with the VK_COMMAND_POOL_CREATE_PROTECTED_BIT set.");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) const {
if (disabled[query_validation]) return false;
bool skip = false;
if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
if (!enabled_features.core.pipelineStatisticsQuery) {
skip |= LogError(device, "VUID-VkQueryPoolCreateInfo-queryType-00791",
"vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
"VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
}
}
if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
if (!enabled_features.performance_query_features.performanceCounterQueryPools) {
skip |=
LogError(device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-performanceCounterQueryPools-03237",
"vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created on a device with "
"VkPhysicalDevicePerformanceQueryFeaturesKHR.performanceCounterQueryPools == VK_FALSE.");
}
auto perf_ci = LvlFindInChain<VkQueryPoolPerformanceCreateInfoKHR>(pCreateInfo->pNext);
if (!perf_ci) {
skip |= LogError(
device, "VUID-VkQueryPoolCreateInfo-queryType-03222",
"vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created but the pNext chain of "
"pCreateInfo does not contain in instance of VkQueryPoolPerformanceCreateInfoKHR.");
} else {
const auto &perf_counter_iter = physical_device_state->perf_counters.find(perf_ci->queueFamilyIndex);
if (perf_counter_iter == physical_device_state->perf_counters.end()) {
skip |= LogError(
device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-queueFamilyIndex-03236",
"vkCreateQueryPool(): VkQueryPerformanceCreateInfoKHR::queueFamilyIndex is not a valid queue family index.");
} else {
const QUEUE_FAMILY_PERF_COUNTERS *perf_counters = perf_counter_iter->second.get();
for (uint32_t idx = 0; idx < perf_ci->counterIndexCount; idx++) {
if (perf_ci->pCounterIndices[idx] >= perf_counters->counters.size()) {
skip |= LogError(
device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-pCounterIndices-03321",
"vkCreateQueryPool(): VkQueryPerformanceCreateInfoKHR::pCounterIndices[%u] = %u is not a valid "
"counter index.",
idx, perf_ci->pCounterIndices[idx]);
}
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
const VkAllocationCallbacks *pAllocator) const {
const COMMAND_POOL_STATE *cp_state = GetCommandPoolState(commandPool);
bool skip = false;
if (cp_state) {
// Verify that command buffers in pool are complete (not in-flight)
skip |= CheckCommandBuffersInFlight(cp_state, "destroy command pool with", "VUID-vkDestroyCommandPool-commandPool-00041");
}
return skip;
}
bool CoreChecks::PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) const {
const auto *command_pool_state = GetCommandPoolState(commandPool);
return CheckCommandBuffersInFlight(command_pool_state, "reset command pool with", "VUID-vkResetCommandPool-commandPool-00040");
}
bool CoreChecks::PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) const {
bool skip = false;
for (uint32_t i = 0; i < fenceCount; ++i) {
const auto fence_state = GetFenceState(pFences[i]);
if (fence_state && fence_state->scope == kSyncScopeInternal && fence_state->state == FENCE_INFLIGHT) {
skip |= LogError(pFences[i], "VUID-vkResetFences-pFences-01123", "%s is in use.",
report_data->FormatHandle(pFences[i]).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
const VkAllocationCallbacks *pAllocator) const {
const FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer);
bool skip = false;
if (framebuffer_state) {
skip |= ValidateObjectNotInUse(framebuffer_state, "vkDestroyFramebuffer",
"VUID-vkDestroyFramebuffer-framebuffer-00892");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
const VkAllocationCallbacks *pAllocator) const {
const RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass);
bool skip = false;
if (rp_state) {
skip |= ValidateObjectNotInUse(rp_state, "vkDestroyRenderPass", "VUID-vkDestroyRenderPass-renderPass-00873");
}
return skip;
}
// Access helper functions for external modules
VkFormatProperties CoreChecks::GetPDFormatProperties(const VkFormat format) const {
VkFormatProperties format_properties;
DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &format_properties);
return format_properties;
}
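// Validate VkPipelineVertexInputDivisorStateCreateInfoEXT usage for each pipeline: binding index and divisor limits,
// feature enablement for zero and non-one divisors, and that the binding uses VK_VERTEX_INPUT_RATE_INSTANCE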
bool CoreChecks::ValidatePipelineVertexDivisors(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pipe_state_vec,
const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) const {
bool skip = false;
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
for (uint32_t i = 0; i < count; i++) {
        auto pvids_ci = (pipe_cis[i].pVertexInputState)
                            ? LvlFindInChain<VkPipelineVertexInputDivisorStateCreateInfoEXT>(pipe_cis[i].pVertexInputState->pNext)
                            : nullptr;
if (nullptr == pvids_ci) continue;
const PIPELINE_STATE *pipe_state = pipe_state_vec[i].get();
for (uint32_t j = 0; j < pvids_ci->vertexBindingDivisorCount; j++) {
const VkVertexInputBindingDivisorDescriptionEXT *vibdd = &(pvids_ci->pVertexBindingDivisors[j]);
if (vibdd->binding >= device_limits->maxVertexInputBindings) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] binding index of (%1u) exceeds device maxVertexInputBindings (%1u).",
i, j, vibdd->binding, device_limits->maxVertexInputBindings);
}
if (vibdd->divisor > phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor of (%1u) exceeds extension maxVertexAttribDivisor (%1u).",
i, j, vibdd->divisor, phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor);
}
if ((0 == vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateZeroDivisor) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor must not be 0 when vertexAttributeInstanceRateZeroDivisor feature is not "
"enabled.",
i, j);
}
if ((1 != vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateDivisor) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor (%1u) must be 1 when vertexAttributeInstanceRateDivisor feature is not "
"enabled.",
i, j, vibdd->divisor);
}
// Find the corresponding binding description and validate input rate setting
bool failed_01871 = true;
for (size_t k = 0; k < pipe_state->vertex_binding_descriptions_.size(); k++) {
if ((vibdd->binding == pipe_state->vertex_binding_descriptions_[k].binding) &&
(VK_VERTEX_INPUT_RATE_INSTANCE == pipe_state->vertex_binding_descriptions_[k].inputRate)) {
failed_01871 = false;
break;
}
}
if (failed_01871) { // Description not found, or has incorrect inputRate value
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] specifies binding index (%1u), but that binding index's "
"VkVertexInputBindingDescription.inputRate member is not VK_VERTEX_INPUT_RATE_INSTANCE.",
i, j, vibdd->binding);
}
}
}
return skip;
}
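// When the pipelineCreationCacheControl feature is disabled, pipelines must not be created with the
// FAIL_ON_PIPELINE_COMPILE_REQUIRED or EARLY_RETURN_ON_FAILURE create flags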
bool CoreChecks::ValidatePipelineCacheControlFlags(VkPipelineCreateFlags flags, uint32_t index, const char *caller_name,
const char *vuid) const {
bool skip = false;
if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl == VK_FALSE) {
const VkPipelineCreateFlags invalid_flags =
VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT | VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT;
if ((flags & invalid_flags) != 0) {
skip |= LogError(device, vuid,
"%s(): pipelineCreationCacheControl is turned off but pipeline[%u] has VkPipelineCreateFlags "
"containing VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT or "
"VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT",
caller_name, index);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipelineCache *pPipelineCache) const {
bool skip = false;
if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl == VK_FALSE) {
if ((pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT) != 0) {
skip |= LogError(device, "VUID-VkPipelineCacheCreateInfo-pipelineCreationCacheControl-02892",
"vkCreatePipelineCache(): pipelineCreationCacheControl is turned off but pCreateInfo::flags contains "
"VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *cgpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, cgpl_state_data);
create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
for (uint32_t i = 0; i < count; i++) {
skip |= ValidatePipelineLocked(cgpl_state->pipe_state, i);
}
for (uint32_t i = 0; i < count; i++) {
skip |= ValidatePipelineUnlocked(cgpl_state->pipe_state[i].get(), i);
}
if (IsExtEnabled(device_extensions.vk_ext_vertex_attribute_divisor)) {
skip |= ValidatePipelineVertexDivisors(cgpl_state->pipe_state, count, pCreateInfos);
}
if (IsExtEnabled(device_extensions.vk_khr_portability_subset)) {
for (uint32_t i = 0; i < count; ++i) {
// Validate depth-stencil state
auto raster_state_ci = pCreateInfos[i].pRasterizationState;
if ((VK_FALSE == enabled_features.portability_subset_features.separateStencilMaskRef) && raster_state_ci &&
(VK_CULL_MODE_NONE == raster_state_ci->cullMode)) {
auto depth_stencil_ci = pCreateInfos[i].pDepthStencilState;
if (depth_stencil_ci && (VK_TRUE == depth_stencil_ci->stencilTestEnable) &&
(depth_stencil_ci->front.reference != depth_stencil_ci->back.reference)) {
skip |= LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-separateStencilMaskRef-04453",
"Invalid Pipeline CreateInfo[%d] (portability error): VkStencilOpState::reference must be the "
"same for front and back",
i);
}
}
// Validate color attachments
uint32_t subpass = pCreateInfos[i].subpass;
const auto *render_pass = GetRenderPassState(pCreateInfos[i].renderPass);
bool ignore_color_blend_state = pCreateInfos[i].pRasterizationState->rasterizerDiscardEnable ||
render_pass->createInfo.pSubpasses[subpass].colorAttachmentCount == 0;
if ((VK_FALSE == enabled_features.portability_subset_features.constantAlphaColorBlendFactors) &&
!ignore_color_blend_state) {
auto color_blend_state = pCreateInfos[i].pColorBlendState;
const auto attachments = color_blend_state->pAttachments;
                for (uint32_t color_attachment_index = 0; color_attachment_index < color_blend_state->attachmentCount;
                     ++color_attachment_index) {
if ((VK_BLEND_FACTOR_CONSTANT_ALPHA == attachments[color_attachment_index].srcColorBlendFactor) ||
(VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA == attachments[color_attachment_index].srcColorBlendFactor)) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-constantAlphaColorBlendFactors-04454",
"Invalid Pipeline CreateInfo[%d] (portability error): srcColorBlendFactor for color attachment %d must "
"not be VK_BLEND_FACTOR_CONSTANT_ALPHA or VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA",
i, color_attachment_index);
}
if ((VK_BLEND_FACTOR_CONSTANT_ALPHA == attachments[color_attachment_index].dstColorBlendFactor) ||
(VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA == attachments[color_attachment_index].dstColorBlendFactor)) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-constantAlphaColorBlendFactors-04455",
"Invalid Pipeline CreateInfo[%d] (portability error): dstColorBlendFactor for color attachment %d must "
"not be VK_BLEND_FACTOR_CONSTANT_ALPHA or VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA",
i, color_attachment_index);
}
}
}
}
}
return skip;
}
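// After pipeline creation, record shader dynamic state needed for primitive fragment shading rate validation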
void CoreChecks::PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
VkResult result, void *cgpl_state_data) {
ValidationStateTracker::PostCallRecordCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, result, cgpl_state_data);
if (enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate) {
for (uint32_t i = 0; i < count; i++) {
PIPELINE_STATE *pipeline_state = GetPipelineState(pPipelines[i]);
RecordGraphicsPipelineShaderDynamicState(pipeline_state);
}
}
}
bool CoreChecks::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *ccpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, ccpl_state_data);
auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
for (uint32_t i = 0; i < count; i++) {
// TODO: Add Compute Pipeline Verification
skip |= ValidateComputePipelineShaderState(ccpl_state->pipe_state[i].get());
        skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateComputePipelines",
"VUID-VkComputePipelineCreateInfo-pipelineCreationCacheControl-02875");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *crtpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, crtpl_state_data);
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data);
for (uint32_t i = 0; i < count; i++) {
PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get();
const auto &create_info = pipeline->create_info.raytracing;
if (create_info.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
const PIPELINE_STATE *base_pipeline = nullptr;
if (create_info.basePipelineIndex != -1) {
base_pipeline = crtpl_state->pipe_state[create_info.basePipelineIndex].get();
} else if (create_info.basePipelineHandle != VK_NULL_HANDLE) {
base_pipeline = GetPipelineState(create_info.basePipelineHandle);
}
if (!base_pipeline || !(base_pipeline->GetPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
skip |= LogError(
device, "VUID-vkCreateRayTracingPipelinesNV-flags-03416",
"vkCreateRayTracingPipelinesNV: If the flags member of any element of pCreateInfos contains the "
"VK_PIPELINE_CREATE_DERIVATIVE_BIT flag,"
"the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set.");
}
}
skip |= ValidateRayTracingPipeline(pipeline, pCreateInfos[i].flags, /*isKHR*/ false);
skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateRayTracingPipelinesNV",
"VUID-VkRayTracingPipelineCreateInfoNV-pipelineCreationCacheControl-02905");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRayTracingPipelinesKHR(VkDevice device, VkDeferredOperationKHR deferredOperation,
VkPipelineCache pipelineCache, uint32_t count,
const VkRayTracingPipelineCreateInfoKHR *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *crtpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesKHR(device, deferredOperation, pipelineCache, count,
pCreateInfos, pAllocator, pPipelines, crtpl_state_data);
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_khr_api_state *>(crtpl_state_data);
for (uint32_t i = 0; i < count; i++) {
PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get();
const auto &create_info = pipeline->create_info.raytracing;
if (create_info.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
const PIPELINE_STATE *base_pipeline = nullptr;
if (create_info.basePipelineIndex != -1) {
base_pipeline = crtpl_state->pipe_state[create_info.basePipelineIndex].get();
} else if (create_info.basePipelineHandle != VK_NULL_HANDLE) {
base_pipeline = GetPipelineState(create_info.basePipelineHandle);
}
if (!base_pipeline || !(base_pipeline->GetPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
skip |= LogError(
device, "VUID-vkCreateRayTracingPipelinesKHR-flags-03416",
"vkCreateRayTracingPipelinesKHR: If the flags member of any element of pCreateInfos contains the "
"VK_PIPELINE_CREATE_DERIVATIVE_BIT flag,"
"the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set.");
}
}
skip |= ValidateRayTracingPipeline(pipeline, pCreateInfos[i].flags, /*isKHR*/ true);
skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateRayTracingPipelinesKHR",
"VUID-VkRayTracingPipelineCreateInfoKHR-pipelineCreationCacheControl-02905");
if (create_info.pLibraryInfo) {
const std::vector<std::pair<const char *, VkPipelineCreateFlags>> vuid_map = {
{"VUID-VkRayTracingPipelineCreateInfoKHR-flags-04718", VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR},
{"VUID-VkRayTracingPipelineCreateInfoKHR-flags-04719", VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR},
{"VUID-VkRayTracingPipelineCreateInfoKHR-flags-04720",
VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR},
{"VUID-VkRayTracingPipelineCreateInfoKHR-flags-04721",
VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR},
{"VUID-VkRayTracingPipelineCreateInfoKHR-flags-04722",
VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR},
{"VUID-VkRayTracingPipelineCreateInfoKHR-flags-04723", VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR},
};
for (uint32_t j = 0; j < create_info.pLibraryInfo->libraryCount; ++j) {
const auto &lib = Get<PIPELINE_STATE>(create_info.pLibraryInfo->pLibraries[j]);
if ((lib->create_info.raytracing.flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) == 0) {
skip |= LogError(
device, "VUID-VkPipelineLibraryCreateInfoKHR-pLibraries-03381",
"vkCreateRayTracingPipelinesKHR(): pCreateInfo[%" PRIu32 "].pLibraryInfo->pLibraries[%" PRIu32
"] was not created with VK_PIPELINE_CREATE_LIBRARY_BIT_KHR.", i, j);
}
for (const auto &pair : vuid_map) {
if (create_info.flags & pair.second) {
if ((lib->create_info.raytracing.flags & pair.second) == 0) {
skip |= LogError(
device, pair.first,
"vkCreateRayTracingPipelinesKHR(): pCreateInfo[%" PRIu32
"].flags contains %s bit, but pCreateInfo[%" PRIu32
"].pLibraryInfo->pLibraries[%" PRIu32 "] was created without it.",
i, string_VkPipelineCreateFlags(pair.second).c_str(), i, j);
}
}
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetPipelineExecutablePropertiesKHR(VkDevice device, const VkPipelineInfoKHR *pPipelineInfo,
uint32_t *pExecutableCount,
VkPipelineExecutablePropertiesKHR *pProperties) const {
bool skip = false;
skip |= ValidatePipelineExecutableInfo(device, nullptr, "vkGetPipelineExecutablePropertiesKHR",
"VUID-vkGetPipelineExecutablePropertiesKHR-pipelineExecutableInfo-03270");
return skip;
}
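// Shared check for the pipeline executable property queries: the pipelineExecutableInfo feature must be enabled
// and, when a VkPipelineExecutableInfoKHR is provided, its executableIndex must be in range for the pipeline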
bool CoreChecks::ValidatePipelineExecutableInfo(VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo,
const char *caller_name, const char *feature_vuid) const {
bool skip = false;
if (!enabled_features.pipeline_exe_props_features.pipelineExecutableInfo) {
skip |= LogError(device, feature_vuid, "%s(): called when pipelineExecutableInfo feature is not enabled.", caller_name);
}
// vkGetPipelineExecutablePropertiesKHR will not have struct to validate further
if (pExecutableInfo) {
auto pi = LvlInitStruct<VkPipelineInfoKHR>();
pi.pipeline = pExecutableInfo->pipeline;
// We could probably cache this instead of fetching it every time
uint32_t executable_count = 0;
DispatchGetPipelineExecutablePropertiesKHR(device, &pi, &executable_count, NULL);
if (pExecutableInfo->executableIndex >= executable_count) {
skip |= LogError(
pExecutableInfo->pipeline, "VUID-VkPipelineExecutableInfoKHR-executableIndex-03275",
"%s(): VkPipelineExecutableInfo::executableIndex (%1u) must be less than the number of executables associated with "
"the pipeline (%1u) as returned by vkGetPipelineExecutablePropertiessKHR",
caller_name, pExecutableInfo->executableIndex, executable_count);
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetPipelineExecutableStatisticsKHR(VkDevice device,
const VkPipelineExecutableInfoKHR *pExecutableInfo,
uint32_t *pStatisticCount,
VkPipelineExecutableStatisticKHR *pStatistics) const {
bool skip = false;
skip |= ValidatePipelineExecutableInfo(device, pExecutableInfo, "vkGetPipelineExecutableStatisticsKHR",
"VUID-vkGetPipelineExecutableStatisticsKHR-pipelineExecutableInfo-03272");
const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline);
if (!(pipeline_state->GetPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR)) {
skip |= LogError(pExecutableInfo->pipeline, "VUID-vkGetPipelineExecutableStatisticsKHR-pipeline-03274",
"vkGetPipelineExecutableStatisticsKHR called on a pipeline created without the "
"VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR flag set");
}
return skip;
}
bool CoreChecks::PreCallValidateGetPipelineExecutableInternalRepresentationsKHR(
VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo, uint32_t *pInternalRepresentationCount,
VkPipelineExecutableInternalRepresentationKHR *pStatistics) const {
bool skip = false;
skip |= ValidatePipelineExecutableInfo(device, pExecutableInfo, "vkGetPipelineExecutableInternalRepresentationsKHR",
"VUID-vkGetPipelineExecutableInternalRepresentationsKHR-pipelineExecutableInfo-03276");
const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline);
if (!(pipeline_state->GetPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
skip |= LogError(pExecutableInfo->pipeline, "VUID-vkGetPipelineExecutableInternalRepresentationsKHR-pipeline-03278",
"vkGetPipelineExecutableInternalRepresentationsKHR called on a pipeline created without the "
"VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR flag set");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout) const {
return cvdescriptorset::ValidateDescriptorSetLayoutCreateInfo(
this, pCreateInfo, IsExtEnabled(device_extensions.vk_khr_push_descriptor),
phys_dev_ext_props.push_descriptor_props.maxPushDescriptors, IsExtEnabled(device_extensions.vk_ext_descriptor_indexing),
&enabled_features.core12, &enabled_features.inline_uniform_block_features, &phys_dev_ext_props.inline_uniform_block_props,
&enabled_features.ray_tracing_acceleration_structure_features, &device_extensions);
}
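// Descriptor type groupings used to check per-stage and total descriptor limits in vkCreatePipelineLayout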
enum DSL_DESCRIPTOR_GROUPS {
DSL_TYPE_SAMPLERS = 0,
DSL_TYPE_UNIFORM_BUFFERS,
DSL_TYPE_STORAGE_BUFFERS,
DSL_TYPE_SAMPLED_IMAGES,
DSL_TYPE_STORAGE_IMAGES,
DSL_TYPE_INPUT_ATTACHMENTS,
DSL_TYPE_INLINE_UNIFORM_BLOCK,
DSL_TYPE_ACCELERATION_STRUCTURE,
DSL_TYPE_ACCELERATION_STRUCTURE_NV,
DSL_NUM_DESCRIPTOR_GROUPS
};
// Used by PreCallValidateCreatePipelineLayout.
// Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage
std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
const DeviceFeatures *enabled_features,
const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) {
// Identify active pipeline stages
std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
VK_SHADER_STAGE_COMPUTE_BIT};
if (enabled_features->core.geometryShader) {
stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
}
if (enabled_features->core.tessellationShader) {
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
}
// Allow iteration over enum values
std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {
DSL_TYPE_SAMPLERS,
DSL_TYPE_UNIFORM_BUFFERS,
DSL_TYPE_STORAGE_BUFFERS,
DSL_TYPE_SAMPLED_IMAGES,
DSL_TYPE_STORAGE_IMAGES,
DSL_TYPE_INPUT_ATTACHMENTS,
DSL_TYPE_INLINE_UNIFORM_BLOCK,
DSL_TYPE_ACCELERATION_STRUCTURE,
DSL_TYPE_ACCELERATION_STRUCTURE_NV,
};
// Sum by layouts per stage, then pick max of stages per type
std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages
for (auto stage : stage_flags) {
std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums
for (const auto &dsl : set_layouts) {
if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (0 != (stage & binding->stageFlags) && binding->descriptorCount > 0) {
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
// count one block per binding. descriptorCount is number of bytes
stage_sum[DSL_TYPE_INLINE_UNIFORM_BLOCK]++;
break;
case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
stage_sum[DSL_TYPE_ACCELERATION_STRUCTURE] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV:
stage_sum[DSL_TYPE_ACCELERATION_STRUCTURE_NV] += binding->descriptorCount;
break;
default:
break;
}
}
}
}
for (auto type : dsl_groups) {
max_sum[type] = std::max(stage_sum[type], max_sum[type]);
}
}
return max_sum;
}
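// Illustrative use, as a sketch only (it mirrors the actual call sites in PreCallValidateCreatePipelineLayout
// below, from which the names are taken):
//     std::valarray<uint32_t> max_descriptors_per_stage =
//         GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, true);
//     if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > phys_dev_props.limits.maxPerStageDescriptorSamplers) {
//         // report VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287 (or -descriptorType-03016 with descriptor indexing)
//     }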
// Used by PreCallValidateCreatePipelineLayout.
// Returns a map indexed by VK_DESCRIPTOR_TYPE_* enum of the summed descriptors by type.
// Note: descriptors only count against the limit once even if used by multiple stages.
std::map<uint32_t, uint32_t> GetDescriptorSum(
const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) {
std::map<uint32_t, uint32_t> sum_by_type;
for (const auto &dsl : set_layouts) {
if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (binding->descriptorCount > 0) {
if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
// count one block per binding. descriptorCount is number of bytes
sum_by_type[binding->descriptorType]++;
} else {
sum_by_type[binding->descriptorType] += binding->descriptorCount;
}
}
}
}
return sum_by_type;
}
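// Callers combine related keys of the returned map when a single device limit covers several descriptor types;
// for example, the maxDescriptorSetSamplers check below adds sum[VK_DESCRIPTOR_TYPE_SAMPLER] and
// sum[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER], and the maxDescriptorSetSampledImages check additionally adds
// sum[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER].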
bool CoreChecks::PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipelineLayout *pPipelineLayout) const {
bool skip = false;
std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
unsigned int push_descriptor_set_count = 0;
{
for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
set_layouts[i] = GetDescriptorSetLayoutShared(pCreateInfo->pSetLayouts[i]);
if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
if (set_layouts[i]->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-04606",
"vkCreatePipelineLayout(): pCreateInfo->pSetLayouts[%" PRIu32 "] was created with VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE bit.", i);
}
}
}
if (push_descriptor_set_count > 1) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293",
"vkCreatePipelineLayout() Multiple push descriptor sets found.");
}
// Max descriptors by type, within a single pipeline stage
std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, true);
// Samplers
if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > phys_dev_props.limits.maxPerStageDescriptorSamplers) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03016"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorSamplers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLERS], phys_dev_props.limits.maxPerStageDescriptorSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03017"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUniformBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS],
phys_dev_props.limits.maxPerStageDescriptorUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03018"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS],
phys_dev_props.limits.maxPerStageDescriptorStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03019"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290";
skip |=
LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorSampledImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES], phys_dev_props.limits.maxPerStageDescriptorSampledImages);
}
// Storage images
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03020"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291";
skip |=
LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES], phys_dev_props.limits.maxPerStageDescriptorStorageImages);
}
// Input attachments
if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03021"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorInputAttachments limit (%d).",
max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
phys_dev_props.limits.maxPerStageDescriptorInputAttachments);
}
// Inline uniform blocks
if (max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-02214"
: "VUID-VkPipelineLayoutCreateInfo-descriptorType-02212";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
"maxPerStageDescriptorInlineUniformBlocks limit (%d).",
max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK],
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks);
}
if (max_descriptors_per_stage[DSL_TYPE_ACCELERATION_STRUCTURE] >
phys_dev_ext_props.acc_structure_props.maxPerStageDescriptorUpdateAfterBindAccelerationStructures) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03572",
"vkCreatePipelineLayout(): max per-stage acceleration structure bindings count (%" PRIu32 ") exceeds device "
"maxPerStageDescriptorInlineUniformBlocks limit (%" PRIu32 ").",
max_descriptors_per_stage[DSL_TYPE_ACCELERATION_STRUCTURE],
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks);
}
    // Total descriptors by type, summed across all pipeline stages
std::map<uint32_t, uint32_t> sum_all_stages = GetDescriptorSum(set_layouts, true);
// Samplers
uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > phys_dev_props.limits.maxDescriptorSetSamplers) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03028"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetSamplers limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetSamplers);
}
// Uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03029"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props.limits.maxDescriptorSetUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03030"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03031"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_props.limits.maxDescriptorSetStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03032"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > phys_dev_props.limits.maxDescriptorSetSampledImages) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03033"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetSampledImages limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetSampledImages);
}
// Storage images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > phys_dev_props.limits.maxDescriptorSetStorageImages) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03034"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageImages limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetStorageImages);
}
// Input attachments
if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_props.limits.maxDescriptorSetInputAttachments) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03035"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684";
skip |=
LogError(device, vuid,
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetInputAttachments limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_props.limits.maxDescriptorSetInputAttachments);
}
// Inline uniform blocks
if (sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)
? "VUID-VkPipelineLayoutCreateInfo-descriptorType-02216"
: "VUID-VkPipelineLayoutCreateInfo-descriptorType-02213";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
"maxDescriptorSetInlineUniformBlocks limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks);
}
// Acceleration structures NV
if (sum_all_stages[VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV] >
phys_dev_ext_props.ray_tracing_propsNV.maxDescriptorSetAccelerationStructures) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02381",
"vkCreatePipelineLayout(): sum of acceleration structures NV bindings among all stages (%" PRIu32 ") exceeds device "
"VkPhysicalDeviceRayTracingPropertiesNV::maxDescriptorSetAccelerationStructures limit (%" PRIu32 ").",
                         sum_all_stages[VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV],
phys_dev_ext_props.ray_tracing_propsNV.maxDescriptorSetAccelerationStructures);
}
if (IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)) {
// XXX TODO: replace with correct VU messages
// Max descriptors by type, within a single pipeline stage
std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind =
GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, false);
// Samplers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022",
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03023",
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03024",
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025",
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages);
}
// Storage images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03026",
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages);
}
// Input attachments
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03027",
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments);
}
// Inline uniform blocks
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02215",
"vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK],
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks);
}
        // Total descriptors by type, summed across all pipeline stages
std::map<uint32_t, uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(set_layouts, false);
// Samplers
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036",
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSamplers limit (%d).",
sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers);
}
// Uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037",
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) {
skip |=
LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038",
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039",
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
skip |=
LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040",
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041",
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSampledImages limit (%d).",
sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages);
}
// Storage images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042",
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageImages limit (%d).",
sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages);
}
// Input attachments
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043",
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments);
}
// Inline uniform blocks
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02217",
"vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindInlineUniformBlocks limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks);
}
}
if (IsExtEnabled(device_extensions.vk_ext_fragment_density_map2)) {
uint32_t sum_subsampled_samplers = 0;
for (const auto &dsl : set_layouts) {
// find the number of subsampled samplers across all stages
            // NOTE: this does not use the GetDescriptorSum pattern because it needs the GetSamplerState method
if ((dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (binding->descriptorCount > 0) {
if (((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) ||
(binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)) &&
(binding->pImmutableSamplers != nullptr)) {
for (uint32_t sampler_idx = 0; sampler_idx < binding->descriptorCount; sampler_idx++) {
const SAMPLER_STATE *state = GetSamplerState(binding->pImmutableSamplers[sampler_idx]);
if (state->createInfo.flags & (VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT |
VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT)) {
sum_subsampled_samplers++;
}
}
}
}
}
}
if (sum_subsampled_samplers > phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pImmutableSamplers-03566",
"vkCreatePipelineLayout(): sum of sampler bindings with flags containing "
"VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT or "
"VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT among all stages(% d) "
"exceeds device maxDescriptorSetSubsampledSamplers limit (%d).",
sum_subsampled_samplers,
phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers);
}
}
return skip;
}
bool CoreChecks::PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags) const {
// Make sure sets being destroyed are not currently in-use
if (disabled[object_in_use]) return false;
bool skip = false;
const DESCRIPTOR_POOL_STATE *pool = GetDescriptorPoolState(descriptorPool);
if (pool != nullptr) {
for (auto *ds : pool->sets) {
if (ds && ds->InUse()) {
skip |= LogError(descriptorPool, "VUID-vkResetDescriptorPool-descriptorPool-00313",
"It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer.");
if (skip) break;
}
}
}
return skip;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills ads_state_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
bool CoreChecks::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets, void *ads_state_data) const {
StateTracker::PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, ads_state_data);
cvdescriptorset::AllocateDescriptorSetsData *ads_state =
reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data);
    // All state checks for AllocateDescriptorSets are done in a single function
return ValidateAllocateDescriptorSets(pAllocateInfo, ads_state);
}
bool CoreChecks::PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
const VkDescriptorSet *pDescriptorSets) const {
    // Make sure that no sets being destroyed are currently in-use or in-flight
    bool skip = false;
for (uint32_t i = 0; i < count; ++i) {
if (pDescriptorSets[i] != VK_NULL_HANDLE) {
skip |= ValidateIdleDescriptorSet(pDescriptorSets[i], "vkFreeDescriptorSets");
}
}
const DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool);
if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
// Can't Free from a NON_FREE pool
skip |= LogError(descriptorPool, "VUID-vkFreeDescriptorSets-descriptorPool-00312",
"It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
"VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
}
return skip;
}
bool CoreChecks::PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies) const {
    // NOTE: UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets, so a
    // single up-front map look-up is not possible; the look-ups are done individually in the functions below.
    // The call below validates state only and performs no state updates. Since there is no single
    // DescriptorSet instance to dispatch to yet, a helper function parses the parameters and makes calls into
    // the specific class instances.
return ValidateUpdateDescriptorSets(descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies,
"vkUpdateDescriptorSets()");
}
bool CoreChecks::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo *pBeginInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return false;
bool skip = false;
if (cb_state->InUse()) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00049",
"Calling vkBeginCommandBuffer() on active %s before it has completed. You must check "
"command buffer fence before this call.",
report_data->FormatHandle(commandBuffer).c_str());
}
if (cb_state->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
// Primary Command Buffer
const VkCommandBufferUsageFlags invalid_usage =
(VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
if ((pBeginInfo->flags & invalid_usage) == invalid_usage) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-02840",
"vkBeginCommandBuffer(): Primary %s can't have both VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT and "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(commandBuffer).c_str());
}
} else {
// Secondary Command Buffer
const VkCommandBufferInheritanceInfo *info = pBeginInfo->pInheritanceInfo;
if (!info) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00051",
"vkBeginCommandBuffer(): Secondary %s must have inheritance info.",
report_data->FormatHandle(commandBuffer).c_str());
} else {
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
assert(info->renderPass);
const auto *framebuffer = GetFramebufferState(info->framebuffer);
if (framebuffer) {
if (framebuffer->createInfo.renderPass != info->renderPass) {
const auto *render_pass = GetRenderPassState(info->renderPass);
// renderPass that framebuffer was created with must be compatible with local renderPass
skip |= ValidateRenderPassCompatibility("framebuffer", framebuffer->rp_state.get(), "command buffer",
render_pass, "vkBeginCommandBuffer()",
"VUID-VkCommandBufferBeginInfo-flags-00055");
}
}
}
if ((info->occlusionQueryEnable == VK_FALSE || enabled_features.core.occlusionQueryPrecise == VK_FALSE) &&
(info->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00052",
"vkBeginCommandBuffer(): Secondary %s must not have VK_QUERY_CONTROL_PRECISE_BIT if "
"occulusionQuery is disabled or the device does not support precise occlusion queries.",
report_data->FormatHandle(commandBuffer).c_str());
}
auto p_inherited_viewport_scissor_info =
LvlFindInChain<VkCommandBufferInheritanceViewportScissorInfoNV>(info->pNext);
if (p_inherited_viewport_scissor_info != nullptr && p_inherited_viewport_scissor_info->viewportScissor2D) {
                if (!enabled_features.inherited_viewport_scissor_features.inheritedViewportScissor2D) {
skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceViewportScissorInfoNV-viewportScissor2D-04782",
"vkBeginCommandBuffer(): inheritedViewportScissor2D feature not enabled.");
}
if (!(pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceViewportScissorInfoNV-viewportScissor2D-04786",
"vkBeginCommandBuffer(): Secondary %s must be recorded with the"
"VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT if viewportScissor2D is VK_TRUE.",
report_data->FormatHandle(commandBuffer).c_str());
}
if (p_inherited_viewport_scissor_info->viewportDepthCount == 0) {
skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceViewportScissorInfoNV-viewportScissor2D-04784",
"vkBeginCommandBuffer(): "
"If viewportScissor2D is VK_TRUE, then viewportDepthCount must be greater than 0.");
}
}
}
if (info && info->renderPass != VK_NULL_HANDLE) {
const auto *render_pass = GetRenderPassState(info->renderPass);
if (render_pass) {
if (info->subpass >= render_pass->createInfo.subpassCount) {
skip |= LogError(commandBuffer, "VUID-VkCommandBufferBeginInfo-flags-00054",
"vkBeginCommandBuffer(): Secondary %s must have a subpass index (%d) that is "
"less than the number of subpasses (%d).",
report_data->FormatHandle(commandBuffer).c_str(), info->subpass,
render_pass->createInfo.subpassCount);
}
}
}
}
if (CB_RECORDING == cb_state->state) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00049",
"vkBeginCommandBuffer(): Cannot call Begin on %s in the RECORDING state. Must first call "
"vkEndCommandBuffer().",
report_data->FormatHandle(commandBuffer).c_str());
} else if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) {
VkCommandPool cmd_pool = cb_state->createInfo.commandPool;
const auto *pool = cb_state->command_pool.get();
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pool->createFlags)) {
LogObjectList objlist(commandBuffer);
objlist.add(cmd_pool);
skip |= LogError(objlist, "VUID-vkBeginCommandBuffer-commandBuffer-00050",
"Call to vkBeginCommandBuffer() on %s attempts to implicitly reset cmdBuffer created from "
"%s that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmd_pool).c_str());
}
}
auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext);
if (chained_device_group_struct) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, commandBuffer,
"VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00106");
skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, commandBuffer,
"VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00107");
}
return skip;
}
bool CoreChecks::PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return false;
bool skip = false;
if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == cb_state->createInfo.level) ||
!(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
// This needs spec clarification to update valid usage, see comments in PR:
// https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165
skip |= InsideRenderPass(cb_state, "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060");
}
if (cb_state->state == CB_INVALID_COMPLETE || cb_state->state == CB_INVALID_INCOMPLETE) {
skip |= ReportInvalidCommandBuffer(cb_state, "vkEndCommandBuffer()");
} else if (CB_RECORDING != cb_state->state) {
skip |= LogError(
commandBuffer, "VUID-vkEndCommandBuffer-commandBuffer-00059",
"vkEndCommandBuffer(): Cannot call End on %s when not in the RECORDING state. Must first call vkBeginCommandBuffer().",
report_data->FormatHandle(commandBuffer).c_str());
}
for (const auto &query : cb_state->activeQueries) {
skip |= LogError(commandBuffer, "VUID-vkEndCommandBuffer-commandBuffer-00061",
"vkEndCommandBuffer(): Ending command buffer with in progress query: %s, query %d.",
report_data->FormatHandle(query.pool).c_str(), query.query);
}
if (cb_state->conditional_rendering_active) {
skip |= LogError(commandBuffer, "VUID-vkEndCommandBuffer-None-01978",
"vkEndCommandBuffer(): Ending command buffer with active conditional rendering.");
}
return skip;
}
bool CoreChecks::PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return false;
VkCommandPool cmd_pool = cb_state->createInfo.commandPool;
const auto *pool = cb_state->command_pool.get();
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pool->createFlags)) {
LogObjectList objlist(commandBuffer);
objlist.add(cmd_pool);
skip |= LogError(objlist, "VUID-vkResetCommandBuffer-commandBuffer-00046",
"vkResetCommandBuffer(): Attempt to reset %s created from %s that does NOT have the "
"VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmd_pool).c_str());
}
skip |= CheckCommandBufferInFlight(cb_state, "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045");
return skip;
}
static const char *GetPipelineTypeName(VkPipelineBindPoint pipelineBindPoint) {
switch (pipelineBindPoint) {
case VK_PIPELINE_BIND_POINT_GRAPHICS:
return "graphics";
case VK_PIPELINE_BIND_POINT_COMPUTE:
return "compute";
case VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR:
return "ray-tracing";
case VK_PIPELINE_BIND_POINT_SUBPASS_SHADING_HUAWEI:
return "subpass-shading";
default:
return "unknown";
}
}
bool CoreChecks::ValidateGraphicsPipelineBindPoint(const CMD_BUFFER_STATE *cb_state, const PIPELINE_STATE *pipeline_state) const {
bool skip = false;
if (cb_state->inheritedViewportDepths.size() != 0) {
bool dyn_viewport = IsDynamic(pipeline_state, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT)
|| IsDynamic(pipeline_state, VK_DYNAMIC_STATE_VIEWPORT);
bool dyn_scissor = IsDynamic(pipeline_state, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT)
|| IsDynamic(pipeline_state, VK_DYNAMIC_STATE_SCISSOR);
if (!dyn_viewport || !dyn_scissor) {
skip |= LogError(device, "VUID-vkCmdBindPipeline-commandBuffer-04808",
"Graphics pipeline incompatible with viewport/scissor inheritance.");
}
const auto &create_info = pipeline_state->create_info.graphics;
const auto *discard_rectangle_state = LvlFindInChain<VkPipelineDiscardRectangleStateCreateInfoEXT>(create_info.pNext);
if (discard_rectangle_state && discard_rectangle_state->discardRectangleCount != 0) {
if (!IsDynamic(pipeline_state, VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT)) {
skip |= LogError(device, "VUID-vkCmdBindPipeline-commandBuffer-04809",
"vkCmdBindPipeline(): commandBuffer is a secondary command buffer with "
"VkCommandBufferInheritanceViewportScissorInfoNV::viewportScissor2D enabled, pipelineBindPoint is "
"VK_PIPELINE_BIND_POINT_GRAPHICS and pipeline was created with "
"VkPipelineDiscardRectangleStateCreateInfoEXT::discardRectangleCount = %" PRIu32
", but without VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT.",
discard_rectangle_state->discardRectangleCount);
}
}
}
return skip;
}
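// Called from PreCallValidateCmdBindPipeline for VK_PIPELINE_BIND_POINT_GRAPHICS; the inheritedViewportDepths
// check above limits these requirements to secondary command buffers that inherit viewport/scissor state via
// VkCommandBufferInheritanceViewportScissorInfoNV.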
bool CoreChecks::PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipeline pipeline) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BINDPIPELINE);
static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindPipeline-pipelineBindPoint-00777"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindPipeline-pipelineBindPoint-00778"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, "VUID-vkCmdBindPipeline-pipelineBindPoint-02391")};
skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors);
const auto *pipeline_state = GetPipelineState(pipeline);
assert(pipeline_state);
const auto pipeline_state_bind_point = pipeline_state->GetPipelineType();
if (pipelineBindPoint != pipeline_state_bind_point) {
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindPipeline-pipelineBindPoint-00779",
"Cannot bind a pipeline of type %s to the graphics pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
} else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindPipeline-pipelineBindPoint-00780",
"Cannot bind a pipeline of type %s to the compute pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
} else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindPipeline-pipelineBindPoint-02392",
"Cannot bind a pipeline of type %s to the ray-tracing pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
}
} else {
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
skip |= ValidateGraphicsPipelineBindPoint(cb_state, pipeline_state);
if (cb_state->activeRenderPass &&
phys_dev_ext_props.provoking_vertex_props.provokingVertexModePerPipeline == VK_FALSE) {
const auto lvl_bind_point = ConvertToLvlBindPoint(pipelineBindPoint);
const auto &last_bound_it = cb_state->lastBound[lvl_bind_point];
if (last_bound_it.pipeline_state) {
auto last_bound_provoking_vertex_state_ci =
LvlFindInChain<VkPipelineRasterizationProvokingVertexStateCreateInfoEXT>(
last_bound_it.pipeline_state->create_info.graphics.pRasterizationState->pNext);
auto current_provoking_vertex_state_ci =
LvlFindInChain<VkPipelineRasterizationProvokingVertexStateCreateInfoEXT>(
pipeline_state->create_info.graphics.pRasterizationState->pNext);
if (last_bound_provoking_vertex_state_ci && !current_provoking_vertex_state_ci) {
skip |= LogError(pipeline, "VUID-vkCmdBindPipeline-pipelineBindPoint-04881",
"Previous %s's provokingVertexMode is %s, but %s doesn't chain "
"VkPipelineRasterizationProvokingVertexStateCreateInfoEXT.",
report_data->FormatHandle(last_bound_it.pipeline_state->pipeline()).c_str(),
string_VkProvokingVertexModeEXT(last_bound_provoking_vertex_state_ci->provokingVertexMode),
report_data->FormatHandle(pipeline).c_str());
} else if (!last_bound_provoking_vertex_state_ci && current_provoking_vertex_state_ci) {
skip |= LogError(pipeline, "VUID-vkCmdBindPipeline-pipelineBindPoint-04881",
" %s's provokingVertexMode is %s, but previous %s doesn't chain "
"VkPipelineRasterizationProvokingVertexStateCreateInfoEXT.",
report_data->FormatHandle(pipeline).c_str(),
string_VkProvokingVertexModeEXT(current_provoking_vertex_state_ci->provokingVertexMode),
report_data->FormatHandle(last_bound_it.pipeline_state->pipeline()).c_str());
} else if (last_bound_provoking_vertex_state_ci && current_provoking_vertex_state_ci &&
last_bound_provoking_vertex_state_ci->provokingVertexMode !=
current_provoking_vertex_state_ci->provokingVertexMode) {
skip |=
LogError(pipeline, "VUID-vkCmdBindPipeline-pipelineBindPoint-04881",
"%s's provokingVertexMode is %s, but previous %s's provokingVertexMode is %s.",
report_data->FormatHandle(pipeline).c_str(),
string_VkProvokingVertexModeEXT(current_provoking_vertex_state_ci->provokingVertexMode),
report_data->FormatHandle(last_bound_it.pipeline_state->pipeline()).c_str(),
string_VkProvokingVertexModeEXT(last_bound_provoking_vertex_state_ci->provokingVertexMode));
}
}
}
if (cb_state->activeRenderPass && phys_dev_ext_props.sample_locations_props.variableSampleLocations == VK_FALSE) {
const auto *sample_locations =
LvlFindInChain<VkPipelineSampleLocationsStateCreateInfoEXT>(pipeline_state->create_info.graphics.pNext);
if (sample_locations && sample_locations->sampleLocationsEnable == VK_TRUE &&
!IsDynamic(pipeline_state, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT)) {
const VkRenderPassSampleLocationsBeginInfoEXT *sample_locations_begin_info =
LvlFindInChain<VkRenderPassSampleLocationsBeginInfoEXT>(cb_state->activeRenderPassBeginInfo.pNext);
bool found = false;
if (sample_locations_begin_info) {
for (uint32_t i = 0; i < sample_locations_begin_info->postSubpassSampleLocationsCount; ++i) {
if (sample_locations_begin_info->pPostSubpassSampleLocations[i].subpassIndex ==
cb_state->activeSubpass) {
if (MatchSampleLocationsInfo(
&sample_locations_begin_info->pPostSubpassSampleLocations[i].sampleLocationsInfo,
&sample_locations->sampleLocationsInfo)) {
found = true;
}
}
}
}
if (!found) {
skip |=
LogError(pipeline, "VUID-vkCmdBindPipeline-variableSampleLocations-01525",
"vkCmdBindPipeline(): VkPhysicalDeviceSampleLocationsPropertiesEXT::variableSampleLocations "
"is false, pipeline is a graphics pipeline with "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsEnable equal to true and without "
"VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, but the current render pass (%" PRIu32
") was not begun with any element of "
"VkRenderPassSampleLocationsBeginInfoEXT::pPostSubpassSampleLocations subpassIndex "
"matching the current subpass index and sampleLocationsInfo matching sampleLocationsInfo of "
"VkPipelineSampleLocationsStateCreateInfoEXT the pipeline was created with.",
cb_state->activeSubpass);
}
}
}
}
if (pipeline_state->GetPipelineCreateFlags() & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) {
skip |= LogError(
pipeline, "VUID-vkCmdBindPipeline-pipeline-03382",
"vkCmdBindPipeline(): Cannot bind a pipeline that was created with the VK_PIPELINE_CREATE_LIBRARY_BIT_KHR flag.");
}
if (cb_state->transform_feedback_active) {
skip |= LogError(pipeline, "VUID-vkCmdBindPipeline-None-02323", "vkCmdBindPipeline(): transform feedback is active.");
}
}
return skip;
}
bool CoreChecks::ForbidInheritedViewportScissor(VkCommandBuffer commandBuffer, const CMD_BUFFER_STATE *cb_state,
const char* vuid, const char *cmdName) const {
bool skip = false;
if (cb_state->inheritedViewportDepths.size() != 0) {
skip |= LogError(
commandBuffer, vuid,
"%s: commandBuffer must not have VkCommandBufferInheritanceViewportScissorInfoNV::viewportScissor2D enabled.", cmdName);
}
return skip;
}
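// Used by the viewport/scissor state setters below (vkCmdSetViewport, vkCmdSetScissor): a command buffer that
// inherits this state through VkCommandBufferInheritanceViewportScissorInfoNV::viewportScissor2D must not set
// it directly.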
bool CoreChecks::PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
const VkViewport *pViewports) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORT);
skip |=
ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetViewport-commandBuffer-04821", "vkCmdSetViewport");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
const VkRect2D *pScissors) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSCISSOR);
skip |=
ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetScissor-viewportScissor2D-04789", "vkCmdSetScissor");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETEXCLUSIVESCISSORNV);
if (!enabled_features.exclusive_scissor_features.exclusiveScissor) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-None-02031",
"vkCmdSetExclusiveScissorNV: The exclusiveScissor feature is disabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
VkImageLayout imageLayout) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BINDSHADINGRATEIMAGENV);
if (!enabled_features.shading_rate_image_features.shadingRateImage) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindShadingRateImageNV-None-02058",
"vkCmdBindShadingRateImageNV: The shadingRateImage feature is disabled.");
}
if (imageView != VK_NULL_HANDLE) {
        const auto view_state = GetImageViewState(imageView);
        if (!view_state || (view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D &&
                            view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02059",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid "
"VkImageView handle of type VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
}
        if (view_state && view_state->create_info.format != VK_FORMAT_R8_UINT) {
skip |= LogError(
imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02060",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must have a format of VK_FORMAT_R8_UINT.");
}
const VkImageCreateInfo *ici = view_state ? &GetImageState(view_state->create_info.image)->createInfo : nullptr;
if (ici && !(ici->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV)) {
skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02061",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, the image must have been "
"created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set.");
}
if (view_state) {
const auto image_state = GetImageState(view_state->create_info.image);
bool hit_error = false;
// XXX TODO: While the VUID says "each subresource", only the base mip level is
// actually used. Since we don't have an existing convenience function to iterate
// over all mip levels, just don't bother with non-base levels.
const VkImageSubresourceRange &range = view_state->normalized_subresource_range;
VkImageSubresourceLayers subresource = {range.aspectMask, range.baseMipLevel, range.baseArrayLayer, range.layerCount};
if (image_state) {
skip |= VerifyImageLayout(cb_state, image_state, subresource, imageLayout, VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV,
"vkCmdCopyImage()", "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063",
"VUID-vkCmdBindShadingRateImageNV-imageView-02062", &hit_error);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
uint32_t viewportCount,
const VkShadingRatePaletteNV *pShadingRatePalettes) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTSHADINGRATEPALETTENV);
if (!enabled_features.shading_rate_image_features.shadingRateImage) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02064",
"vkCmdSetViewportShadingRatePaletteNV: The shadingRateImage feature is disabled.");
}
for (uint32_t i = 0; i < viewportCount; ++i) {
auto *palette = &pShadingRatePalettes[i];
if (palette->shadingRatePaletteEntryCount == 0 ||
palette->shadingRatePaletteEntryCount > phys_dev_ext_props.shading_rate_image_props.shadingRatePaletteSize) {
skip |= LogError(
commandBuffer, "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071",
"vkCmdSetViewportShadingRatePaletteNV: shadingRatePaletteEntryCount must be between 1 and shadingRatePaletteSize.");
}
}
return skip;
}
bool CoreChecks::ValidateGeometryTrianglesNV(const VkGeometryTrianglesNV &triangles, const char *func_name) const {
bool skip = false;
const BUFFER_STATE *vb_state = GetBufferState(triangles.vertexData);
if (vb_state != nullptr && vb_state->createInfo.size <= triangles.vertexOffset) {
skip |= LogError(device, "VUID-VkGeometryTrianglesNV-vertexOffset-02428", "%s", func_name);
}
const BUFFER_STATE *ib_state = GetBufferState(triangles.indexData);
if (ib_state != nullptr && ib_state->createInfo.size <= triangles.indexOffset) {
skip |= LogError(device, "VUID-VkGeometryTrianglesNV-indexOffset-02431", "%s", func_name);
}
const BUFFER_STATE *td_state = GetBufferState(triangles.transformData);
if (td_state != nullptr && td_state->createInfo.size <= triangles.transformOffset) {
skip |= LogError(device, "VUID-VkGeometryTrianglesNV-transformOffset-02437", "%s", func_name);
}
return skip;
}
bool CoreChecks::ValidateGeometryAABBNV(const VkGeometryAABBNV &aabbs, const char *func_name) const {
bool skip = false;
const BUFFER_STATE *aabb_state = GetBufferState(aabbs.aabbData);
if (aabb_state != nullptr && aabb_state->createInfo.size > 0 && aabb_state->createInfo.size <= aabbs.offset) {
skip |= LogError(device, "VUID-VkGeometryAABBNV-offset-02439", "%s", func_name);
}
return skip;
}
bool CoreChecks::ValidateGeometryNV(const VkGeometryNV &geometry, const char *func_name) const {
bool skip = false;
if (geometry.geometryType == VK_GEOMETRY_TYPE_TRIANGLES_NV) {
skip = ValidateGeometryTrianglesNV(geometry.geometry.triangles, func_name);
} else if (geometry.geometryType == VK_GEOMETRY_TYPE_AABBS_NV) {
skip = ValidateGeometryAABBNV(geometry.geometry.aabbs, func_name);
}
return skip;
}
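// ValidateGeometryNV dispatches on geometryType and is invoked once per element of
// VkAccelerationStructureInfoNV::pGeometries when a bottom-level acceleration structure is created (see
// PreCallValidateCreateAccelerationStructureNV below).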
bool CoreChecks::PreCallValidateCreateAccelerationStructureNV(VkDevice device,
const VkAccelerationStructureCreateInfoNV *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkAccelerationStructureNV *pAccelerationStructure) const {
bool skip = false;
if (pCreateInfo != nullptr && pCreateInfo->info.type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) {
for (uint32_t i = 0; i < pCreateInfo->info.geometryCount; i++) {
skip |= ValidateGeometryNV(pCreateInfo->info.pGeometries[i], "vkCreateAccelerationStructureNV():");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateAccelerationStructureKHR(VkDevice device,
const VkAccelerationStructureCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkAccelerationStructureKHR *pAccelerationStructure) const {
bool skip = false;
if (pCreateInfo) {
const BUFFER_STATE *buffer_state = GetBufferState(pCreateInfo->buffer);
if (buffer_state) {
if (!(buffer_state->createInfo.usage & VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR)) {
skip |=
LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-buffer-03614",
"VkAccelerationStructureCreateInfoKHR(): buffer must have been created with a usage value containing "
"VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR.");
}
if (buffer_state->createInfo.flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT) {
skip |= LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-buffer-03615",
"VkAccelerationStructureCreateInfoKHR(): buffer must not have been created with "
"VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT.");
}
if (pCreateInfo->offset + pCreateInfo->size > buffer_state->createInfo.size) {
skip |= LogError(
device, "VUID-VkAccelerationStructureCreateInfoKHR-offset-03616",
"VkAccelerationStructureCreateInfoKHR(): The sum of offset and size must be less than the size of buffer.");
}
}
}
return skip;
}
bool CoreChecks::ValidateBindAccelerationStructureMemory(VkDevice device,
const VkBindAccelerationStructureMemoryInfoNV &info) const {
bool skip = false;
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(info.accelerationStructure);
if (!as_state) {
return skip;
}
if (!as_state->GetBoundMemory().empty()) {
skip |=
LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-accelerationStructure-03620",
"vkBindAccelerationStructureMemoryNV(): accelerationStructure must not already be backed by a memory object.");
}
// Validate bound memory range information
const auto mem_info = GetDevMemState(info.memory);
if (mem_info) {
skip |= ValidateInsertAccelerationStructureMemoryRange(info.accelerationStructure, mem_info, info.memoryOffset,
"vkBindAccelerationStructureMemoryNV()");
skip |= ValidateMemoryTypes(mem_info, as_state->memory_requirements.memoryRequirements.memoryTypeBits,
"vkBindAccelerationStructureMemoryNV()",
"VUID-VkBindAccelerationStructureMemoryInfoNV-memory-03622");
}
// Validate memory requirements alignment
if (SafeModulo(info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment) != 0) {
skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-03623",
"vkBindAccelerationStructureMemoryNV(): memoryOffset 0x%" PRIxLEAST64
" must be an integer multiple of the alignment 0x%" PRIxLEAST64
" member of the VkMemoryRequirements structure returned from "
"a call to vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure and type of "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV",
info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment);
}
if (mem_info) {
// Validate memory requirements size
if (as_state->memory_requirements.memoryRequirements.size > (mem_info->alloc_info.allocationSize - info.memoryOffset)) {
skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-size-03624",
"vkBindAccelerationStructureMemoryNV(): The size 0x%" PRIxLEAST64
" member of the VkMemoryRequirements structure returned from a call to "
"vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure and type of "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV must be less than or equal to the size "
"of memory minus memoryOffset 0x%" PRIxLEAST64 ".",
as_state->memory_requirements.memoryRequirements.size,
mem_info->alloc_info.allocationSize - info.memoryOffset);
}
}
return skip;
}
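// Note on the alignment check above (illustrative): SafeModulo(memoryOffset, alignment) != 0 flags offsets that are
// not a multiple of the reported alignment. For example, with a reported alignment of 256, a memoryOffset of 512
// passes while a memoryOffset of 300 triggers VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-03623.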
bool CoreChecks::PreCallValidateBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount,
const VkBindAccelerationStructureMemoryInfoNV *pBindInfos) const {
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
skip |= ValidateBindAccelerationStructureMemory(device, pBindInfos[i]);
}
return skip;
}
bool CoreChecks::PreCallValidateGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
size_t dataSize, void *pData) const {
bool skip = false;
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(accelerationStructure);
if (as_state != nullptr) {
// TODO: update the fake VUID below once the real one is generated.
skip = ValidateMemoryIsBoundToAccelerationStructure(
as_state, "vkGetAccelerationStructureHandleNV",
"UNASSIGNED-vkGetAccelerationStructureHandleNV-accelerationStructure-XXXX");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructuresKHR(
VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURESKHR);
if (pInfos != NULL) {
for (uint32_t info_index = 0; info_index < infoCount; ++info_index) {
const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state =
GetAccelerationStructureStateKHR(pInfos[info_index].srcAccelerationStructure);
const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state =
GetAccelerationStructureStateKHR(pInfos[info_index].dstAccelerationStructure);
if (pInfos[info_index].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) {
if (src_as_state == nullptr || !src_as_state->built ||
!(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03667",
"vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must "
"have been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in "
"VkAccelerationStructureBuildGeometryInfoKHR::flags.");
}
                if (src_as_state != nullptr && pInfos[info_index].geometryCount != src_as_state->build_info_khr.geometryCount) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03758",
"vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR,"
" its geometryCount member must have the same value which was specified when "
"srcAccelerationStructure was last built.");
}
                if (src_as_state != nullptr && pInfos[info_index].flags != src_as_state->build_info_khr.flags) {
skip |=
LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03759",
"vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
" VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which"
" was specified when srcAccelerationStructure was last built.");
}
                if (src_as_state != nullptr && pInfos[info_index].type != src_as_state->build_info_khr.type) {
skip |=
LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03760",
"vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
" VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which"
" was specified when srcAccelerationStructure was last built.");
}
}
if (pInfos[info_index].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) {
                if (!dst_as_state || (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR &&
                                      dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
skip |=
LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03700",
"vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
"VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have "
"been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
"VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
}
}
if (pInfos[info_index].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) {
                if (!dst_as_state || (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR &&
                                      dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
skip |=
LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03699",
"vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
"VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been "
"created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
"VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
}
}
skip |= ValidateAccelerationBuffers(info_index, pInfos[info_index], "vkCmdBuildAccelerationStructuresKHR");
}
}
return skip;
}
bool CoreChecks::ValidateAccelerationBuffers(uint32_t info_index, const VkAccelerationStructureBuildGeometryInfoKHR &info,
const char *func_name) const {
bool skip = false;
const auto geometry_count = info.geometryCount;
const auto *p_geometries = info.pGeometries;
const auto *const *const pp_geometries = info.ppGeometries;
auto buffer_check = [this, info_index, func_name](uint32_t gi, const VkDeviceOrHostAddressConstKHR address,
const char *field) -> bool {
const auto itr = buffer_address_map_.find(address.deviceAddress);
if (itr != buffer_address_map_.cend() &&
!(itr->second->createInfo.usage & VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR)) {
LogObjectList objlist(device);
objlist.add(itr->second->Handle());
return LogError(objlist, "VUID-vkCmdBuildAccelerationStructuresKHR-geometry-03673",
"%s(): The buffer associated with pInfos[%" PRIu32 "].pGeometries[%" PRIu32
"].%s was not created with VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR.",
func_name, info_index, gi, field);
}
return false;
};
// Parameter validation has already checked VUID-VkAccelerationStructureBuildGeometryInfoKHR-pGeometries-03788
// !(pGeometries && ppGeometries)
std::function<const VkAccelerationStructureGeometryKHR &(uint32_t)> geom_accessor;
if (p_geometries) {
geom_accessor = [p_geometries](uint32_t i) -> const VkAccelerationStructureGeometryKHR & { return p_geometries[i]; };
} else if (pp_geometries) {
geom_accessor = [pp_geometries](uint32_t i) -> const VkAccelerationStructureGeometryKHR & {
// pp_geometries[i] is assumed to be a valid pointer
return *pp_geometries[i];
};
}
if (geom_accessor) {
for (uint32_t geom_index = 0; geom_index < geometry_count; ++geom_index) {
const auto &geom_data = geom_accessor(geom_index);
switch (geom_data.geometryType) {
case VK_GEOMETRY_TYPE_TRIANGLES_KHR: // == VK_GEOMETRY_TYPE_TRIANGLES_NV
skip |= buffer_check(geom_index, geom_data.geometry.triangles.vertexData, "geometry.triangles.vertexData");
skip |= buffer_check(geom_index, geom_data.geometry.triangles.indexData, "geometry.triangles.indexData");
skip |=
buffer_check(geom_index, geom_data.geometry.triangles.transformData, "geometry.triangles.transformData");
break;
case VK_GEOMETRY_TYPE_INSTANCES_KHR:
skip |= buffer_check(geom_index, geom_data.geometry.instances.data, "geometry.instances.data");
break;
case VK_GEOMETRY_TYPE_AABBS_KHR: // == VK_GEOMETRY_TYPE_AABBS_NV
skip |= buffer_check(geom_index, geom_data.geometry.aabbs.data, "geometry.aabbs.data");
break;
default:
// no-op
break;
}
}
}
const auto itr = buffer_address_map_.find(info.scratchData.deviceAddress);
if (itr != buffer_address_map_.cend() && !(itr->second->createInfo.usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT)) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03674",
"vkBuildAccelerationStructuresKHR(): The buffer associated with pInfos[%" PRIu32
"].scratchData.deviceAddress was not created with VK_BUFFER_USAGE_STORAGE_BUFFER_BIT bit.",
info_index);
}
return skip;
}
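// Sketch of the application-side assumption behind the scratch buffer check in ValidateAccelerationBuffers above
// (illustrative, not validation code): the buffer_address_map_ lookup resolves scratchData.deviceAddress back to a
// buffer, which is typically created along these lines:
//
//   VkBufferCreateInfo scratch_ci = {};
//   scratch_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//   scratch_ci.size = build_sizes.buildScratchSize;  // build_sizes is hypothetical
//   scratch_ci.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
//
// A scratch buffer missing VK_BUFFER_USAGE_STORAGE_BUFFER_BIT triggers VUID 03674 above.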
bool CoreChecks::PreCallValidateBuildAccelerationStructuresKHR(
VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const {
bool skip = false;
for (uint32_t i = 0; i < infoCount; ++i) {
const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfos[i].srcAccelerationStructure);
const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state = GetAccelerationStructureStateKHR(pInfos[i].dstAccelerationStructure);
if (pInfos[i].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) {
if (src_as_state == nullptr || !src_as_state->built ||
!(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) {
skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03667",
"vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must have "
"been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in "
"VkAccelerationStructureBuildGeometryInfoKHR::flags.");
}
            if (src_as_state != nullptr && pInfos[i].geometryCount != src_as_state->build_info_khr.geometryCount) {
skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03758",
"vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR,"
" its geometryCount member must have the same value which was specified when "
"srcAccelerationStructure was last built.");
}
            if (src_as_state != nullptr && pInfos[i].flags != src_as_state->build_info_khr.flags) {
skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03759",
"vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
" VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which"
" was specified when srcAccelerationStructure was last built.");
}
            if (src_as_state != nullptr && pInfos[i].type != src_as_state->build_info_khr.type) {
skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03760",
"vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
" VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which"
" was specified when srcAccelerationStructure was last built.");
}
}
if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) {
            if (!dst_as_state || (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR &&
                                  dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03700",
"vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
"VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have "
"been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
"VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
}
}
if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) {
            if (!dst_as_state || (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR &&
                                  dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03699",
"vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
"VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been "
"created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
"VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer,
const VkAccelerationStructureInfoNV *pInfo, VkBuffer instanceData,
VkDeviceSize instanceOffset, VkBool32 update,
VkAccelerationStructureNV dst, VkAccelerationStructureNV src,
VkBuffer scratch, VkDeviceSize scratchOffset) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURENV);
if (pInfo != nullptr && pInfo->type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) {
for (uint32_t i = 0; i < pInfo->geometryCount; i++) {
skip |= ValidateGeometryNV(pInfo->pGeometries[i], "vkCmdBuildAccelerationStructureNV():");
}
}
if (pInfo != nullptr && pInfo->geometryCount > phys_dev_ext_props.ray_tracing_propsNV.maxGeometryCount) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-geometryCount-02241",
"vkCmdBuildAccelerationStructureNV(): geometryCount [%d] must be less than or equal to "
"VkPhysicalDeviceRayTracingPropertiesNV::maxGeometryCount.",
pInfo->geometryCount);
}
const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureStateNV(dst);
const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureStateNV(src);
const BUFFER_STATE *scratch_buffer_state = GetBufferState(scratch);
if (dst_as_state != nullptr && pInfo != nullptr) {
if (dst_as_state->create_infoNV.info.type != pInfo->type) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::type"
"[%s] must be identical to build info VkAccelerationStructureInfoNV::type [%s].",
string_VkAccelerationStructureTypeNV(dst_as_state->create_infoNV.info.type),
string_VkAccelerationStructureTypeNV(pInfo->type));
}
if (dst_as_state->create_infoNV.info.flags != pInfo->flags) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::flags"
"[0x%X] must be identical to build info VkAccelerationStructureInfoNV::flags [0x%X].",
dst_as_state->create_infoNV.info.flags, pInfo->flags);
}
if (dst_as_state->create_infoNV.info.instanceCount < pInfo->instanceCount) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::instanceCount "
"[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::instanceCount [%d].",
dst_as_state->create_infoNV.info.instanceCount, pInfo->instanceCount);
}
if (dst_as_state->create_infoNV.info.geometryCount < pInfo->geometryCount) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::geometryCount"
"[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::geometryCount [%d].",
dst_as_state->create_infoNV.info.geometryCount, pInfo->geometryCount);
} else {
for (uint32_t i = 0; i < pInfo->geometryCount; i++) {
const VkGeometryDataNV &create_geometry_data = dst_as_state->create_infoNV.info.pGeometries[i].geometry;
const VkGeometryDataNV &build_geometry_data = pInfo->pGeometries[i].geometry;
if (create_geometry_data.triangles.vertexCount < build_geometry_data.triangles.vertexCount) {
skip |= LogError(
commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.vertexCount [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.triangles.vertexCount [%d].",
i, create_geometry_data.triangles.vertexCount, i, build_geometry_data.triangles.vertexCount);
break;
}
if (create_geometry_data.triangles.indexCount < build_geometry_data.triangles.indexCount) {
skip |= LogError(
commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.indexCount [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.triangles.indexCount [%d].",
i, create_geometry_data.triangles.indexCount, i, build_geometry_data.triangles.indexCount);
break;
}
if (create_geometry_data.aabbs.numAABBs < build_geometry_data.aabbs.numAABBs) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.aabbs.numAABBs [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.aabbs.numAABBs [%d].",
i, create_geometry_data.aabbs.numAABBs, i, build_geometry_data.aabbs.numAABBs);
break;
}
}
}
}
if (dst_as_state != nullptr) {
skip |= ValidateMemoryIsBoundToAccelerationStructure(
dst_as_state, "vkCmdBuildAccelerationStructureNV()",
"UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
}
if (update == VK_TRUE) {
if (src == VK_NULL_HANDLE) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02489",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must not be VK_NULL_HANDLE.");
} else {
if (src_as_state == nullptr || !src_as_state->built ||
!(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV)) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02490",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must have been built before "
"with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV set in "
"VkAccelerationStructureInfoNV::flags.");
}
}
if (dst_as_state != nullptr && !dst_as_state->update_scratch_memory_requirements_checked) {
skip |=
LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoUpdateMemReqQuery,
"vkCmdBuildAccelerationStructureNV(): Updating %s but vkGetAccelerationStructureMemoryRequirementsNV() "
"has not been called for update scratch memory.",
report_data->FormatHandle(dst_as_state->acceleration_structure()).c_str());
// Use requirements fetched at create time
}
if (scratch_buffer_state != nullptr && dst_as_state != nullptr &&
dst_as_state->update_scratch_memory_requirements.memoryRequirements.size >
(scratch_buffer_state->createInfo.size - scratchOffset)) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02492",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, The size member of the "
"VkMemoryRequirements structure returned from a call to "
"vkGetAccelerationStructureMemoryRequirementsNV with "
"VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and "
"VkAccelerationStructureMemoryRequirementsInfoNV::type set to "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV must be less than "
"or equal to the size of scratch minus scratchOffset");
}
} else {
if (dst_as_state != nullptr && !dst_as_state->build_scratch_memory_requirements_checked) {
skip |= LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoScratchMemReqQuery,
"vkCmdBuildAccelerationStructureNV(): Assigning scratch buffer to %s but "
"vkGetAccelerationStructureMemoryRequirementsNV() has not been called for scratch memory.",
report_data->FormatHandle(dst_as_state->acceleration_structure()).c_str());
// Use requirements fetched at create time
}
if (scratch_buffer_state != nullptr && dst_as_state != nullptr &&
dst_as_state->build_scratch_memory_requirements.memoryRequirements.size >
(scratch_buffer_state->createInfo.size - scratchOffset)) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02491",
"vkCmdBuildAccelerationStructureNV(): If update is VK_FALSE, The size member of the "
"VkMemoryRequirements structure returned from a call to "
"vkGetAccelerationStructureMemoryRequirementsNV with "
"VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and "
"VkAccelerationStructureMemoryRequirementsInfoNV::type set to "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV must be less than "
"or equal to the size of scratch minus scratchOffset");
}
}
if (instanceData != VK_NULL_HANDLE) {
const auto buffer_state = GetBufferState(instanceData);
if (buffer_state != nullptr) {
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true,
"VUID-VkAccelerationStructureInfoNV-instanceData-02782",
"vkCmdBuildAccelerationStructureNV()", "VK_BUFFER_USAGE_RAY_TRACING_BIT_NV");
}
}
if (scratch_buffer_state != nullptr) {
skip |= ValidateBufferUsageFlags(scratch_buffer_state, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true,
"VUID-VkAccelerationStructureInfoNV-scratch-02781", "vkCmdBuildAccelerationStructureNV()",
"VK_BUFFER_USAGE_RAY_TRACING_BIT_NV");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst,
VkAccelerationStructureNV src,
VkCopyAccelerationStructureModeNV mode) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURENV);
const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureStateNV(dst);
const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureStateNV(src);
if (dst_as_state != nullptr) {
skip |= ValidateMemoryIsBoundToAccelerationStructure(
dst_as_state, "vkCmdBuildAccelerationStructureNV()",
"UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
}
if (mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV) {
if (src_as_state != nullptr &&
(!src_as_state->built || !(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV))) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-src-03411",
"vkCmdCopyAccelerationStructureNV(): src must have been built with "
"VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV if mode is "
"VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV.");
}
}
if (!(mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV || mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR)) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-mode-03410",
"vkCmdCopyAccelerationStructureNV():mode must be VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR"
"or VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR.");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
const VkAllocationCallbacks *pAllocator) const {
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(accelerationStructure);
bool skip = false;
if (as_state) {
skip |= ValidateObjectNotInUse(as_state, "vkDestroyAccelerationStructureNV",
"VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyAccelerationStructureKHR(VkDevice device, VkAccelerationStructureKHR accelerationStructure,
const VkAllocationCallbacks *pAllocator) const {
const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(accelerationStructure);
bool skip = false;
if (as_state) {
skip |= ValidateObjectNotInUse(as_state, "vkDestroyAccelerationStructureKHR",
"VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442");
}
    if (as_state && pAllocator && !as_state->allocator) {
skip |= LogError(device, "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02444",
"vkDestroyAccelerationStructureKH:If no VkAllocationCallbacks were provided when accelerationStructure"
"was created, pAllocator must be NULL.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewportWScalingNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
uint32_t viewportCount,
const VkViewportWScalingNV *pViewportWScalings) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTWSCALINGNV);
return skip;
}
bool CoreChecks::PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETLINEWIDTH);
return skip;
}
bool CoreChecks::PreCallValidateCmdSetLineStippleEXT(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor,
uint16_t lineStipplePattern) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETLINESTIPPLEEXT);
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
float depthBiasSlopeFactor) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBIAS);
if ((depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBias-depthBiasClamp-00790",
"vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
"be set to 0.0.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETBLENDCONSTANTS);
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDS);
    // The extension was not created with a feature bit, which prevents displaying the 2 variations of the VUIDs
if (!IsExtEnabled(device_extensions.vk_ext_depth_range_unrestricted)) {
if (!(minDepthBounds >= 0.0) || !(minDepthBounds <= 1.0)) {
// Also VUID-vkCmdSetDepthBounds-minDepthBounds-00600
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBounds-minDepthBounds-02508",
"vkCmdSetDepthBounds(): VK_EXT_depth_range_unrestricted extension is not enabled and minDepthBounds "
"(=%f) is not within the [0.0, 1.0] range.",
minDepthBounds);
}
if (!(maxDepthBounds >= 0.0) || !(maxDepthBounds <= 1.0)) {
// Also VUID-vkCmdSetDepthBounds-maxDepthBounds-00601
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBounds-maxDepthBounds-02509",
"vkCmdSetDepthBounds(): VK_EXT_depth_range_unrestricted extension is not enabled and maxDepthBounds "
"(=%f) is not within the [0.0, 1.0] range.",
maxDepthBounds);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t compareMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSTENCILCOMPAREMASK);
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t writeMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSTENCILWRITEMASK);
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t reference) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSTENCILREFERENCE);
return skip;
}
bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
const uint32_t *pDynamicOffsets) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BINDDESCRIPTORSETS);
// Track total count of dynamic descriptor types to make sure we have an offset for each one
uint32_t total_dynamic_descriptors = 0;
string error_string = "";
const auto *pipeline_layout = GetPipelineLayout(layout);
for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
const cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[set_idx]);
if (descriptor_set) {
// Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
if (!VerifySetLayoutCompatibility(report_data, descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358",
"vkCmdBindDescriptorSets(): descriptorSet #%u being bound is not compatible with overlapping "
"descriptorSetLayout at index %u of "
"%s due to: %s.",
set_idx, set_idx + firstSet, report_data->FormatHandle(layout).c_str(), error_string.c_str());
}
auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
if (set_dynamic_descriptor_count) {
// First make sure we won't overstep bounds of pDynamicOffsets array
if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
// Test/report this here, such that we don't run past the end of pDynamicOffsets in the else clause
skip |=
LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
"vkCmdBindDescriptorSets(): descriptorSet #%u (%s) requires %u dynamicOffsets, but only %u "
"dynamicOffsets are left in "
"pDynamicOffsets array. There must be one dynamic offset for each dynamic descriptor being bound.",
set_idx, report_data->FormatHandle(pDescriptorSets[set_idx]).c_str(),
descriptor_set->GetDynamicDescriptorCount(), (dynamicOffsetCount - total_dynamic_descriptors));
                    // Set the number found to the maximum to prevent duplicate messages, or subsequent descriptor sets from
// testing against the "short tail" we're skipping below.
total_dynamic_descriptors = dynamicOffsetCount;
} else { // Validate dynamic offsets and Dynamic Offset Minimums
// offset for all sets (pDynamicOffsets)
uint32_t cur_dyn_offset = total_dynamic_descriptors;
// offset into this descriptor set
uint32_t set_dyn_offset = 0;
const auto &dsl = descriptor_set->GetLayout();
const auto binding_count = dsl->GetBindingCount();
const auto &limits = phys_dev_props.limits;
for (uint32_t i = 0; i < binding_count; i++) {
const auto *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(i);
// skip checking binding if not needed
if (cvdescriptorset::IsDynamicDescriptor(binding->descriptorType) == false) {
continue;
}
                        // If a descriptor set has only bindings 0 and 2, the binding_index values will be 0 and 2
const uint32_t binding_index = binding->binding;
const uint32_t descriptorCount = binding->descriptorCount;
// Need to loop through each descriptor count inside the binding
// if descriptorCount is zero the binding with a dynamic descriptor type does not count
for (uint32_t j = 0; j < descriptorCount; j++) {
const uint32_t offset = pDynamicOffsets[cur_dyn_offset];
if (offset == 0) {
// offset of zero is equivalent of not having the dynamic offset
cur_dyn_offset++;
set_dyn_offset++;
continue;
}
// Validate alignment with limit
if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) &&
(SafeModulo(offset, limits.minUniformBufferOffsetAlignment) != 0)) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971",
"vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u, but must be a multiple of "
"device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
cur_dyn_offset, offset, limits.minUniformBufferOffsetAlignment);
}
if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
(SafeModulo(offset, limits.minStorageBufferOffsetAlignment) != 0)) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972",
"vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u, but must be a multiple of "
"device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
cur_dyn_offset, offset, limits.minStorageBufferOffsetAlignment);
}
auto *descriptor = descriptor_set->GetDescriptorFromDynamicOffsetIndex(set_dyn_offset);
assert(descriptor != nullptr);
// Currently only GeneralBuffer are dynamic and need to be checked
if (descriptor->GetClass() == cvdescriptorset::DescriptorClass::GeneralBuffer) {
const auto *buffer_descriptor = static_cast<const cvdescriptorset::BufferDescriptor *>(descriptor);
const VkDeviceSize bound_range = buffer_descriptor->GetRange();
const VkDeviceSize bound_offset = buffer_descriptor->GetOffset();
//NOTE: null / invalid buffers may show up here, errors are raised elsewhere for this.
const BUFFER_STATE *buffer_state = buffer_descriptor->GetBufferState();
// Validate offset didn't go over buffer
if ((bound_range == VK_WHOLE_SIZE) && (offset > 0)) {
LogObjectList objlist(commandBuffer);
objlist.add(pDescriptorSets[set_idx]);
objlist.add(buffer_descriptor->GetBuffer());
skip |=
LogError(objlist, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-01979",
"vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is 0x%x, but must be zero since "
"the buffer descriptor's range is VK_WHOLE_SIZE in descriptorSet #%u binding #%u "
"descriptor[%u].",
cur_dyn_offset, offset, set_idx, binding_index, j);
} else if (buffer_state != nullptr && (bound_range != VK_WHOLE_SIZE) &&
((offset + bound_range + bound_offset) > buffer_state->createInfo.size)) {
LogObjectList objlist(commandBuffer);
objlist.add(pDescriptorSets[set_idx]);
objlist.add(buffer_descriptor->GetBuffer());
skip |=
LogError(objlist, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-01979",
"vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is 0x%x which when added to the "
"buffer descriptor's range (0x%" PRIxLEAST64
") is greater than the size of the buffer (0x%" PRIxLEAST64
") in descriptorSet #%u binding #%u descriptor[%u].",
cur_dyn_offset, offset, bound_range, buffer_state->createInfo.size, set_idx,
binding_index, j);
}
}
cur_dyn_offset++;
set_dyn_offset++;
} // descriptorCount loop
} // bindingCount loop
// Keep running total of dynamic descriptor count to verify at the end
total_dynamic_descriptors += set_dynamic_descriptor_count;
}
}
if (descriptor_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE) {
skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-pDescriptorSets-04616",
"vkCmdBindDescriptorSets(): pDescriptorSets[%" PRIu32 "] was allocated from a pool that was created with VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE.", set_idx);
}
} else {
skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-pDescriptorSets-parameter",
"vkCmdBindDescriptorSets(): Attempt to bind %s that doesn't exist!",
report_data->FormatHandle(pDescriptorSets[set_idx]).c_str());
}
}
// dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
if (total_dynamic_descriptors != dynamicOffsetCount) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
"vkCmdBindDescriptorSets(): Attempting to bind %u descriptorSets with %u dynamic descriptors, but "
"dynamicOffsetCount is %u. It should "
"exactly match the number of dynamic descriptors.",
setCount, total_dynamic_descriptors, dynamicOffsetCount);
}
    // The sum of firstSet and descriptorSetCount must be less than or equal to setLayoutCount
if ((firstSet + setCount) > static_cast<uint32_t>(pipeline_layout->set_layouts.size())) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindDescriptorSets-firstSet-00360",
"vkCmdBindDescriptorSets(): Sum of firstSet (%u) and descriptorSetCount (%u) is greater than "
"VkPipelineLayoutCreateInfo::setLayoutCount "
"(%zu) when pipeline layout was created",
firstSet, setCount, pipeline_layout->set_layouts.size());
}
static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindDescriptorSets-pipelineBindPoint-00361"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindDescriptorSets-pipelineBindPoint-00361"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdBindDescriptorSets-pipelineBindPoint-00361")};
skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors);
return skip;
}
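// Worked example for the dynamic offset accounting in PreCallValidateCmdBindDescriptorSets above (illustrative):
// binding set 0 with one UNIFORM_BUFFER_DYNAMIC descriptor and set 1 with two STORAGE_BUFFER_DYNAMIC descriptors
// requires dynamicOffsetCount == 3, with pDynamicOffsets consumed in set/binding order as {set0[0], set1[0], set1[1]}
// and each non-zero offset a multiple of the matching min*BufferOffsetAlignment limit.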
// Validates that the supplied bind point is supported for the command buffer (viz. the command pool)
// Takes a map of error codes as some of the VUIDs (e.g. vkCmdBindPipeline) are written per bind point
// TODO add vkCmdBindPipeline bind_point validation using this call.
bool CoreChecks::ValidatePipelineBindPoint(const CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint bind_point, const char *func_name,
const std::map<VkPipelineBindPoint, std::string> &bind_errors) const {
bool skip = false;
auto pool = cb_state->command_pool.get();
if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool
static const std::map<VkPipelineBindPoint, VkQueueFlags> flag_mask = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT)),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, static_cast<VkQueueFlags>(VK_QUEUE_COMPUTE_BIT)),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR,
static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)),
};
const auto &qfp = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex];
if (0 == (qfp.queueFlags & flag_mask.at(bind_point))) {
const std::string &error = bind_errors.at(bind_point);
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(cb_state->createInfo.commandPool);
skip |= LogError(objlist, error, "%s: %s was allocated from %s that does not support bindpoint %s.", func_name,
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(cb_state->createInfo.commandPool).c_str(),
string_VkPipelineBindPoint(bind_point));
}
}
return skip;
}
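// Example of the queue-capability mapping above (illustrative): a command buffer allocated from a pool whose queue
// family advertises only VK_QUEUE_TRANSFER_BIT fails ValidatePipelineBindPoint for VK_PIPELINE_BIND_POINT_COMPUTE,
// since flag_mask requires VK_QUEUE_COMPUTE_BIT for that bind point.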
bool CoreChecks::PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const char *func_name = "vkCmdPushDescriptorSetKHR()";
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETKHR);
static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363")};
skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, func_name, bind_errors);
const auto layout_data = GetPipelineLayout(layout);
// Validate the set index points to a push descriptor set and is in range
if (layout_data) {
const auto &set_layouts = layout_data->set_layouts;
if (set < set_layouts.size()) {
const auto &dsl = set_layouts[set];
if (dsl) {
if (!dsl->IsPushDescriptor()) {
skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
"%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name,
set, report_data->FormatHandle(layout).c_str());
} else {
// Create an empty proxy in order to use the existing descriptor set update validation
// TODO move the validation (like this) that doesn't need descriptor set state to the DSL object so we
// don't have to do this.
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this);
skip |= ValidatePushDescriptorsUpdate(&proxy_ds, descriptorWriteCount, pDescriptorWrites, func_name);
}
}
} else {
skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
"%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set,
report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(set_layouts.size()));
}
}
return skip;
}
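// Note (assumption about what IsPushDescriptor() above reflects): the set layout at index `set` is expected to have
// been created with VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR; a layout without that flag triggers
// VUID-vkCmdPushDescriptorSetKHR-set-00365.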
bool CoreChecks::PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkIndexType indexType) const {
const auto buffer_state = GetBufferState(buffer);
const auto cb_node = GetCBState(commandBuffer);
assert(buffer_state);
assert(cb_node);
bool skip =
ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, "VUID-vkCmdBindIndexBuffer-buffer-00433",
"vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
skip |= ValidateCmd(cb_node, CMD_BINDINDEXBUFFER);
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindIndexBuffer()", "VUID-vkCmdBindIndexBuffer-buffer-00434");
const auto offset_align = GetIndexAlignment(indexType);
if (offset % offset_align) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00432",
"vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
string_VkIndexType(indexType));
}
    if (offset >= buffer_state->createInfo.size) {
        skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00431",
                         "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64
                         ") of buffer (%s).",
                         offset, buffer_state->createInfo.size, report_data->FormatHandle(buffer_state->buffer()).c_str());
}
return skip;
}
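// Alignment example for the offset check above (assuming GetIndexAlignment returns the index size in bytes: 2 for
// VK_INDEX_TYPE_UINT16, 4 for VK_INDEX_TYPE_UINT32, 1 for VK_INDEX_TYPE_UINT8_EXT): an offset of 6 is valid for
// UINT16 indices but triggers VUID-vkCmdBindIndexBuffer-offset-00432 for UINT32 indices.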
bool CoreChecks::PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) const {
const auto cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS);
for (uint32_t i = 0; i < bindingCount; ++i) {
const auto buffer_state = GetBufferState(pBuffers[i]);
if (buffer_state) {
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true,
"VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()",
"VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers()",
"VUID-vkCmdBindVertexBuffers-pBuffers-00628");
if (pOffsets[i] >= buffer_state->createInfo.size) {
skip |=
LogError(buffer_state->buffer(), "VUID-vkCmdBindVertexBuffers-pOffsets-00626",
"vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]);
}
}
}
return skip;
}
// Validate that an image's sampleCount matches the requirement for a specific API call
bool CoreChecks::ValidateImageSampleCount(const IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count, const char *location,
const std::string &msgCode) const {
bool skip = false;
if (image_state->createInfo.samples != sample_count) {
skip = LogError(image_state->image(), msgCode, "%s for %s was created with a sample count of %s but must be %s.", location,
report_data->FormatHandle(image_state->image()).c_str(),
string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
}
return skip;
}
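// Usage note for ValidateImageSampleCount above (illustrative): callers pass the sample count a given API point
// requires, e.g. a resolve destination image must be VK_SAMPLE_COUNT_1_BIT; any mismatch with the image's
// createInfo.samples is reported under the caller-supplied VUID.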
bool CoreChecks::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize dataSize, const void *pData) const {
const auto cb_state = GetCBState(commandBuffer);
assert(cb_state);
const auto dst_buffer_state = GetBufferState(dstBuffer);
assert(dst_buffer_state);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-dstBuffer-00035");
// Validate that DST buffer has correct usage flags set
skip |=
ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdUpdateBuffer-dstBuffer-00034",
"vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmd(cb_state, CMD_UPDATEBUFFER);
skip |=
ValidateProtectedBuffer(cb_state, dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01813");
skip |=
ValidateUnprotectedBuffer(cb_state, dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01814");
if (dstOffset >= dst_buffer_state->createInfo.size) {
skip |= LogError(
commandBuffer, "VUID-vkCmdUpdateBuffer-dstOffset-00032",
"vkCmdUpdateBuffer() dstOffset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64 ") of buffer (%s).",
dstOffset, dst_buffer_state->createInfo.size, report_data->FormatHandle(dst_buffer_state->buffer()).c_str());
} else if (dataSize > dst_buffer_state->createInfo.size - dstOffset) {
skip |= LogError(commandBuffer, "VUID-vkCmdUpdateBuffer-dataSize-00033",
"vkCmdUpdateBuffer() dataSize (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64
") of buffer (%s) minus dstOffset (0x%" PRIxLEAST64 ").",
dataSize, dst_buffer_state->createInfo.size, report_data->FormatHandle(dst_buffer_state->buffer()).c_str(),
dstOffset);
}
return skip;
}
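// Worked example for the bounds checks above (illustrative): for a 1024-byte dstBuffer, dstOffset = 1024 trips
// VUID-vkCmdUpdateBuffer-dstOffset-00032, and dstOffset = 512 with dataSize = 768 trips
// VUID-vkCmdUpdateBuffer-dataSize-00033 because 768 > 1024 - 512.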
bool CoreChecks::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETEVENT);
Location loc(Func::vkCmdSetEvent, Field::stageMask);
LogObjectList objects(commandBuffer);
skip |= ValidatePipelineStage(objects, loc, cb_state->GetQueueFlags(), stageMask);
skip |= ValidateStageMaskHost(loc, stageMask);
return skip;
}
bool CoreChecks::PreCallValidateCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
const VkDependencyInfoKHR *pDependencyInfo) const {
LogObjectList objects(commandBuffer);
objects.add(event);
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
if (!enabled_features.synchronization2_features.synchronization2) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetEvent2KHR-synchronization2-03824",
"vkCmdSetEvent2KHR(): Synchronization2 feature is not enabled");
}
skip |= ValidateCmd(cb_state, CMD_SETEVENT);
Location loc(Func::vkCmdSetEvent2KHR, Field::pDependencyInfo);
if (pDependencyInfo->dependencyFlags != 0) {
skip |= LogError(objects, "VUID-vkCmdSetEvent2KHR-dependencyFlags-03825", "%s (%s) must be 0",
loc.dot(Field::dependencyFlags).Message().c_str(),
string_VkDependencyFlags(pDependencyInfo->dependencyFlags).c_str());
}
skip |= ValidateDependencyInfo(objects, loc, cb_state, pDependencyInfo);
return skip;
}
bool CoreChecks::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
LogObjectList objects(commandBuffer);
Location loc(Func::vkCmdResetEvent, Field::stageMask);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_RESETEVENT);
skip |= ValidatePipelineStage(objects, loc, cb_state->GetQueueFlags(), stageMask);
skip |= ValidateStageMaskHost(loc, stageMask);
return skip;
}
bool CoreChecks::PreCallValidateCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
VkPipelineStageFlags2KHR stageMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
LogObjectList objects(commandBuffer);
Location loc(Func::vkCmdResetEvent2KHR, Field::stageMask);
bool skip = false;
if (!enabled_features.synchronization2_features.synchronization2) {
skip |= LogError(commandBuffer, "VUID-vkCmdResetEvent2KHR-synchronization2-03829",
"vkCmdResetEvent2KHR(): Synchronization2 feature is not enabled");
}
skip |= ValidateCmd(cb_state, CMD_RESETEVENT);
skip |= ValidatePipelineStage(objects, loc, cb_state->GetQueueFlags(), stageMask);
skip |= ValidateStageMaskHost(loc, stageMask);
return skip;
}
static bool HasNonFramebufferStagePipelineStageFlags(VkPipelineStageFlags2KHR inflags) {
return (inflags & ~(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)) != 0;
}
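// Example (illustrative): a mask containing only VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT returns false here,
// while adding VK_PIPELINE_STAGE_VERTEX_SHADER_BIT makes it return true, since vertex shading is not a
// framebuffer-space stage.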
// transient helper struct for checking parts of VUID 02285
struct RenderPassDepState {
using Location = core_error::Location;
using Func = core_error::Func;
using Struct = core_error::Struct;
using Field = core_error::Field;
const CoreChecks *core;
const std::string func_name;
const std::string vuid;
uint32_t active_subpass;
const VkRenderPass rp_handle;
const VkPipelineStageFlags2KHR disabled_features;
const std::vector<uint32_t> &self_dependencies;
const safe_VkSubpassDependency2 *dependencies;
RenderPassDepState(const CoreChecks *c, const std::string &f, const std::string &v, uint32_t subpass, const VkRenderPass handle,
const DeviceFeatures &features, const std::vector<uint32_t> &self_deps,
const safe_VkSubpassDependency2 *deps)
: core(c),
func_name(f),
vuid(v),
active_subpass(subpass),
rp_handle(handle),
disabled_features(sync_utils::DisabledPipelineStages(features)),
self_dependencies(self_deps),
dependencies(deps) {}
VkMemoryBarrier2KHR GetSubPassDepBarrier(const safe_VkSubpassDependency2 &dep) {
VkMemoryBarrier2KHR result;
const auto *barrier = LvlFindInChain<VkMemoryBarrier2KHR>(dep.pNext);
if (barrier) {
result = *barrier;
} else {
result.srcStageMask = dep.srcStageMask;
result.dstStageMask = dep.dstStageMask;
result.srcAccessMask = dep.srcAccessMask;
result.dstAccessMask = dep.dstAccessMask;
}
return result;
}
bool ValidateStage(const Location &loc, VkPipelineStageFlags2KHR src_stage_mask, VkPipelineStageFlags2KHR dst_stage_mask) {
// Look for matching mask in any self-dependency
bool match = false;
for (const auto self_dep_index : self_dependencies) {
const auto sub_dep = GetSubPassDepBarrier(dependencies[self_dep_index]);
auto sub_src_stage_mask =
sync_utils::ExpandPipelineStages(sub_dep.srcStageMask, sync_utils::kAllQueueTypes, disabled_features);
auto sub_dst_stage_mask =
sync_utils::ExpandPipelineStages(sub_dep.dstStageMask, sync_utils::kAllQueueTypes, disabled_features);
match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(src_stage_mask == (sub_src_stage_mask & src_stage_mask))) &&
((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask)));
if (match) break;
}
if (!match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
core->LogError(rp_handle, vuid,
"%s (0x%" PRIx64
") is not a subset of VkSubpassDependency srcAccessMask "
"for any self-dependency of subpass %d of %s for which dstAccessMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
loc.dot(Field::srcStageMask).Message().c_str(), src_stage_mask, active_subpass,
core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str());
core->LogError(rp_handle, vuid,
"%s (0x%" PRIx64
") is not a subset of VkSubpassDependency dstAccessMask "
"for any self-dependency of subpass %d of %s for which srcAccessMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
loc.dot(Field::dstStageMask).Message().c_str(), dst_stage_mask, active_subpass,
core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str());
}
return !match;
}
bool ValidateAccess(const Location &loc, VkAccessFlags2KHR src_access_mask, VkAccessFlags2KHR dst_access_mask) {
bool match = false;
for (const auto self_dep_index : self_dependencies) {
const auto sub_dep = GetSubPassDepBarrier(dependencies[self_dep_index]);
match = (src_access_mask == (sub_dep.srcAccessMask & src_access_mask)) &&
(dst_access_mask == (sub_dep.dstAccessMask & dst_access_mask));
if (match) break;
}
if (!match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
core->LogError(rp_handle, vuid,
"%s (0x%" PRIx64
") is not a subset of VkSubpassDependency "
"srcAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
loc.dot(Field::srcAccessMask).Message().c_str(), src_access_mask, active_subpass,
core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str());
core->LogError(rp_handle, vuid,
"%s (0x%" PRIx64
") is not a subset of VkSubpassDependency "
"dstAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
loc.dot(Field::dstAccessMask).Message().c_str(), dst_access_mask, active_subpass,
core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str());
}
return !match;
}
bool ValidateDependencyFlag(VkDependencyFlags dependency_flags) {
bool match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
match = sub_dep.dependencyFlags == dependency_flags;
if (match) break;
}
if (!match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
core->LogError(rp_handle, vuid,
"%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any "
"self-dependency of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
func_name.c_str(), dependency_flags, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
}
return !match;
}
};
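// Subset-matching example for RenderPassDepState above (illustrative): if a subpass self-dependency declares
// srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT and dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, a
// barrier with srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT and
// dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT matches, because each barrier mask is a subset of the
// (expanded) dependency mask.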
// Validate VUs for Pipeline Barriers that are within a renderPass
// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
bool CoreChecks::ValidateRenderPassPipelineBarriers(const Location &outer_loc, const CMD_BUFFER_STATE *cb_state,
VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
const VkBufferMemoryBarrier *buffer_mem_barriers,
uint32_t image_mem_barrier_count,
const VkImageMemoryBarrier *image_barriers) const {
bool skip = false;
const auto& rp_state = cb_state->activeRenderPass;
RenderPassDepState state(this, outer_loc.StringFunc().c_str(), "VUID-vkCmdPipelineBarrier-pDependencies-02285",
cb_state->activeSubpass, rp_state->renderPass(), enabled_features,
rp_state->self_dependencies[cb_state->activeSubpass], rp_state->createInfo.pDependencies);
if (state.self_dependencies.size() == 0) {
skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s Barriers cannot be set during subpass %d of %s with no self-dependency specified.",
outer_loc.Message().c_str(), state.active_subpass, report_data->FormatHandle(state.rp_handle).c_str());
return skip;
}
// Grab ref to current subpassDescription up-front for use below
const auto &sub_desc = rp_state->createInfo.pSubpasses[state.active_subpass];
skip |= state.ValidateStage(outer_loc, src_stage_mask, dst_stage_mask);
if (0 != buffer_mem_barrier_count) {
skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178",
"%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", state.func_name.c_str(),
buffer_mem_barrier_count, state.active_subpass, report_data->FormatHandle(rp_state->renderPass()).c_str());
}
for (uint32_t i = 0; i < mem_barrier_count; ++i) {
const auto &mem_barrier = mem_barriers[i];
Location loc(outer_loc.function, Struct::VkMemoryBarrier, Field::pMemoryBarriers, i);
skip |= state.ValidateAccess(loc, mem_barrier.srcAccessMask, mem_barrier.dstAccessMask);
}
for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
const auto &img_barrier = image_barriers[i];
Location loc(outer_loc.function, Struct::VkImageMemoryBarrier, Field::pImageMemoryBarriers, i);
skip |= state.ValidateAccess(loc, img_barrier.srcAccessMask, img_barrier.dstAccessMask);
if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182",
"%s is %d and dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
loc.dot(Field::srcQueueFamilyIndex).Message().c_str(), img_barrier.srcQueueFamilyIndex,
img_barrier.dstQueueFamilyIndex);
}
// Secondary CBs can have null framebuffer so record will queue up validation in that case 'til FB is known
if (VK_NULL_HANDLE != cb_state->activeFramebuffer) {
skip |= ValidateImageBarrierAttachment(loc, cb_state, cb_state->activeFramebuffer.get(), state.active_subpass, sub_desc,
state.rp_handle, img_barrier);
}
}
skip |= state.ValidateDependencyFlag(dependency_flags);
return skip;
}
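// Synchronization2 variant: validates barriers supplied through VkDependencyInfoKHR against the
// self-dependencies of the active subpass.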
bool CoreChecks::ValidateRenderPassPipelineBarriers(const Location &outer_loc, const CMD_BUFFER_STATE *cb_state,
const VkDependencyInfoKHR *dep_info) const {
bool skip = false;
const auto& rp_state = cb_state->activeRenderPass;
RenderPassDepState state(this, outer_loc.StringFunc().c_str(), "VUID-vkCmdPipelineBarrier2KHR-pDependencies-02285",
cb_state->activeSubpass, rp_state->renderPass(), enabled_features,
rp_state->self_dependencies[cb_state->activeSubpass], rp_state->createInfo.pDependencies);
if (state.self_dependencies.size() == 0) {
skip |= LogError(state.rp_handle, state.vuid,
"%s: Barriers cannot be set during subpass %d of %s with no self-dependency specified.",
state.func_name.c_str(), state.active_subpass, report_data->FormatHandle(rp_state->renderPass()).c_str());
return skip;
}
// Grab ref to current subpassDescription up-front for use below
const auto &sub_desc = rp_state->createInfo.pSubpasses[state.active_subpass];
for (uint32_t i = 0; i < dep_info->memoryBarrierCount; ++i) {
const auto &mem_barrier = dep_info->pMemoryBarriers[i];
Location loc(outer_loc.function, Struct::VkMemoryBarrier2KHR, Field::pMemoryBarriers, i);
skip |= state.ValidateStage(loc, mem_barrier.srcStageMask, mem_barrier.dstStageMask);
skip |= state.ValidateAccess(loc, mem_barrier.srcAccessMask, mem_barrier.dstAccessMask);
}
if (0 != dep_info->bufferMemoryBarrierCount) {
skip |=
LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier2KHR-bufferMemoryBarrierCount-01178",
"%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", state.func_name.c_str(),
dep_info->bufferMemoryBarrierCount, state.active_subpass, report_data->FormatHandle(state.rp_handle).c_str());
}
for (uint32_t i = 0; i < dep_info->imageMemoryBarrierCount; ++i) {
const auto &img_barrier = dep_info->pImageMemoryBarriers[i];
Location loc(outer_loc.function, Struct::VkImageMemoryBarrier2KHR, Field::pImageMemoryBarriers, i);
skip |= state.ValidateStage(loc, img_barrier.srcStageMask, img_barrier.dstStageMask);
skip |= state.ValidateAccess(loc, img_barrier.srcAccessMask, img_barrier.dstAccessMask);
if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier2KHR-srcQueueFamilyIndex-01182",
"%s is %d and dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
loc.dot(Field::srcQueueFamilyIndex).Message().c_str(), img_barrier.srcQueueFamilyIndex,
img_barrier.dstQueueFamilyIndex);
}
// Secondary CBs can have null framebuffer so record will queue up validation in that case 'til FB is known
if (VK_NULL_HANDLE != cb_state->activeFramebuffer) {
skip |= ValidateImageBarrierAttachment(loc, cb_state, cb_state->activeFramebuffer.get(), state.active_subpass, sub_desc,
state.rp_handle, img_barrier);
}
}
skip |= state.ValidateDependencyFlag(dep_info->dependencyFlags);
return skip;
}
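// Verify that every bit in stage_mask is supported by the queue family capabilities of the command
// buffer's pool; meta stages are checked first, then the remaining bits individually.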
bool CoreChecks::ValidateStageMasksAgainstQueueCapabilities(const LogObjectList &objects, const Location &loc,
VkQueueFlags queue_flags, VkPipelineStageFlags2KHR stage_mask) const {
bool skip = false;
// these are always allowed.
stage_mask &= ~(VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR | VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR |
VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR | VK_PIPELINE_STAGE_2_HOST_BIT_KHR);
if (stage_mask == 0) {
return skip;
}
static const std::map<VkPipelineStageFlags2KHR, VkQueueFlags> metaFlags{
{VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
{VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR, VK_QUEUE_GRAPHICS_BIT},
};
for (const auto &entry : metaFlags) {
if (((entry.first & stage_mask) != 0) && ((entry.second & queue_flags) == 0)) {
const auto& vuid = sync_vuid_maps::GetStageQueueCapVUID(loc, entry.first);
skip |= LogError(objects, vuid,
"%s flag %s is not compatible with the queue family properties (%s) of this command buffer.",
loc.Message().c_str(), sync_utils::StringPipelineStageFlags(entry.first).c_str(),
string_VkQueueFlags(queue_flags).c_str());
}
stage_mask &= ~entry.first;
}
if (stage_mask == 0) {
return skip;
}
auto supported_flags = sync_utils::ExpandPipelineStages(VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR, queue_flags);
auto bad_flags = stage_mask & ~supported_flags;
    // Look up each bit in the stage mask and check for overlap between its table bits and queue_flags
for (size_t i = 0; i < sizeof(bad_flags) * 8; i++) {
VkPipelineStageFlags2KHR bit = (1ULL << i) & bad_flags;
if (bit) {
const auto& vuid = sync_vuid_maps::GetStageQueueCapVUID(loc, bit);
skip |= LogError(
objects, vuid, "%s flag %s is not compatible with the queue family properties (%s) of this command buffer.",
loc.Message().c_str(), sync_utils::StringPipelineStageFlags(bit).c_str(), string_VkQueueFlags(queue_flags).c_str());
}
}
return skip;
}
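// Flag stage bits that require device features which are not enabled, and a zero stage mask when
// the synchronization2 feature is not enabled.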
bool CoreChecks::ValidatePipelineStageFeatureEnables(const LogObjectList &objects, const Location &loc,
VkPipelineStageFlags2KHR stage_mask) const {
bool skip = false;
if (!enabled_features.synchronization2_features.synchronization2 && stage_mask == 0) {
const auto& vuid = sync_vuid_maps::GetBadFeatureVUID(loc, 0);
std::stringstream msg;
msg << loc.Message() << " must not be 0 unless synchronization2 is enabled.";
skip |= LogError(objects, vuid, "%s", msg.str().c_str());
}
auto disabled_stages = sync_utils::DisabledPipelineStages(enabled_features);
auto bad_bits = stage_mask & disabled_stages;
if (bad_bits == 0) {
return skip;
}
for (size_t i = 0; i < sizeof(bad_bits) * 8; i++) {
VkPipelineStageFlags2KHR bit = 1ULL << i;
if (bit & bad_bits) {
const auto& vuid = sync_vuid_maps::GetBadFeatureVUID(loc, bit);
std::stringstream msg;
msg << loc.Message() << " includes " << sync_utils::StringPipelineStageFlags(bit) << " when the device does not have "
<< sync_vuid_maps::kFeatureNameMap.at(bit) << " feature enabled.";
skip |= LogError(objects, vuid, "%s", msg.str().c_str());
}
}
return skip;
}
bool CoreChecks::ValidatePipelineStage(const LogObjectList &objects, const Location &loc, VkQueueFlags queue_flags,
VkPipelineStageFlags2KHR stage_mask) const {
bool skip = false;
skip |= ValidateStageMasksAgainstQueueCapabilities(objects, loc, queue_flags, stage_mask);
skip |= ValidatePipelineStageFeatureEnables(objects, loc, stage_mask);
return skip;
}
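// Verify that each access bit is compatible with at least one stage in stage_mask after expanding
// meta stages; generic MEMORY_READ/WRITE accesses and ALL_COMMANDS stage masks are exempt.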
bool CoreChecks::ValidateAccessMask(const LogObjectList &objects, const Location &loc, VkQueueFlags queue_flags,
VkAccessFlags2KHR access_mask, VkPipelineStageFlags2KHR stage_mask) const {
bool skip = false;
// Early out if all commands set
if ((stage_mask & VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR) != 0) return skip;
// or if only generic memory accesses are specified (or we got a 0 mask)
access_mask &= ~(VK_ACCESS_2_MEMORY_READ_BIT_KHR | VK_ACCESS_2_MEMORY_WRITE_BIT_KHR);
if (access_mask == 0) return skip;
auto expanded_stages = sync_utils::ExpandPipelineStages(stage_mask, queue_flags); // TODO:
auto valid_accesses = sync_utils::CompatibleAccessMask(expanded_stages);
auto bad_accesses = (access_mask & ~valid_accesses);
if (bad_accesses == 0) {
return skip;
}
for (size_t i = 0; i < sizeof(bad_accesses) * 8; i++) {
VkAccessFlags2KHR bit = (1ULL << i);
if (bad_accesses & bit) {
const auto& vuid = sync_vuid_maps::GetBadAccessFlagsVUID(loc, bit);
std::stringstream msg;
msg << loc.Message() << " bit " << sync_utils::StringAccessFlags(bit) << " is not supported by stage mask ("
<< sync_utils::StringPipelineStageFlags(stage_mask) << ").";
skip |= LogError(objects, vuid, "%s", msg.str().c_str());
}
}
return skip;
}
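// Submit-time check: the srcStageMask passed to vkCmdWaitEvents must be the bitwise OR of the
// stage masks used when the waited events were set (optionally including HOST).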
bool CoreChecks::ValidateEventStageMask(const ValidationStateTracker *state_data, const CMD_BUFFER_STATE *pCB, size_t eventCount,
size_t firstEventIndex, VkPipelineStageFlags2KHR sourceStageMask,
EventToStageMap *localEventToStageMap) {
bool skip = false;
VkPipelineStageFlags2KHR stage_mask = 0;
const auto max_event = std::min((firstEventIndex + eventCount), pCB->events.size());
for (size_t event_index = firstEventIndex; event_index < max_event; ++event_index) {
auto event = pCB->events[event_index];
auto event_data = localEventToStageMap->find(event);
if (event_data != localEventToStageMap->end()) {
stage_mask |= event_data->second;
} else {
auto global_event_data = state_data->GetEventState(event);
if (!global_event_data) {
skip |= state_data->LogError(event, kVUID_Core_DrawState_InvalidEvent,
"%s cannot be waited on if it has never been set.",
state_data->report_data->FormatHandle(event).c_str());
} else {
stage_mask |= global_event_data->stageMask;
}
}
}
// TODO: Need to validate that host_bit is only set if set event is called
// but set event can be called at any time.
if (sourceStageMask != stage_mask && sourceStageMask != (stage_mask | VK_PIPELINE_STAGE_HOST_BIT)) {
skip |= state_data->LogError(
pCB->commandBuffer(), "VUID-vkCmdWaitEvents-srcStageMask-parameter",
"Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%" PRIx64
" which must be the bitwise OR of "
"the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
"vkSetEvent but instead is 0x%" PRIx64 ".",
sourceStageMask, stage_mask);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
auto queue_flags = cb_state->GetQueueFlags();
LogObjectList objects(commandBuffer);
Location loc(Func::vkCmdWaitEvents);
skip |= ValidatePipelineStage(objects, loc.dot(Field::srcStageMask), queue_flags, srcStageMask);
skip |= ValidatePipelineStage(objects, loc.dot(Field::dstStageMask), queue_flags, dstStageMask);
skip |= ValidateCmd(cb_state, CMD_WAITEVENTS);
skip |=
ValidateBarriers(loc.dot(Field::pDependencyInfo), cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
for (uint32_t i = 0; i < bufferMemoryBarrierCount; ++i) {
if (pBufferMemoryBarriers[i].srcQueueFamilyIndex != pBufferMemoryBarriers[i].dstQueueFamilyIndex) {
skip |= LogError(commandBuffer, "VUID-vkCmdWaitEvents-srcQueueFamilyIndex-02803",
"vkCmdWaitEvents(): pBufferMemoryBarriers[%" PRIu32 "] has different srcQueueFamilyIndex (%" PRIu32
") and dstQueueFamilyIndex (%" PRIu32 ").",
i, pBufferMemoryBarriers[i].srcQueueFamilyIndex, pBufferMemoryBarriers[i].dstQueueFamilyIndex);
}
}
for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i) {
if (pImageMemoryBarriers[i].srcQueueFamilyIndex != pImageMemoryBarriers[i].dstQueueFamilyIndex) {
skip |= LogError(commandBuffer, "VUID-vkCmdWaitEvents-srcQueueFamilyIndex-02803",
"vkCmdWaitEvents(): pImageMemoryBarriers[%" PRIu32 "] has different srcQueueFamilyIndex (%" PRIu32
") and dstQueueFamilyIndex (%" PRIu32 ").",
i, pImageMemoryBarriers[i].srcQueueFamilyIndex, pImageMemoryBarriers[i].dstQueueFamilyIndex);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
const VkDependencyInfoKHR *pDependencyInfos) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
if (!enabled_features.synchronization2_features.synchronization2) {
skip |= LogError(commandBuffer, "VUID-vkCmdWaitEvents2KHR-synchronization2-03836",
"vkCmdWaitEvents2KHR(): Synchronization2 feature is not enabled");
}
for (uint32_t i = 0; (i < eventCount) && !skip; i++) {
LogObjectList objects(commandBuffer);
objects.add(pEvents[i]);
Location loc(Func::vkCmdWaitEvents2KHR, Field::pDependencyInfos, i);
if (pDependencyInfos[i].dependencyFlags != 0) {
skip |= LogError(objects, "VUID-vkCmdWaitEvents2KHR-dependencyFlags-03844", "%s (%s) must be 0.",
loc.dot(Field::dependencyFlags).Message().c_str(),
string_VkDependencyFlags(pDependencyInfos[i].dependencyFlags).c_str());
}
skip |= ValidateDependencyInfo(objects, loc, cb_state, &pDependencyInfos[i]);
}
skip |= ValidateCmd(cb_state, CMD_WAITEVENTS);
return skip;
}
void CoreChecks::PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    // The StateTracker record call below will add the waited events to the events vector.
auto first_event_index = cb_state->events.size();
StateTracker::PreCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask, memoryBarrierCount,
pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
auto event_added_count = cb_state->events.size() - first_event_index;
const CMD_BUFFER_STATE *cb_state_const = cb_state;
cb_state->eventUpdates.emplace_back(
[cb_state_const, event_added_count, first_event_index, sourceStageMask](
const ValidationStateTracker *device_data, bool do_validate, EventToStageMap *localEventToStageMap) {
if (!do_validate) return false;
return ValidateEventStageMask(device_data, cb_state_const, event_added_count, first_event_index, sourceStageMask,
localEventToStageMap);
});
TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
void CoreChecks::PreCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
const VkDependencyInfoKHR *pDependencyInfos) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    // The StateTracker record call below will add the waited events to the events vector.
auto first_event_index = cb_state->events.size();
StateTracker::PreCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos);
auto event_added_count = cb_state->events.size() - first_event_index;
const CMD_BUFFER_STATE *cb_state_const = cb_state;
for (uint32_t i = 0; i < eventCount; i++) {
const auto &dep_info = pDependencyInfos[i];
auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info);
cb_state->eventUpdates.emplace_back(
[cb_state_const, event_added_count, first_event_index, stage_masks](
const ValidationStateTracker *device_data, bool do_validate, EventToStageMap *localEventToStageMap) {
if (!do_validate) return false;
return ValidateEventStageMask(device_data, cb_state_const, event_added_count, first_event_index, stage_masks.src,
localEventToStageMap);
});
TransitionImageLayouts(cb_state, dep_info.imageMemoryBarrierCount, dep_info.pImageMemoryBarriers);
}
}
void CoreChecks::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordBarriers(Func::vkCmdWaitEvents, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
}
void CoreChecks::PostCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
const VkDependencyInfoKHR *pDependencyInfos) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
for (uint32_t i = 0; i < eventCount; i++) {
const auto &dep_info = pDependencyInfos[i];
RecordBarriers(Func::vkCmdWaitEvents2KHR, cb_state, dep_info);
}
}
bool CoreChecks::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
LogObjectList objects(commandBuffer);
auto queue_flags = cb_state->GetQueueFlags();
Location loc(Func::vkCmdPipelineBarrier);
skip |= ValidatePipelineStage(objects, loc.dot(Field::srcStageMask), queue_flags, srcStageMask);
skip |= ValidatePipelineStage(objects, loc.dot(Field::dstStageMask), queue_flags, dstStageMask);
skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER);
if (cb_state->activeRenderPass) {
skip |= ValidateRenderPassPipelineBarriers(loc, cb_state, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
if (skip) return true; // Early return to avoid redundant errors from below calls
} else {
if (dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
skip = LogError(objects, "VUID-vkCmdPipelineBarrier-dependencyFlags-01186",
"%s VK_DEPENDENCY_VIEW_LOCAL_BIT must not be set outside of a render pass instance",
loc.dot(Field::dependencyFlags).Message().c_str());
}
}
skip |= ValidateBarriers(loc, cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
return skip;
}
bool CoreChecks::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer,
const VkDependencyInfoKHR *pDependencyInfo) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
LogObjectList objects(commandBuffer);
Location loc(Func::vkCmdPipelineBarrier2KHR, Field::pDependencyInfo);
if (!enabled_features.synchronization2_features.synchronization2) {
skip |= LogError(commandBuffer, "VUID-vkCmdPipelineBarrier2KHR-synchronization2-03848",
"vkCmdPipelineBarrier2KHR(): Synchronization2 feature is not enabled");
}
skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER);
if (cb_state->activeRenderPass) {
skip |= ValidateRenderPassPipelineBarriers(loc, cb_state, pDependencyInfo);
if (skip) return true; // Early return to avoid redundant errors from below calls
} else {
if (pDependencyInfo->dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
skip = LogError(objects, "VUID-vkCmdPipelineBarrier2KHR-dependencyFlags-01186",
"%s VK_DEPENDENCY_VIEW_LOCAL_BIT must not be set outside of a render pass instance",
loc.dot(Field::dependencyFlags).Message().c_str());
}
}
skip |= ValidateDependencyInfo(objects, loc, cb_state, pDependencyInfo);
return skip;
}
void CoreChecks::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordBarriers(Func::vkCmdPipelineBarrier, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
StateTracker::PreCallRecordCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
}
void CoreChecks::PreCallRecordCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordBarriers(Func::vkCmdPipelineBarrier2KHR, cb_state, *pDependencyInfo);
TransitionImageLayouts(cb_state, pDependencyInfo->imageMemoryBarrierCount, pDependencyInfo->pImageMemoryBarriers);
StateTracker::PreCallRecordCmdPipelineBarrier2KHR(commandBuffer, pDependencyInfo);
}
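// Shared validation for vkCmdBeginQuery and vkCmdBeginQueryIndexedEXT; the caller supplies the
// entry-point-specific VUIDs through ValidateBeginQueryVuids.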
bool CoreChecks::ValidateBeginQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, VkFlags flags, uint32_t index,
CMD_TYPE cmd, const ValidateBeginQueryVuids *vuids) const {
bool skip = false;
const auto *query_pool_state = GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
const char *cmd_name = CommandTypeString(cmd);
if (query_pool_ci.queryType == VK_QUERY_TYPE_TIMESTAMP) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBeginQuery-queryType-02804",
"%s: The querypool's query type must not be VK_QUERY_TYPE_TIMESTAMP.", cmd_name);
}
// Check for nested queries
if (cb_state->activeQueries.size()) {
for (const auto &a_query : cb_state->activeQueries) {
auto active_query_pool_state = GetQueryPoolState(a_query.pool);
if (active_query_pool_state->createInfo.queryType == query_pool_ci.queryType && a_query.index == index) {
LogObjectList obj_list(cb_state->commandBuffer());
obj_list.add(query_obj.pool);
obj_list.add(a_query.pool);
skip |= LogError(obj_list, vuids->vuid_dup_query_type,
"%s: Within the same command buffer %s, query %d from pool %s has same queryType as active query "
"%d from pool %s.",
cmd_name, report_data->FormatHandle(cb_state->commandBuffer()).c_str(), query_obj.index,
report_data->FormatHandle(query_obj.pool).c_str(), a_query.index,
report_data->FormatHandle(a_query.pool).c_str());
}
}
}
// There are tighter queue constraints to test for certain query pools
if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_feedback);
if (!phys_dev_ext_props.transform_feedback_props.transformFeedbackQueries) {
const char *vuid = cmd == CMD_BEGINQUERYINDEXEDEXT ? "VUID-vkCmdBeginQueryIndexedEXT-queryType-02341"
: "VUID-vkCmdBeginQuery-queryType-02328";
skip |= LogError(cb_state->commandBuffer(), vuid,
"%s: queryPool was created with queryType VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT, but "
"VkPhysicalDeviceTransformFeedbackPropertiesEXT::transformFeedbackQueries is not supported.",
cmd_name);
}
}
if (query_pool_ci.queryType == VK_QUERY_TYPE_OCCLUSION) {
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_occlusion);
}
if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
if (!cb_state->performance_lock_acquired) {
skip |= LogError(cb_state->commandBuffer(), vuids->vuid_profile_lock,
"%s: profiling lock must be held before vkBeginCommandBuffer is called on "
"a command buffer where performance queries are recorded.",
cmd_name);
}
if (query_pool_state->has_perf_scope_command_buffer && cb_state->commandCount > 0) {
skip |= LogError(cb_state->commandBuffer(), vuids->vuid_scope_not_first,
"%s: Query pool %s was created with a counter of scope "
"VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but %s is not the first recorded "
"command in the command buffer.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
}
if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) {
skip |= LogError(cb_state->commandBuffer(), vuids->vuid_scope_in_rp,
"%s: Query pool %s was created with a counter of scope "
"VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
}
}
if (query_pool_ci.queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR ||
query_pool_ci.queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR) {
const char *vuid = cmd == CMD_BEGINQUERYINDEXEDEXT ? "VUID-vkCmdBeginQueryIndexedEXT-queryType-04728"
: "VUID-vkCmdBeginQuery-queryType-04728";
skip |= LogError(cb_state->commandBuffer(), vuid, "%s: QueryPool was created with queryType %s.", cmd_name,
string_VkQueryType(query_pool_ci.queryType));
}
if (query_pool_ci.queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV) {
const char *vuid = cmd == CMD_BEGINQUERYINDEXEDEXT ? "VUID-vkCmdBeginQueryIndexedEXT-queryType-04729"
: "VUID-vkCmdBeginQuery-queryType-04729";
skip |=
LogError(cb_state->commandBuffer(), vuid,
"%s: QueryPool was created with queryType VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV.", cmd_name);
}
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags);
if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
if (!enabled_features.core.occlusionQueryPrecise) {
skip |= LogError(cb_state->commandBuffer(), vuids->vuid_precise,
"%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but precise occlusion queries not enabled on the device.",
cmd_name);
}
if (query_pool_ci.queryType != VK_QUERY_TYPE_OCCLUSION) {
skip |=
LogError(cb_state->commandBuffer(), vuids->vuid_precise,
"%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but pool query type is not VK_QUERY_TYPE_OCCLUSION", cmd_name);
}
}
if (query_obj.query >= query_pool_ci.queryCount) {
skip |= LogError(cb_state->commandBuffer(), vuids->vuid_query_count,
"%s: Query index %" PRIu32 " must be less than query count %" PRIu32 " of %s.", cmd_name, query_obj.query,
query_pool_ci.queryCount, report_data->FormatHandle(query_obj.pool).c_str());
}
if (cb_state->unprotected == false) {
skip |= LogError(cb_state->commandBuffer(), vuids->vuid_protected_cb,
"%s: command can't be used in protected command buffers.", cmd_name);
}
skip |= ValidateCmd(cb_state, cmd);
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot,
VkFlags flags) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
QueryObject query_obj(queryPool, slot);
struct BeginQueryVuids : ValidateBeginQueryVuids {
BeginQueryVuids() : ValidateBeginQueryVuids() {
vuid_queue_flags = "VUID-vkCmdBeginQuery-commandBuffer-cmdpool";
vuid_queue_feedback = "VUID-vkCmdBeginQuery-queryType-02327";
vuid_queue_occlusion = "VUID-vkCmdBeginQuery-queryType-00803";
vuid_precise = "VUID-vkCmdBeginQuery-queryType-00800";
vuid_query_count = "VUID-vkCmdBeginQuery-query-00802";
vuid_profile_lock = "VUID-vkCmdBeginQuery-queryPool-03223";
vuid_scope_not_first = "VUID-vkCmdBeginQuery-queryPool-03224";
vuid_scope_in_rp = "VUID-vkCmdBeginQuery-queryPool-03225";
vuid_dup_query_type = "VUID-vkCmdBeginQuery-queryPool-01922";
vuid_protected_cb = "VUID-vkCmdBeginQuery-commandBuffer-01885";
}
};
BeginQueryVuids vuids;
return ValidateBeginQuery(cb_state, query_obj, flags, 0, CMD_BEGINQUERY, &vuids);
}
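// Submit-time check that a query was reset before being used; consults the command-buffer-local
// state map first and falls back to the global query state map.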
bool CoreChecks::VerifyQueryIsReset(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer, QueryObject query_obj,
const char *func_name, VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
QueryMap *localQueryToStateMap) {
bool skip = false;
const auto *query_pool_state = state_data->GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
QueryState state = state_data->GetQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass);
// If reset was in another command buffer, check the global map
if (state == QUERYSTATE_UNKNOWN) {
state = state_data->GetQueryState(&state_data->queryToStateMap, query_obj.pool, query_obj.query, perfPass);
}
    // Performance queries have limitations on when they can be reset.
if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR && state == QUERYSTATE_UNKNOWN &&
perfPass >= query_pool_state->n_performance_passes) {
        // If the pass is invalid, assume RESET state; another error will be raised in ValidatePerformanceQuery().
state = QUERYSTATE_RESET;
}
if (state != QUERYSTATE_RESET) {
skip |= state_data->LogError(commandBuffer, kVUID_Core_DrawState_QueryNotReset,
"%s: %s and query %" PRIu32
": query not reset. "
"After query pool creation, each query must be reset before it is used. "
"Queries must also be reset between uses.",
func_name, state_data->report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
}
return skip;
}
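// Submit-time checks specific to performance queries: counter pass index range, continuously held
// profiling lock, no reset in the same command buffer, and a single performance query pool unless
// performanceCounterMultipleQueryPools is enabled.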
bool CoreChecks::ValidatePerformanceQuery(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer,
QueryObject query_obj, const char *func_name, VkQueryPool &firstPerfQueryPool,
uint32_t perfPass, QueryMap *localQueryToStateMap) {
const auto *query_pool_state = state_data->GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return false;
const auto *cb_state = state_data->Get<CMD_BUFFER_STATE>(commandBuffer);
bool skip = false;
if (perfPass >= query_pool_state->n_performance_passes) {
skip |= state_data->LogError(commandBuffer, "VUID-VkPerformanceQuerySubmitInfoKHR-counterPassIndex-03221",
"Invalid counterPassIndex (%u, maximum allowed %u) value for query pool %s.", perfPass,
query_pool_state->n_performance_passes,
state_data->report_data->FormatHandle(query_obj.pool).c_str());
}
if (!cb_state->performance_lock_acquired || cb_state->performance_lock_released) {
skip |= state_data->LogError(commandBuffer, "VUID-vkQueueSubmit-pCommandBuffers-03220",
"Commandbuffer %s was submitted and contains a performance query but the"
"profiling lock was not held continuously throughout the recording of commands.",
state_data->report_data->FormatHandle(commandBuffer).c_str());
}
QueryState command_buffer_state = state_data->GetQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass);
if (command_buffer_state == QUERYSTATE_RESET) {
skip |= state_data->LogError(
commandBuffer, query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-None-02863" : "VUID-vkCmdBeginQuery-None-02863",
"VkQuery begin command recorded in a command buffer that, either directly or "
"through secondary command buffers, also contains a vkCmdResetQueryPool command "
"affecting the same query.");
}
if (firstPerfQueryPool != VK_NULL_HANDLE) {
if (firstPerfQueryPool != query_obj.pool &&
!state_data->enabled_features.performance_query_features.performanceCounterMultipleQueryPools) {
skip |= state_data->LogError(
commandBuffer,
query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03226" : "VUID-vkCmdBeginQuery-queryPool-03226",
"Commandbuffer %s contains more than one performance query pool but "
"performanceCounterMultipleQueryPools is not enabled.",
state_data->report_data->FormatHandle(commandBuffer).c_str());
}
} else {
firstPerfQueryPool = query_obj.pool;
}
return skip;
}
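// Defers the reset and performance-query checks to queue submit time by appending a lambda to the
// command buffer's queryUpdates list.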
void CoreChecks::EnqueueVerifyBeginQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj, const char *func_name) {
CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
// Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
cb_state->queryUpdates.emplace_back([command_buffer, query_obj, func_name](const ValidationStateTracker *device_data,
bool do_validate, VkQueryPool &firstPerfQueryPool,
uint32_t perfPass, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
bool skip = false;
skip |= ValidatePerformanceQuery(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass,
localQueryToStateMap);
skip |= VerifyQueryIsReset(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass,
localQueryToStateMap);
return skip;
});
}
void CoreChecks::PreCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
if (disabled[query_validation]) return;
QueryObject query_obj = {queryPool, slot};
EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQuery()");
}
void CoreChecks::EnqueueVerifyEndQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj) {
CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
// Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
cb_state->queryUpdates.emplace_back([command_buffer, query_obj](const ValidationStateTracker *device_data, bool do_validate,
VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
bool skip = false;
const CMD_BUFFER_STATE *cb_state = device_data->Get<CMD_BUFFER_STATE>(command_buffer);
const auto *query_pool_state = device_data->GetQueryPoolState(query_obj.pool);
if (query_pool_state->has_perf_scope_command_buffer && (cb_state->commandCount - 1) != query_obj.endCommandIndex) {
skip |= device_data->LogError(command_buffer, "VUID-vkCmdEndQuery-queryPool-03227",
"vkCmdEndQuery: Query pool %s was created with a counter of scope"
"VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but the end of the query is not the last "
"command in the command buffer %s.",
device_data->report_data->FormatHandle(query_obj.pool).c_str(),
device_data->report_data->FormatHandle(command_buffer).c_str());
}
return skip;
});
}
bool CoreChecks::ValidateCmdEndQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, uint32_t index, CMD_TYPE cmd,
const ValidateEndQueryVuids *vuids) const {
bool skip = false;
const char *cmd_name = CommandTypeString(cmd);
if (!cb_state->activeQueries.count(query_obj)) {
skip |=
LogError(cb_state->commandBuffer(), vuids->vuid_active_queries, "%s: Ending a query before it was started: %s, index %d.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
}
const auto *query_pool_state = GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdEndQuery-queryPool-03228",
"%s: Query pool %s was created with a counter of scope "
"VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
}
}
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags);
skip |= ValidateCmd(cb_state, cmd);
if (cb_state->unprotected == false) {
skip |= LogError(cb_state->commandBuffer(), vuids->vuid_protected_cb,
"%s: command can't be used in protected command buffers.", cmd_name);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) const {
if (disabled[query_validation]) return false;
bool skip = false;
QueryObject query_obj = {queryPool, slot};
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
// Only continue validating if the slot is even within range
if (slot >= available_query_count) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdEndQuery-query-00810",
"vkCmdEndQuery(): query index (%u) is greater or equal to the queryPool size (%u).", slot,
available_query_count);
} else {
struct EndQueryVuids : ValidateEndQueryVuids {
EndQueryVuids() : ValidateEndQueryVuids() {
vuid_queue_flags = "VUID-vkCmdEndQuery-commandBuffer-cmdpool";
vuid_active_queries = "VUID-vkCmdEndQuery-None-01923";
vuid_protected_cb = "VUID-vkCmdEndQuery-commandBuffer-01886";
}
};
EndQueryVuids vuids;
skip |= ValidateCmdEndQuery(cb_state, query_obj, 0, CMD_ENDQUERY, &vuids);
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
if (disabled[query_validation]) return;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
QueryObject query_obj = {queryPool, slot};
query_obj.endCommandIndex = cb_state->commandCount - 1;
EnqueueVerifyEndQuery(commandBuffer, query_obj);
}
bool CoreChecks::ValidateQueryPoolIndex(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, const char *func_name,
const char *first_vuid, const char *sum_vuid) const {
bool skip = false;
const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
if (firstQuery >= available_query_count) {
skip |= LogError(queryPool, first_vuid,
"%s: In Query %s the firstQuery (%u) is greater or equal to the queryPool size (%u).", func_name,
report_data->FormatHandle(queryPool).c_str(), firstQuery, available_query_count);
}
if ((firstQuery + queryCount) > available_query_count) {
skip |=
LogError(queryPool, sum_vuid,
"%s: In Query %s the sum of firstQuery (%u) + queryCount (%u) is greater than the queryPool size (%u).",
func_name, report_data->FormatHandle(queryPool).c_str(), firstQuery, queryCount, available_query_count);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_RESETQUERYPOOL);
skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "VkCmdResetQueryPool()",
"VUID-vkCmdResetQueryPool-firstQuery-00796", "VUID-vkCmdResetQueryPool-firstQuery-00797");
return skip;
}
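// Map a query's tracked state and the requested result flags to the kind of data a results
// retrieval can be expected to produce.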
static QueryResultType GetQueryResultType(QueryState state, VkQueryResultFlags flags) {
switch (state) {
case QUERYSTATE_UNKNOWN:
return QUERYRESULT_UNKNOWN;
case QUERYSTATE_RESET:
case QUERYSTATE_RUNNING:
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
return ((state == QUERYSTATE_RESET) ? QUERYRESULT_WAIT_ON_RESET : QUERYRESULT_WAIT_ON_RUNNING);
} else if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
return QUERYRESULT_SOME_DATA;
} else {
return QUERYRESULT_NO_DATA;
}
case QUERYSTATE_ENDED:
if ((flags & VK_QUERY_RESULT_WAIT_BIT) || (flags & VK_QUERY_RESULT_PARTIAL_BIT) ||
(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
return QUERYRESULT_SOME_DATA;
} else {
return QUERYRESULT_UNKNOWN;
}
case QUERYSTATE_AVAILABLE:
return QUERYRESULT_SOME_DATA;
}
assert(false);
return QUERYRESULT_UNKNOWN;
}
bool CoreChecks::ValidateCopyQueryPoolResults(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer,
VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, uint32_t perfPass,
VkQueryResultFlags flags, QueryMap *localQueryToStateMap) {
bool skip = false;
for (uint32_t i = 0; i < queryCount; i++) {
QueryState state = state_data->GetQueryState(localQueryToStateMap, queryPool, firstQuery + i, perfPass);
QueryResultType result_type = GetQueryResultType(state, flags);
if (result_type != QUERYRESULT_SOME_DATA && result_type != QUERYRESULT_UNKNOWN) {
skip |= state_data->LogError(
commandBuffer, kVUID_Core_DrawState_InvalidQuery,
"vkCmdCopyQueryPoolResults(): Requesting a copy from query to buffer on %s query %" PRIu32 ": %s",
state_data->report_data->FormatHandle(queryPool).c_str(), firstQuery + i, string_QueryResultType(result_type));
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize stride, VkQueryResultFlags flags) const {
if (disabled[query_validation]) return false;
const auto cb_state = GetCBState(commandBuffer);
const auto dst_buff_state = GetBufferState(dstBuffer);
assert(cb_state);
assert(dst_buff_state);
bool skip = ValidateMemoryIsBoundToBuffer(dst_buff_state, "vkCmdCopyQueryPoolResults()",
"VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826");
skip |= ValidateQueryPoolStride("VUID-vkCmdCopyQueryPoolResults-flags-00822", "VUID-vkCmdCopyQueryPoolResults-flags-00823",
stride, "dstOffset", dstOffset, flags);
// Validate that DST buffer has correct usage flags set
skip |= ValidateBufferUsageFlags(dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()",
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmd(cb_state, CMD_COPYQUERYPOOLRESULTS);
skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkCmdCopyQueryPoolResults()",
"VUID-vkCmdCopyQueryPoolResults-firstQuery-00820",
"VUID-vkCmdCopyQueryPoolResults-firstQuery-00821");
if (dstOffset >= dst_buff_state->requirements.size) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstOffset-00819",
"vkCmdCopyQueryPoolResults() dstOffset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64
") of buffer (%s).",
dstOffset, dst_buff_state->requirements.size, report_data->FormatHandle(dst_buff_state->buffer()).c_str());
} else if (dstOffset + (queryCount * stride) > dst_buff_state->requirements.size) {
skip |=
LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00824",
"vkCmdCopyQueryPoolResults() storage required (0x%" PRIxLEAST64
") equal to dstOffset + (queryCount * stride) is greater than the size (0x%" PRIxLEAST64 ") of buffer (%s).",
dstOffset + (queryCount * stride), dst_buff_state->requirements.size,
report_data->FormatHandle(dst_buff_state->buffer()).c_str());
}
auto query_pool_state_iter = queryPoolMap.find(queryPool);
if (query_pool_state_iter != queryPoolMap.end()) {
auto query_pool_state = query_pool_state_iter->second.get();
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
skip |= ValidatePerformanceQueryResults("vkCmdCopyQueryPoolResults", query_pool_state, firstQuery, queryCount, flags);
if (!phys_dev_ext_props.performance_query_props.allowCommandBufferQueryCopies) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-03232",
"vkCmdCopyQueryPoolResults called with query pool %s but "
"VkPhysicalDevicePerformanceQueryPropertiesKHR::allowCommandBufferQueryCopies "
"is not set.",
report_data->FormatHandle(queryPool).c_str());
}
}
if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && ((flags & VK_QUERY_RESULT_PARTIAL_BIT) != 0)) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-00827",
"vkCmdCopyQueryPoolResults() query pool %s was created with VK_QUERY_TYPE_TIMESTAMP so flags must not "
"contain VK_QUERY_RESULT_PARTIAL_BIT.",
report_data->FormatHandle(queryPool).c_str());
}
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL) {
skip |= LogError(queryPool, "VUID-vkCmdCopyQueryPoolResults-queryType-02734",
"vkCmdCopyQueryPoolResults() called but QueryPool %s was created with queryType "
"VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL.",
report_data->FormatHandle(queryPool).c_str());
}
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_VIDEO_ENCODE_BITSTREAM_BUFFER_RANGE_KHR ||
query_pool_state->createInfo.queryType == VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR) {
skip |=
LogError(queryPool, "VUID-vkCmdCopyQueryPoolResults-queryType-04812",
"vkCmdCopyQueryPoolResults(): called but QueryPool %s was created with queryType "
"%s.",
report_data->FormatHandle(queryPool).c_str(), string_VkQueryType(query_pool_state->createInfo.queryType));
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize stride, VkQueryResultFlags flags) {
if (disabled[query_validation]) return;
auto cb_state = GetCBState(commandBuffer);
cb_state->queryUpdates.emplace_back([commandBuffer, queryPool, firstQuery, queryCount, flags](
const ValidationStateTracker *device_data, bool do_validate,
VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
return ValidateCopyQueryPoolResults(device_data, commandBuffer, queryPool, firstQuery, queryCount, perfPass, flags,
localQueryToStateMap);
});
}
bool CoreChecks::PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
const void *pValues) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
skip |= ValidateCmd(cb_state, CMD_PUSHCONSTANTS);
// Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command
// stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range.
if (!skip) {
const auto &ranges = *GetPipelineLayout(layout)->push_constant_ranges;
VkShaderStageFlags found_stages = 0;
for (const auto &range : ranges) {
if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
if (matching_stages != range.stageFlags) {
skip |=
LogError(commandBuffer, "VUID-vkCmdPushConstants-offset-01796",
"vkCmdPushConstants(): stageFlags (%s, offset (%" PRIu32 "), and size (%" PRIu32
"), must contain all stages in overlapping VkPushConstantRange stageFlags (%s), offset (%" PRIu32
"), and size (%" PRIu32 ") in %s.",
string_VkShaderStageFlags(stageFlags).c_str(), offset, size,
string_VkShaderStageFlags(range.stageFlags).c_str(), range.offset, range.size,
report_data->FormatHandle(layout).c_str());
}
// Accumulate all stages we've found
found_stages = matching_stages | found_stages;
}
}
if (found_stages != stageFlags) {
uint32_t missing_stages = ~found_stages & stageFlags;
skip |= LogError(
commandBuffer, "VUID-vkCmdPushConstants-offset-01795",
"vkCmdPushConstants(): %s, VkPushConstantRange in %s overlapping offset = %d and size = %d, do not contain %s.",
string_VkShaderStageFlags(stageFlags).c_str(), report_data->FormatHandle(layout).c_str(), offset, size,
string_VkShaderStageFlags(missing_stages).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool, uint32_t slot) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP);
const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
if ((query_pool_state != nullptr) && (query_pool_state->createInfo.queryType != VK_QUERY_TYPE_TIMESTAMP)) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp-queryPool-01416",
"vkCmdWriteTimestamp(): Query Pool %s was not created with VK_QUERY_TYPE_TIMESTAMP.",
report_data->FormatHandle(queryPool).c_str());
}
const uint32_t timestamp_valid_bits =
GetPhysicalDeviceState()->queue_family_properties[cb_state->command_pool->queueFamilyIndex].timestampValidBits;
if (timestamp_valid_bits == 0) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp-timestampValidBits-00829",
"vkCmdWriteTimestamp(): Query Pool %s has a timestampValidBits value of zero.",
report_data->FormatHandle(queryPool).c_str());
}
if ((query_pool_state != nullptr) && (slot >= query_pool_state->createInfo.queryCount)) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp-query-04904",
"vkCmdWriteTimestamp(): query (%" PRIu32 ") is not lower than the number of queries (%" PRIu32
") in Query pool %s.",
slot, query_pool_state->createInfo.queryCount, report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWriteTimestamp2KHR(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR stage,
VkQueryPool queryPool, uint32_t slot) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
if (!enabled_features.synchronization2_features.synchronization2) {
skip |= LogError(commandBuffer, "VUID-vkCmdWriteTimestamp2KHR-synchronization2-03858",
"vkCmdWriteTimestamp2KHR(): Synchronization2 feature is not enabled");
}
skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP);
Location loc(Func::vkCmdWriteTimestamp2KHR, Field::stage);
if ((stage & (stage - 1)) != 0) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp2KHR-stage-03859",
"%s (%s) must only set a single pipeline stage.", loc.Message().c_str(),
string_VkPipelineStageFlags2KHR(stage).c_str());
}
skip |= ValidatePipelineStage(LogObjectList(cb_state->commandBuffer()), loc, cb_state->GetQueueFlags(), stage);
loc.field = Field::queryPool;
const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
if ((query_pool_state != nullptr) && (query_pool_state->createInfo.queryType != VK_QUERY_TYPE_TIMESTAMP)) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp2KHR-queryPool-03861",
"%s Query Pool %s was not created with VK_QUERY_TYPE_TIMESTAMP.", loc.Message().c_str(),
report_data->FormatHandle(queryPool).c_str());
}
if ((query_pool_state != nullptr) && (slot >= query_pool_state->createInfo.queryCount)) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp2KHR-query-04903",
"vkCmdWriteTimestamp2KHR(): query (%" PRIu32 ") is not lower than the number of queries (%" PRIu32
") in Query pool %s.",
slot, query_pool_state->createInfo.queryCount, report_data->FormatHandle(queryPool).c_str());
}
    const uint32_t timestamp_valid_bits =
        GetPhysicalDeviceState()->queue_family_properties[cb_state->command_pool->queueFamilyIndex].timestampValidBits;
    if (timestamp_valid_bits == 0) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp2KHR-timestampValidBits-03863",
"%s Query Pool %s has a timestampValidBits value of zero.", loc.Message().c_str(),
report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
void CoreChecks::PreCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool, uint32_t slot) {
if (disabled[query_validation]) return;
// Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall...
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
QueryObject query = {queryPool, slot};
const char *func_name = "vkCmdWriteTimestamp()";
cb_state->queryUpdates.emplace_back([commandBuffer, query, func_name](const ValidationStateTracker *device_data,
bool do_validate, VkQueryPool &firstPerfQueryPool,
uint32_t perfPass, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
return VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass, localQueryToStateMap);
});
}
void CoreChecks::PreCallRecordCmdWriteTimestamp2KHR(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
VkQueryPool queryPool, uint32_t slot) {
if (disabled[query_validation]) return;
// Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall...
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
QueryObject query = {queryPool, slot};
const char *func_name = "vkCmdWriteTimestamp()";
cb_state->queryUpdates.emplace_back([commandBuffer, query, func_name](const ValidationStateTracker *device_data,
bool do_validate, VkQueryPool &firstPerfQueryPool,
uint32_t perfPass, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
return VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass, localQueryToStateMap);
});
}
void CoreChecks::PreCallRecordCmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer,
uint32_t accelerationStructureCount,
const VkAccelerationStructureKHR *pAccelerationStructures,
VkQueryType queryType, VkQueryPool queryPool,
uint32_t firstQuery) {
if (disabled[query_validation]) return;
// Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall...
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
const char *func_name = "vkCmdWriteAccelerationStructuresPropertiesKHR()";
cb_state->queryUpdates.emplace_back([accelerationStructureCount, commandBuffer, firstQuery, func_name, queryPool](
const ValidationStateTracker *device_data, bool do_validate,
VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
bool skip = false;
for (uint32_t i = 0; i < accelerationStructureCount; i++) {
QueryObject query = {{queryPool, firstQuery + i}, perfPass};
skip |= VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass,
localQueryToStateMap);
}
return skip;
});
}
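// For each referenced framebuffer attachment, check that the underlying image (or, for imageless
// framebuffers, the VkFramebufferAttachmentImageInfo entry) was created with usage_flag set.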
bool CoreChecks::MatchUsage(uint32_t count, const VkAttachmentReference2 *attachments, const VkFramebufferCreateInfo *fbci,
VkImageUsageFlagBits usage_flag, const char *error_code) const {
bool skip = false;
if (attachments) {
for (uint32_t attach = 0; attach < count; attach++) {
if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
// Attachment counts are verified elsewhere, but prevent an invalid access
if (attachments[attach].attachment < fbci->attachmentCount) {
if ((fbci->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
auto view_state = GetImageViewState(*image_view);
if (view_state) {
const VkImageCreateInfo *ici = &GetImageState(view_state->create_info.image)->createInfo;
if (ici != nullptr) {
auto creation_usage = ici->usage;
const auto stencil_usage_info = LvlFindInChain<VkImageStencilUsageCreateInfo>(ici->pNext);
if (stencil_usage_info) {
creation_usage |= stencil_usage_info->stencilUsage;
}
if ((creation_usage & usage_flag) == 0) {
skip |= LogError(device, error_code,
"vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
"IMAGE_USAGE flags (%s).",
attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
}
}
}
} else {
const VkFramebufferAttachmentsCreateInfo *fbaci =
LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(fbci->pNext);
if (fbaci != nullptr && fbaci->pAttachmentImageInfos != nullptr &&
fbaci->attachmentImageInfoCount > attachments[attach].attachment) {
uint32_t image_usage = fbaci->pAttachmentImageInfos[attachments[attach].attachment].usage;
if ((image_usage & usage_flag) == 0) {
skip |=
LogError(device, error_code,
"vkCreateFramebuffer: Framebuffer attachment info (%d) conflicts with the image's "
"IMAGE_USAGE flags (%s).",
attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
}
}
}
}
}
}
}
return skip;
}
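// Validate VkFramebufferCreateInfo: imageless framebuffer requirements and per-attachment
// compatibility with the render pass the framebuffer is created against.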
bool CoreChecks::ValidateFramebufferCreateInfo(const VkFramebufferCreateInfo *pCreateInfo) const {
bool skip = false;
const VkFramebufferAttachmentsCreateInfo *framebuffer_attachments_create_info =
LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(pCreateInfo->pNext);
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) != 0) {
if (!enabled_features.core12.imagelessFramebuffer) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03189",
"vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, "
"but the imagelessFramebuffer feature is not enabled.");
}
if (framebuffer_attachments_create_info == nullptr) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03190",
"vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, "
"but no instance of VkFramebufferAttachmentsCreateInfo is present in the pNext chain.");
} else {
if (framebuffer_attachments_create_info->attachmentImageInfoCount != 0 &&
framebuffer_attachments_create_info->attachmentImageInfoCount != pCreateInfo->attachmentCount) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03191",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount is %u, but "
"VkFramebufferAttachmentsCreateInfo attachmentImageInfoCount is %u.",
pCreateInfo->attachmentCount, framebuffer_attachments_create_info->attachmentImageInfoCount);
}
}
}
if (framebuffer_attachments_create_info) {
for (uint32_t i = 0; i < framebuffer_attachments_create_info->attachmentImageInfoCount; ++i) {
if (framebuffer_attachments_create_info->pAttachmentImageInfos[i].pNext != nullptr) {
skip |= LogError(device, "VUID-VkFramebufferAttachmentImageInfo-pNext-pNext",
"vkCreateFramebuffer(): VkFramebufferAttachmentsCreateInfo[%" PRIu32 "].pNext is not NULL.", i);
}
}
}
auto rp_state = GetRenderPassState(pCreateInfo->renderPass);
if (rp_state) {
const VkRenderPassCreateInfo2 *rpci = rp_state->createInfo.ptr();
if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-attachmentCount-00876",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
"of %u of %s being used to create Framebuffer.",
pCreateInfo->attachmentCount, rpci->attachmentCount,
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
} else {
// attachmentCounts match, so make sure corresponding attachment details line up
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
const VkImageView *image_views = pCreateInfo->pAttachments;
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
auto view_state = GetImageViewState(image_views[i]);
if (view_state == nullptr) {
skip |= LogError(
image_views[i], "VUID-VkFramebufferCreateInfo-flags-02778",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is not a valid VkImageView.", i);
} else {
auto &ivci = view_state->create_info;
auto &subresource_range = view_state->normalized_subresource_range;
if (ivci.format != rpci->pAttachments[i].format) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00880",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
"match the format of %s used by the corresponding attachment for %s.",
i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
const VkImageCreateInfo *ici = &GetImageState(ivci.image)->createInfo;
if (ici->samples != rpci->pAttachments[i].samples) {
skip |=
LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00881",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not "
"match the %s "
"samples used by the corresponding attachment for %s.",
i, string_VkSampleCountFlagBits(ici->samples),
string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
// Verify that image memory is valid
auto image_data = GetImageState(ivci.image);
skip |= ValidateMemoryIsBoundToImage(image_data, "vkCreateFramebuffer()",
kVUID_Core_Bound_Resource_FreedMemoryAccess);
// Verify that view only has a single mip level
if (subresource_range.levelCount != 1) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-00883",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
"only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
i, subresource_range.levelCount);
}
const uint32_t mip_level = subresource_range.baseMipLevel;
uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
bool used_as_input_color_resolve_depth_stencil_attachment = false;
bool used_as_fragment_shading_rate_attachment = false;
bool fsr_non_zero_viewmasks = false;
for (uint32_t j = 0; j < rpci->subpassCount; ++j) {
const VkSubpassDescription2 &subpass = rpci->pSubpasses[j];
uint32_t highest_view_bit = 0;
for (uint32_t k = 0; k < 32; ++k) {
if (((subpass.viewMask >> k) & 1) != 0) {
highest_view_bit = k;
}
}
for (uint32_t k = 0; k < rpci->pSubpasses[j].inputAttachmentCount; ++k) {
if (subpass.pInputAttachments[k].attachment == i) {
used_as_input_color_resolve_depth_stencil_attachment = true;
break;
}
}
for (uint32_t k = 0; k < rpci->pSubpasses[j].colorAttachmentCount; ++k) {
if (subpass.pColorAttachments[k].attachment == i ||
(subpass.pResolveAttachments && subpass.pResolveAttachments[k].attachment == i)) {
used_as_input_color_resolve_depth_stencil_attachment = true;
break;
}
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment == i) {
used_as_input_color_resolve_depth_stencil_attachment = true;
}
if (used_as_input_color_resolve_depth_stencil_attachment) {
if (subresource_range.layerCount <= highest_view_bit) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-renderPass-04536",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"less than or equal to the highest bit in the view mask (%u) of subpass %u.",
i, subresource_range.layerCount, highest_view_bit, j);
}
}
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
const VkFragmentShadingRateAttachmentInfoKHR *fsr_attachment;
fsr_attachment = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass.pNext);
if (fsr_attachment && fsr_attachment->pFragmentShadingRateAttachment &&
fsr_attachment->pFragmentShadingRateAttachment->attachment == i) {
used_as_fragment_shading_rate_attachment = true;
if ((mip_width * fsr_attachment->shadingRateAttachmentTexelSize.width) < pCreateInfo->width) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04539",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level "
"%u is used as a "
"fragment shading rate attachment in subpass %u, but the product of its "
"width (%u) and the "
"specified shading rate texel width (%u) are smaller than the "
"corresponding framebuffer width (%u).",
i, subresource_range.baseMipLevel, j, mip_width,
fsr_attachment->shadingRateAttachmentTexelSize.width, pCreateInfo->width);
}
if ((mip_height * fsr_attachment->shadingRateAttachmentTexelSize.height) <
pCreateInfo->height) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-flags-04540",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u "
"is used as a "
"fragment shading rate attachment in subpass %u, but the product of its "
"height (%u) and the "
"specified shading rate texel height (%u) are smaller than the corresponding "
"framebuffer height (%u).",
i, subresource_range.baseMipLevel, j, mip_height,
fsr_attachment->shadingRateAttachmentTexelSize.height, pCreateInfo->height);
}
if (highest_view_bit != 0) {
fsr_non_zero_viewmasks = true;
}
if (subresource_range.layerCount <= highest_view_bit) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-04537",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"less than or equal to the highest bit in the view mask (%u) of subpass %u.",
i, subresource_range.layerCount, highest_view_bit, j);
}
}
}
}
if (enabled_features.fragment_density_map_features.fragmentDensityMap) {
const VkRenderPassFragmentDensityMapCreateInfoEXT *fdm_attachment;
fdm_attachment = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rpci->pNext);
if (fdm_attachment && fdm_attachment->fragmentDensityMapAttachment.attachment == i) {
uint32_t ceiling_width = static_cast<uint32_t>(ceil(
static_cast<float>(pCreateInfo->width) /
std::max(static_cast<float>(
phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.width),
1.0f)));
if (mip_width < ceiling_width) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-02555",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has width "
"smaller than the corresponding the ceiling of framebuffer width / "
"maxFragmentDensityTexelSize.width "
"Here are the respective dimensions for attachment #%u, the ceiling value:\n "
"attachment #%u, framebuffer:\n"
"width: %u, the ceiling value: %u\n",
i, subresource_range.baseMipLevel, i, i, mip_width, ceiling_width);
}
uint32_t ceiling_height = static_cast<uint32_t>(ceil(
static_cast<float>(pCreateInfo->height) /
std::max(static_cast<float>(
phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.height),
1.0f)));
if (mip_height < ceiling_height) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-02556",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has height "
"smaller than the corresponding the ceiling of framebuffer height / "
"maxFragmentDensityTexelSize.height "
"Here are the respective dimensions for attachment #%u, the ceiling value:\n "
"attachment #%u, framebuffer:\n"
"height: %u, the ceiling value: %u\n",
i, subresource_range.baseMipLevel, i, i, mip_height, ceiling_height);
}
if (view_state->normalized_subresource_range.layerCount != 1) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-pAttachments-02744",
"vkCreateFramebuffer(): pCreateInfo->pAttachments[%" PRIu32
"] is referenced by "
"VkRenderPassFragmentDensityMapCreateInfoEXT::fragmentDensityMapAttachment in "
"the pNext chain, but it was create with subresourceRange.layerCount (%" PRIu32
") different from 1.",
i, view_state->normalized_subresource_range.layerCount);
}
}
}
if (used_as_input_color_resolve_depth_stencil_attachment) {
if (mip_width < pCreateInfo->width) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04533",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has "
"width (%u) smaller than the corresponding framebuffer width (%u).",
i, mip_level, mip_width, pCreateInfo->width);
}
if (mip_height < pCreateInfo->height) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04534",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has "
"height (%u) smaller than the corresponding framebuffer height (%u).",
i, mip_level, mip_height, pCreateInfo->height);
}
uint32_t layerCount = view_state->create_info.subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS
? GetImageState(ivci.image)->createInfo.arrayLayers
: view_state->create_info.subresourceRange.layerCount;
if (layerCount < pCreateInfo->layers) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-flags-04535",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"smaller than the corresponding framebuffer layer count (%u).",
i, layerCount, pCreateInfo->layers);
}
}
if (used_as_fragment_shading_rate_attachment && !fsr_non_zero_viewmasks) {
if (subresource_range.layerCount != 1 && subresource_range.layerCount < pCreateInfo->layers) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-flags-04538",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"smaller than the corresponding framebuffer layer count (%u).",
i, subresource_range.layerCount, pCreateInfo->layers);
}
}
if (IsIdentitySwizzle(ivci.components) == false) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-00884",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All "
"framebuffer attachments must have been created with the identity swizzle. Here are the actual "
"swizzle values:\n"
"r swizzle = %s\n"
"g swizzle = %s\n"
"b swizzle = %s\n"
"a swizzle = %s\n",
i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
}
if ((ivci.viewType == VK_IMAGE_VIEW_TYPE_2D) || (ivci.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
const auto image_state = GetImageState(ivci.image);
if (image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if (FormatIsDepthOrStencil(ivci.format)) {
LogObjectList objlist(device);
objlist.add(ivci.image);
skip |= LogError(
objlist, "VUID-VkFramebufferCreateInfo-pAttachments-00891",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has an image view type of "
"%s "
"which was taken from image %s of type VK_IMAGE_TYPE_3D, but the image view format is a "
"depth/stencil format %s",
i, string_VkImageViewType(ivci.viewType), report_data->FormatHandle(ivci.image).c_str(),
string_VkFormat(ivci.format));
}
}
}
if (ivci.viewType == VK_IMAGE_VIEW_TYPE_3D) {
LogObjectList objlist(device);
objlist.add(image_views[i]);
skip |= LogError(objlist, "VUID-VkFramebufferCreateInfo-flags-04113",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has an image view type "
"of VK_IMAGE_VIEW_TYPE_3D",
i);
}
}
}
} else if (framebuffer_attachments_create_info) {
// VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT is set
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
auto &aii = framebuffer_attachments_create_info->pAttachmentImageInfos[i];
bool format_found = false;
for (uint32_t j = 0; j < aii.viewFormatCount; ++j) {
if (aii.pViewFormats[j] == rpci->pAttachments[i].format) {
format_found = true;
}
}
if (!format_found) {
skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-flags-03205",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u does not include "
"format %s used "
"by the corresponding attachment for renderPass (%s).",
i, string_VkFormat(rpci->pAttachments[i].format),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
bool used_as_input_color_resolve_depth_stencil_attachment = false;
bool used_as_fragment_shading_rate_attachment = false;
bool fsr_non_zero_viewmasks = false;
for (uint32_t j = 0; j < rpci->subpassCount; ++j) {
const VkSubpassDescription2 &subpass = rpci->pSubpasses[j];
uint32_t highest_view_bit = 0;
for (int k = 0; k < 32; ++k) {
if (((subpass.viewMask >> k) & 1) != 0) {
highest_view_bit = k;
}
}
for (uint32_t k = 0; k < rpci->pSubpasses[j].inputAttachmentCount; ++k) {
if (subpass.pInputAttachments[k].attachment == i) {
used_as_input_color_resolve_depth_stencil_attachment = true;
break;
}
}
for (uint32_t k = 0; k < rpci->pSubpasses[j].colorAttachmentCount; ++k) {
if (subpass.pColorAttachments[k].attachment == i ||
(subpass.pResolveAttachments && subpass.pResolveAttachments[k].attachment == i)) {
used_as_input_color_resolve_depth_stencil_attachment = true;
break;
}
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment == i) {
used_as_input_color_resolve_depth_stencil_attachment = true;
}
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
const VkFragmentShadingRateAttachmentInfoKHR *fsr_attachment;
fsr_attachment = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass.pNext);
if (fsr_attachment && fsr_attachment->pFragmentShadingRateAttachment->attachment == i) {
used_as_fragment_shading_rate_attachment = true;
if ((aii.width * fsr_attachment->shadingRateAttachmentTexelSize.width) < pCreateInfo->width) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-04543",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is used as a "
"fragment shading rate attachment in subpass %u, but the product of its width (%u) and the "
"specified shading rate texel width (%u) are smaller than the corresponding framebuffer "
"width (%u).",
i, j, aii.width, fsr_attachment->shadingRateAttachmentTexelSize.width, pCreateInfo->width);
}
if ((aii.height * fsr_attachment->shadingRateAttachmentTexelSize.height) < pCreateInfo->height) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04544",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is used as a "
"fragment shading rate attachment in subpass %u, but the product of its "
"height (%u) and the "
"specified shading rate texel height (%u) are smaller than the corresponding "
"framebuffer height (%u).",
i, j, aii.height, fsr_attachment->shadingRateAttachmentTexelSize.height,
pCreateInfo->height);
}
if (highest_view_bit != 0) {
fsr_non_zero_viewmasks = true;
}
if (aii.layerCount != 1 && aii.layerCount <= highest_view_bit) {
skip |= LogError(
device, kVUIDUndefined,
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"less than or equal to the highest bit in the view mask (%u) of subpass %u.",
i, aii.layerCount, highest_view_bit, j);
}
}
}
}
if (used_as_input_color_resolve_depth_stencil_attachment) {
if (aii.width < pCreateInfo->width) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-04541",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a width of only #%u, "
"but framebuffer has a width of #%u.",
i, aii.width, pCreateInfo->width);
}
if (aii.height < pCreateInfo->height) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-04542",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a height of only #%u, "
"but framebuffer has a height of #%u.",
i, aii.height, pCreateInfo->height);
}
const char *mismatched_layers_no_multiview_vuid = IsExtEnabled(device_extensions.vk_khr_multiview)
? "VUID-VkFramebufferCreateInfo-renderPass-04546"
: "VUID-VkFramebufferCreateInfo-flags-04547";
if ((rpci->subpassCount == 0) || (rpci->pSubpasses[0].viewMask == 0)) {
if (aii.layerCount < pCreateInfo->layers) {
skip |= LogError(
device, mismatched_layers_no_multiview_vuid,
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has only #%u layers, "
"but framebuffer has #%u layers.",
i, aii.layerCount, pCreateInfo->layers);
}
}
}
if (used_as_fragment_shading_rate_attachment && !fsr_non_zero_viewmasks) {
if (aii.layerCount != 1 && aii.layerCount < pCreateInfo->layers) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04545",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"smaller than the corresponding framebuffer layer count (%u).",
i, aii.layerCount, pCreateInfo->layers);
}
}
}
// Validate image usage
uint32_t attachment_index = VK_ATTACHMENT_UNUSED;
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
skip |= MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pColorAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201");
skip |=
MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pResolveAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201");
skip |= MatchUsage(1, rpci->pSubpasses[i].pDepthStencilAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03202");
skip |= MatchUsage(rpci->pSubpasses[i].inputAttachmentCount, rpci->pSubpasses[i].pInputAttachments, pCreateInfo,
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03204");
const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve =
LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext);
if (IsExtEnabled(device_extensions.vk_khr_depth_stencil_resolve) && depth_stencil_resolve != nullptr) {
skip |= MatchUsage(1, depth_stencil_resolve->pDepthStencilResolveAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03203");
}
const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment_info =
LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(rpci->pSubpasses[i].pNext);
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate &&
fragment_shading_rate_attachment_info != nullptr) {
skip |= MatchUsage(1, fragment_shading_rate_attachment_info->pFragmentShadingRateAttachment, pCreateInfo,
VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,
"VUID-VkFramebufferCreateInfo-flags-04549");
}
}
if (IsExtEnabled(device_extensions.vk_khr_multiview)) {
if ((rpci->subpassCount > 0) && (rpci->pSubpasses[0].viewMask != 0)) {
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve =
LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext);
uint32_t view_bits = rpci->pSubpasses[i].viewMask;
uint32_t highest_view_bit = 0;
for (int j = 0; j < 32; ++j) {
if (((view_bits >> j) & 1) != 0) {
highest_view_bit = j;
}
}
for (uint32_t j = 0; j < rpci->pSubpasses[i].colorAttachmentCount; ++j) {
attachment_index = rpci->pSubpasses[i].pColorAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a color attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
if (rpci->pSubpasses[i].pResolveAttachments) {
attachment_index = rpci->pSubpasses[i].pResolveAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a resolve attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
}
}
for (uint32_t j = 0; j < rpci->pSubpasses[i].inputAttachmentCount; ++j) {
attachment_index = rpci->pSubpasses[i].pInputAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as an input attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
}
if (rpci->pSubpasses[i].pDepthStencilAttachment != nullptr) {
attachment_index = rpci->pSubpasses[i].pDepthStencilAttachment->attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a depth/stencil attachment.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit);
}
}
if (IsExtEnabled(device_extensions.vk_khr_depth_stencil_resolve) &&
depth_stencil_resolve != nullptr &&
depth_stencil_resolve->pDepthStencilResolveAttachment != nullptr) {
attachment_index = depth_stencil_resolve->pDepthStencilResolveAttachment->attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a depth/stencil resolve "
"attachment.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit);
}
}
}
}
}
}
}
}
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
// Verify correct attachment usage flags
for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
const VkSubpassDescription2 &subpass_description = rpci->pSubpasses[subpass];
// Verify input attachments:
skip |= MatchUsage(subpass_description.inputAttachmentCount, subpass_description.pInputAttachments, pCreateInfo,
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00879");
// Verify color attachments:
skip |= MatchUsage(subpass_description.colorAttachmentCount, subpass_description.pColorAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00877");
// Verify depth/stencil attachments:
skip |=
MatchUsage(1, subpass_description.pDepthStencilAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02633");
// Verify depth/stencil resolve
if (IsExtEnabled(device_extensions.vk_khr_depth_stencil_resolve)) {
const VkSubpassDescriptionDepthStencilResolve *ds_resolve =
LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_description.pNext);
if (ds_resolve) {
skip |= MatchUsage(1, ds_resolve->pDepthStencilResolveAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
"VUID-VkFramebufferCreateInfo-pAttachments-02634");
}
}
// Verify fragment shading rate attachments
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment_info =
LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass_description.pNext);
if (fragment_shading_rate_attachment_info) {
skip |= MatchUsage(1, fragment_shading_rate_attachment_info->pFragmentShadingRateAttachment,
pCreateInfo, VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,
"VUID-VkFramebufferCreateInfo-flags-04548");
}
}
}
}
bool b_has_non_zero_view_masks = false;
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
if (rpci->pSubpasses[i].viewMask != 0) {
b_has_non_zero_view_masks = true;
break;
}
}
if (b_has_non_zero_view_masks && pCreateInfo->layers != 1) {
skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-02531",
"vkCreateFramebuffer(): VkFramebufferCreateInfo has #%u layers but "
"renderPass (%s) was specified with non-zero view masks\n",
pCreateInfo->layers, report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
}
}
// Verify FB dimensions are within physical device limits
if (pCreateInfo->width > phys_dev_props.limits.maxFramebufferWidth) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00886",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
"width: %u, device max: %u\n",
pCreateInfo->width, phys_dev_props.limits.maxFramebufferWidth);
}
if (pCreateInfo->height > phys_dev_props.limits.maxFramebufferHeight) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-height-00888",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
"height: %u, device max: %u\n",
pCreateInfo->height, phys_dev_props.limits.maxFramebufferHeight);
}
if (pCreateInfo->layers > phys_dev_props.limits.maxFramebufferLayers) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-layers-00890",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
"layers: %u, device max: %u\n",
pCreateInfo->layers, phys_dev_props.limits.maxFramebufferLayers);
}
// Verify FB dimensions are greater than zero
if (pCreateInfo->width <= 0) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00885",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
}
if (pCreateInfo->height <= 0) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-height-00887",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
}
if (pCreateInfo->layers <= 0) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-layers-00889",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) const {
// TODO : Verify that the renderPass this FB is created with is compatible with the FB
bool skip = false;
skip |= ValidateFramebufferCreateInfo(pCreateInfo);
return skip;
}
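// Depth-first search over the subpass DAG: returns true if 'dependent' is reachable from 'index'
// by following previous-subpass edges; 'processed_nodes' prevents revisiting subpasses.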
static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
layer_data::unordered_set<uint32_t> &processed_nodes) {
// If we have already checked this node we have not found a dependency path so return false.
if (processed_nodes.count(index)) return false;
processed_nodes.insert(index);
const DAGNode &node = subpass_to_node[index];
// Look for a dependency path. If one exists return true else recurse on the previous nodes.
if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
for (auto elem : node.prev) {
if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
}
} else {
return true;
}
return false;
}
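// Layouts treated as read-only accesses by the dependency analysis below; two read-only uses of
// the same attachment do not require an explicit subpass dependency.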
bool CoreChecks::IsImageLayoutReadOnly(VkImageLayout layout) const {
if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) || (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) ||
(layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL) ||
(layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL)) {
return true;
}
return false;
}
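// Verifies that an explicit or transitive dependency exists between 'subpass' and every other
// subpass in 'dependent_subpasses' that uses the same attachment (pairs of read-only accesses are
// exempt). Logs an error and returns false when a required dependency is missing.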
bool CoreChecks::CheckDependencyExists(const VkRenderPass renderpass, const uint32_t subpass, const VkImageLayout layout,
const std::vector<SubpassLayout> &dependent_subpasses,
const std::vector<DAGNode> &subpass_to_node, bool &skip) const {
bool result = true;
bool b_image_layout_read_only = IsImageLayoutReadOnly(layout);
// Loop through all subpasses that share the same attachment and make sure a dependency exists
for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
const SubpassLayout &sp = dependent_subpasses[k];
if (subpass == sp.index) continue;
if (b_image_layout_read_only && IsImageLayoutReadOnly(sp.layout)) continue;
const DAGNode &node = subpass_to_node[subpass];
// Check for a specified dependency between the two nodes. If one exists we are done.
auto prev_elem = std::find(node.prev.begin(), node.prev.end(), sp.index);
auto next_elem = std::find(node.next.begin(), node.next.end(), sp.index);
if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
// If no dependency exists an implicit dependency still might. If not, throw an error.
layer_data::unordered_set<uint32_t> processed_nodes;
if (!(FindDependency(subpass, sp.index, subpass_to_node, processed_nodes) ||
FindDependency(sp.index, subpass, subpass_to_node, processed_nodes))) {
skip |=
LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass,
"A dependency between subpasses %d and %d must exist but one is not specified.", subpass, sp.index);
result = false;
}
}
}
return result;
}
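// Recursive helper: returns true if subpass 'index' or any of its predecessors uses 'attachment'.
// When a predecessor uses it and an intermediate subpass (depth > 0) neither uses nor lists it in
// pPreserveAttachments, an error is logged.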
bool CoreChecks::CheckPreserved(const VkRenderPass renderpass, const VkRenderPassCreateInfo2 *pCreateInfo, const int index,
const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth,
bool &skip) const {
const DAGNode &node = subpass_to_node[index];
// If this node writes to the attachment return true as next nodes need to preserve the attachment.
const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[index];
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (attachment == subpass.pColorAttachments[j].attachment) return true;
}
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
if (attachment == subpass.pInputAttachments[j].attachment) return true;
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
}
bool result = false;
// Loop through previous nodes and see if any of them write to the attachment.
for (auto elem : node.prev) {
result |= CheckPreserved(renderpass, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
}
// If the attachment was written to by a previous node then this node needs to preserve it.
if (result && depth > 0) {
bool has_preserved = false;
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
if (subpass.pPreserveAttachments[j] == attachment) {
has_preserved = true;
break;
}
}
if (!has_preserved) {
skip |= LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass,
"Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
}
}
return result;
}
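// Returns true when either endpoint of range 1 ([offset1, offset1 + size1)) falls strictly inside
// range 2; e.g. IsRangeOverlapping(0u, 4u, 2u, 4u) is true while IsRangeOverlapping(0u, 2u, 2u, 2u) is false.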
template <class T>
bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) {
return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) ||
((offset1 > offset2) && (offset1 < (offset2 + size2)));
}
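// Two subresource ranges alias only when both their mip-level ranges and their array-layer ranges overlap.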
bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
return (IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}
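// Checks that the render pass declares a dependency wherever the framebuffer's attachments
// (including attachments that alias the same image or memory) are used by more than one subpass,
// and that attachments read after being written are preserved by the intervening subpasses.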
bool CoreChecks::ValidateDependencies(FRAMEBUFFER_STATE const *framebuffer, RENDER_PASS_STATE const *renderPass) const {
bool skip = false;
auto const framebuffer_info = framebuffer->createInfo.ptr();
auto const create_info = renderPass->createInfo.ptr();
auto const &subpass_to_node = renderPass->subpass_to_node;
struct Attachment {
std::vector<SubpassLayout> outputs;
std::vector<SubpassLayout> inputs;
std::vector<uint32_t> overlapping;
};
std::vector<Attachment> attachments(create_info->attachmentCount);
if (!(framebuffer_info->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT)) {
// Find overlapping attachments
for (uint32_t i = 0; i < create_info->attachmentCount; ++i) {
for (uint32_t j = i + 1; j < create_info->attachmentCount; ++j) {
VkImageView viewi = framebuffer_info->pAttachments[i];
VkImageView viewj = framebuffer_info->pAttachments[j];
if (viewi == viewj) {
attachments[i].overlapping.emplace_back(j);
attachments[j].overlapping.emplace_back(i);
continue;
}
auto view_state_i = GetImageViewState(viewi);
auto view_state_j = GetImageViewState(viewj);
if (!view_state_i || !view_state_j) {
continue;
}
auto view_ci_i = view_state_i->create_info;
auto view_ci_j = view_state_j->create_info;
if (view_ci_i.image == view_ci_j.image &&
IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
attachments[i].overlapping.emplace_back(j);
attachments[j].overlapping.emplace_back(i);
continue;
}
auto image_data_i = GetImageState(view_ci_i.image);
auto image_data_j = GetImageState(view_ci_j.image);
if (!image_data_i || !image_data_j) {
continue;
}
const auto *binding_i = image_data_i->Binding();
const auto *binding_j = image_data_j->Binding();
if (binding_i && binding_j && binding_i->mem_state == binding_j->mem_state &&
IsRangeOverlapping(binding_i->offset, binding_i->size, binding_j->offset, binding_j->size)) {
attachments[i].overlapping.emplace_back(j);
attachments[j].overlapping.emplace_back(i);
}
}
}
}
// For each attachment, find the subpasses that use it.
layer_data::unordered_set<uint32_t> attachment_indices;
for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
const VkSubpassDescription2 &subpass = create_info->pSubpasses[i];
attachment_indices.clear();
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
SubpassLayout sp = {i, subpass.pInputAttachments[j].layout};
attachments[attachment].inputs.emplace_back(sp);
for (auto overlapping_attachment : attachments[attachment].overlapping) {
attachments[overlapping_attachment].inputs.emplace_back(sp);
}
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
SubpassLayout sp = {i, subpass.pColorAttachments[j].layout};
attachments[attachment].outputs.emplace_back(sp);
for (auto overlapping_attachment : attachments[attachment].overlapping) {
attachments[overlapping_attachment].outputs.emplace_back(sp);
}
attachment_indices.insert(attachment);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
SubpassLayout sp = {i, subpass.pDepthStencilAttachment->layout};
attachments[attachment].outputs.emplace_back(sp);
for (auto overlapping_attachment : attachments[attachment].overlapping) {
attachments[overlapping_attachment].outputs.emplace_back(sp);
}
if (attachment_indices.count(attachment)) {
skip |=
LogError(renderPass->renderPass(), kVUID_Core_DrawState_InvalidRenderpass,
"Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
}
}
}
// If there is a dependency needed make sure one exists
for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
const VkSubpassDescription2 &subpass = create_info->pSubpasses[i];
// If the attachment is an input then all subpasses that output must have a dependency relationship
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
CheckDependencyExists(renderPass->renderPass(), i, subpass.pInputAttachments[j].layout, attachments[attachment].outputs,
subpass_to_node, skip);
}
// If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
CheckDependencyExists(renderPass->renderPass(), i, subpass.pColorAttachments[j].layout, attachments[attachment].outputs,
subpass_to_node, skip);
CheckDependencyExists(renderPass->renderPass(), i, subpass.pColorAttachments[j].layout, attachments[attachment].inputs,
subpass_to_node, skip);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
CheckDependencyExists(renderPass->renderPass(), i, subpass.pDepthStencilAttachment->layout,
attachments[attachment].outputs, subpass_to_node, skip);
CheckDependencyExists(renderPass->renderPass(), i, subpass.pDepthStencilAttachment->layout,
attachments[attachment].inputs, subpass_to_node, skip);
}
}
// Loop through implicit dependencies: if this pass reads an attachment, make sure the attachment is preserved
// by every pass between the one that wrote it and this read.
for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
const VkSubpassDescription2 &subpass = create_info->pSubpasses[i];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
CheckPreserved(renderPass->renderPass(), create_info, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0,
skip);
}
}
return skip;
}
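// Validates the per-dependency rules of the render pass DAG: external dependencies, src/dst
// subpass ordering, self-dependency stage ordering and VK_DEPENDENCY_BY_REGION_BIT requirements,
// and VK_DEPENDENCY_VIEW_LOCAL_BIT / viewOffset interactions for multiview.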
bool CoreChecks::ValidateRenderPassDAG(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo) const {
bool skip = false;
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
const VkSubpassDependency2 &dependency = pCreateInfo->pDependencies[i];
auto latest_src_stage = sync_utils::GetLogicallyLatestGraphicsPipelineStage(dependency.srcStageMask);
auto earliest_dst_stage = sync_utils::GetLogicallyEarliestGraphicsPipelineStage(dependency.dstStageMask);
// The first subpass here serves as a good proxy for "is multiview enabled" - since all view masks need to be non-zero if
// any are, which enables multiview.
if (use_rp2 && (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && (pCreateInfo->pSubpasses[0].viewMask == 0)) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo2-viewMask-03059",
"Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but multiview is not enabled for this render pass.", i);
} else if (use_rp2 && !(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && dependency.viewOffset != 0) {
skip |= LogError(device, "VUID-VkSubpassDependency2-dependencyFlags-03092",
"Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but also specifies a view offset of %u.", i,
dependency.viewOffset);
} else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
if (dependency.srcSubpass == dependency.dstSubpass) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03085" : "VUID-VkSubpassDependency-srcSubpass-00865";
skip |= LogError(device, vuid, "The src and dst subpasses in dependency %u are both external.", i);
} else if (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
vuid = "VUID-VkSubpassDependency-dependencyFlags-02520";
} else { // dependency.dstSubpass == VK_SUBPASS_EXTERNAL
vuid = "VUID-VkSubpassDependency-dependencyFlags-02521";
}
if (use_rp2) {
// Create render pass 2 distinguishes between source and destination external dependencies.
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
vuid = "VUID-VkSubpassDependency2-dependencyFlags-03090";
} else {
vuid = "VUID-VkSubpassDependency2-dependencyFlags-03091";
}
}
skip |=
LogError(device, vuid,
"Dependency %u specifies an external dependency but also specifies VK_DEPENDENCY_VIEW_LOCAL_BIT.", i);
}
} else if (dependency.srcSubpass > dependency.dstSubpass) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03084" : "VUID-VkSubpassDependency-srcSubpass-00864";
skip |= LogError(device, vuid,
"Dependency %u specifies a dependency from a later subpass (%u) to an earlier subpass (%u), which is "
"disallowed to prevent cyclic dependencies.",
i, dependency.srcSubpass, dependency.dstSubpass);
} else if (dependency.srcSubpass == dependency.dstSubpass) {
if (dependency.viewOffset != 0) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-viewOffset-02530" : "VUID-VkRenderPassCreateInfo-pNext-01930";
skip |= LogError(device, vuid, "Dependency %u specifies a self-dependency but has a non-zero view offset of %u", i,
dependency.viewOffset);
} else if ((dependency.dependencyFlags | VK_DEPENDENCY_VIEW_LOCAL_BIT) != dependency.dependencyFlags &&
pCreateInfo->pSubpasses[dependency.srcSubpass].viewMask > 1) {
vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pDependencies-03060" : "VUID-VkSubpassDependency-srcSubpass-00872";
skip |= LogError(device, vuid,
"Dependency %u specifies a self-dependency for subpass %u with a non-zero view mask, but does not "
"specify VK_DEPENDENCY_VIEW_LOCAL_BIT.",
i, dependency.srcSubpass);
} else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) ||
HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask)) &&
(sync_utils::GetGraphicsPipelineStageLogicalOrdinal(latest_src_stage) >
sync_utils::GetGraphicsPipelineStageLogicalOrdinal(earliest_dst_stage))) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03087" : "VUID-VkSubpassDependency-srcSubpass-00867";
skip |= LogError(
device, vuid,
"Dependency %u specifies a self-dependency from logically-later stage (%s) to a logically-earlier stage (%s).",
i, sync_utils::StringPipelineStageFlags(latest_src_stage).c_str(),
sync_utils::StringPipelineStageFlags(earliest_dst_stage).c_str());
} else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) == false) &&
(HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask) == false) &&
((dependency.dependencyFlags & VK_DEPENDENCY_BY_REGION_BIT) == 0)) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-02245" : "VUID-VkSubpassDependency-srcSubpass-02243";
skip |= LogError(device, vuid,
"Dependency %u specifies a self-dependency for subpass %u with both stages including a "
"framebuffer-space stage, but does not specify VK_DEPENDENCY_BY_REGION_BIT in dependencyFlags.",
i, dependency.srcSubpass);
}
} else if ((dependency.srcSubpass < dependency.dstSubpass) &&
((pCreateInfo->pSubpasses[dependency.srcSubpass].flags & VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM) != 0)) {
vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-flags-04909" : "VUID-VkSubpassDescription-flags-03343";
skip |= LogError(device, vuid,
"Dependency %u specifies that subpass %u has a dependency on a later subpass"
"and includes VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM subpass flags.",
i, dependency.srcSubpass);
}
}
return skip;
}
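// Reports attachment references that index beyond the render pass attachment count.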
bool CoreChecks::ValidateAttachmentIndex(RenderPassCreateVersion rp_version, uint32_t attachment, uint32_t attachment_count,
const char *error_type, const char *function_name) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
assert(attachment != VK_ATTACHMENT_UNUSED);
if (attachment >= attachment_count) {
const char *vuid =
use_rp2 ? "VUID-VkRenderPassCreateInfo2-attachment-03051" : "VUID-VkRenderPassCreateInfo-attachment-00834";
skip |= LogError(device, vuid, "%s: %s attachment %d must be less than the total number of attachments %d.", function_name,
error_type, attachment, attachment_count);
}
return skip;
}
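// Bitmask of the ways a single attachment is used within one subpass, so conflicting uses can be
// reported with a readable name via StringAttachmentType().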
enum AttachmentType {
ATTACHMENT_COLOR = 1,
ATTACHMENT_DEPTH = 2,
ATTACHMENT_INPUT = 4,
ATTACHMENT_PRESERVE = 8,
ATTACHMENT_RESOLVE = 16,
};
char const *StringAttachmentType(uint8_t type) {
switch (type) {
case ATTACHMENT_COLOR:
return "color";
case ATTACHMENT_DEPTH:
return "depth";
case ATTACHMENT_INPUT:
return "input";
case ATTACHMENT_PRESERVE:
return "preserve";
case ATTACHMENT_RESOLVE:
return "resolve";
default:
return "(multiple)";
}
}
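// Records that 'attachment' is used as 'new_use' in 'new_layout' within 'subpass' and reports
// illegal combinations: reuse with a different layout, use as both color and depth/stencil, or
// use as both preserve and any other attachment type.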
bool CoreChecks::AddAttachmentUse(RenderPassCreateVersion rp_version, uint32_t subpass, std::vector<uint8_t> &attachment_uses,
std::vector<VkImageLayout> &attachment_layouts, uint32_t attachment, uint8_t new_use,
VkImageLayout new_layout) const {
if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */
bool skip = false;
auto &uses = attachment_uses[attachment];
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCreateRenderPass2()" : "vkCreateRenderPass()";
if (uses & new_use) {
if (attachment_layouts[attachment] != new_layout) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-layout-02528" : "VUID-VkSubpassDescription-layout-02519";
skip |= LogError(device, vuid, "%s: subpass %u already uses attachment %u with a different image layout (%s vs %s).",
function_name, subpass, attachment, string_VkImageLayout(attachment_layouts[attachment]),
string_VkImageLayout(new_layout));
}
} else if (((new_use & ATTACHMENT_COLOR) && (uses & ATTACHMENT_DEPTH)) ||
((uses & ATTACHMENT_COLOR) && (new_use & ATTACHMENT_DEPTH))) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-04440"
: "VUID-VkSubpassDescription-pDepthStencilAttachment-04438";
skip |= LogError(device, vuid, "%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass,
attachment, StringAttachmentType(uses), StringAttachmentType(new_use));
} else if ((uses && (new_use & ATTACHMENT_PRESERVE)) || (new_use && (uses & ATTACHMENT_PRESERVE))) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pPreserveAttachments-03074"
: "VUID-VkSubpassDescription-pPreserveAttachments-00854";
skip |= LogError(device, vuid, "%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass,
attachment, StringAttachmentType(uses), StringAttachmentType(new_use));
} else {
attachment_layouts[attachment] = new_layout;
uses |= new_use;
}
return skip;
}
// Handles attachment references regardless of type (input, color, depth, etc)
// Input attachments have extra VUs associated with them
bool CoreChecks::ValidateAttachmentReference(RenderPassCreateVersion rp_version, VkAttachmentReference2 reference,
const VkFormat attachment_format, bool input, const char *error_type,
const char *function_name) const {
bool skip = false;
// Currently all VUs require attachment to not be UNUSED
assert(reference.attachment != VK_ATTACHMENT_UNUSED);
// currently VkAttachmentReference and VkAttachmentReference2 have no overlapping VUs
if (rp_version == RENDER_PASS_VERSION_1) {
switch (reference.layout) {
case VK_IMAGE_LAYOUT_UNDEFINED:
case VK_IMAGE_LAYOUT_PREINITIALIZED:
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
skip |= LogError(device, "VUID-VkAttachmentReference-layout-00857",
"%s: Layout for %s is %s but must not be "
"VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR|DEPTH_ATTACHMENT_OPTIMAL|DEPTH_READ_"
"ONLY_OPTIMAL|STENCIL_ATTACHMENT_OPTIMAL|STENCIL_READ_ONLY_OPTIMAL].",
function_name, error_type, string_VkImageLayout(reference.layout));
break;
default:
break;
}
} else {
const auto *attachment_reference_stencil_layout = LvlFindInChain<VkAttachmentReferenceStencilLayout>(reference.pNext);
switch (reference.layout) {
case VK_IMAGE_LAYOUT_UNDEFINED:
case VK_IMAGE_LAYOUT_PREINITIALIZED:
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
skip |=
LogError(device, "VUID-VkAttachmentReference2-layout-03077",
"%s: Layout for %s is %s but must not be VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR].",
function_name, error_type, string_VkImageLayout(reference.layout));
break;
// Only other layouts in VUs to be checked
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
// First need to make sure feature bit is enabled and the format is actually a depth and/or stencil
if (!enabled_features.core12.separateDepthStencilLayouts) {
skip |= LogError(device, "VUID-VkAttachmentReference2-separateDepthStencilLayouts-03313",
"%s: Layout for %s is %s but without separateDepthStencilLayouts enabled the layout must not "
"be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.",
function_name, error_type, string_VkImageLayout(reference.layout));
} else if (!FormatIsDepthOrStencil(attachment_format)) {
// using this over FormatIsColor() in case a multiplane and/or undefined format would sneak in
// "color" format is still an ambiguous term in spec (internal issue #2484)
skip |= LogError(
device, "VUID-VkAttachmentReference2-attachment-04754",
"%s: Layout for %s is %s but the attachment is a not a depth/stencil format (%s) so the layout must not "
"be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.",
function_name, error_type, string_VkImageLayout(reference.layout), string_VkFormat(attachment_format));
} else {
if ((reference.layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL) ||
(reference.layout == VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL)) {
if (FormatIsDepthOnly(attachment_format)) {
skip |= LogError(
device, "VUID-VkAttachmentReference2-attachment-04756",
"%s: Layout for %s is %s but the attachment is a depth-only format (%s) so the layout must not "
"be VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.",
function_name, error_type, string_VkImageLayout(reference.layout),
string_VkFormat(attachment_format));
}
} else {
// DEPTH_ATTACHMENT_OPTIMAL || DEPTH_READ_ONLY_OPTIMAL
if (FormatIsStencilOnly(attachment_format)) {
skip |= LogError(
device, "VUID-VkAttachmentReference2-attachment-04757",
"%s: Layout for %s is %s but the attachment is a depth-only format (%s) so the layout must not "
"be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL.",
function_name, error_type, string_VkImageLayout(reference.layout),
string_VkFormat(attachment_format));
}
if (attachment_reference_stencil_layout) {
// This check doesn't rely on the aspect mask value
const VkImageLayout stencil_layout = attachment_reference_stencil_layout->stencilLayout;
// clang-format off
if (stencil_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
stencil_layout == VK_IMAGE_LAYOUT_PREINITIALIZED ||
stencil_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
stencil_layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
skip |= LogError(device, "VUID-VkAttachmentReferenceStencilLayout-stencilLayout-03318",
"%s: In %s with pNext chain instance VkAttachmentReferenceStencilLayout, "
"the stencilLayout (%s) must not be "
"VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PREINITIALIZED, "
"VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, or "
"VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.",
function_name, error_type, string_VkImageLayout(stencil_layout));
}
// clang-format on
} else if (FormatIsDepthAndStencil(attachment_format)) {
skip |= LogError(
device, "VUID-VkAttachmentReference2-attachment-04755",
"%s: Layout for %s is %s but the attachment is a depth and stencil format (%s) so if the layout is "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL there needs "
"to be a VkAttachmentReferenceStencilLayout in the pNext chain to set the seperate stencil layout "
"because the separateDepthStencilLayouts feature is enabled.",
function_name, error_type, string_VkImageLayout(reference.layout),
string_VkFormat(attachment_format));
}
}
}
break;
default:
break;
}
}
return skip;
}
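// Validates attachment descriptions and per-subpass attachment references: warns when loadOp /
// stencilLoadOp is LOAD with an UNDEFINED initialLayout, checks each subpass pipeline bind point,
// and validates every attachment reference's index, layout, and aspect mask while tracking usage
// through AddAttachmentUse().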
bool CoreChecks::ValidateRenderpassAttachmentUsage(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo,
const char *function_name) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
VkFormat format = pCreateInfo->pAttachments[i].format;
if (pCreateInfo->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
if ((FormatIsColor(format) || FormatHasDepth(format)) &&
pCreateInfo->pAttachments[i].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
skip |= LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass,
"%s: Render pass pAttachment[%u] has loadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout == "
"VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using "
"VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the "
"render pass.",
function_name, i);
}
if (FormatHasStencil(format) && pCreateInfo->pAttachments[i].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
skip |=
LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass,
"%s: Render pass pAttachment[%u] has stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout "
"== VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using "
"VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the "
"render pass.",
function_name, i);
}
}
}
// Track when we're observing the first use of an attachment
std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i];
std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount);
std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount);
// Track if attachments are used as input as well as another type
layer_data::unordered_set<uint32_t> input_attachments;
if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS &&
subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_SUBPASS_SHADING_HUAWEI) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pipelineBindPoint-04953"
: "VUID-VkSubpassDescription-pipelineBindPoint-04952";
skip |= LogError(device, vuid,
"%s: Pipeline bind point for pSubpasses[%d] must be VK_PIPELINE_BIND_POINT_GRAPHICS or "
"VK_PIPELINE_BIND_POINT_SUBPASS_SHADING_HUAWEI.",
function_name, i);
}
// Check input attachments first
// - so we can detect first-use-as-input for VU #00349
// - if other color or depth/stencil is also input, it limits valid layouts
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
auto const &attachment_ref = subpass.pInputAttachments[j];
const uint32_t attachment_index = attachment_ref.attachment;
const VkImageAspectFlags aspect_mask = attachment_ref.aspectMask;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
input_attachments.insert(attachment_index);
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pInputAttachments[" + std::to_string(j) + "]";
skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(),
function_name);
if (aspect_mask & VK_IMAGE_ASPECT_METADATA_BIT) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-02801"
: "VUID-VkInputAttachmentAspectReference-aspectMask-01964";
skip |= LogError(
device, vuid,
"%s: Aspect mask for input attachment reference %d in subpass %d includes VK_IMAGE_ASPECT_METADATA_BIT.",
function_name, j, i);
} else if (aspect_mask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-04563"
: "VUID-VkInputAttachmentAspectReference-aspectMask-02250";
skip |= LogError(device, vuid,
"%s: Aspect mask for input attachment reference %d in subpass %d includes "
"VK_IMAGE_ASPECT_MEMORY_PLANE_*_BIT_EXT bit.",
function_name, j, i);
}
// safe to dereference pCreateInfo->pAttachments[]
if (attachment_index < pCreateInfo->attachmentCount) {
const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format;
skip |= ValidateAttachmentReference(rp_version, attachment_ref, attachment_format, true, error_type.c_str(),
function_name);
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_INPUT,
attachment_ref.layout);
vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-attachment-02525" : "VUID-VkRenderPassCreateInfo-pNext-01963";
skip |= ValidateImageAspectMask(VK_NULL_HANDLE, attachment_format, aspect_mask, function_name, vuid);
if (attach_first_use[attachment_index]) {
skip |=
ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pInputAttachments[j].layout,
attachment_index, pCreateInfo->pAttachments[attachment_index]);
bool used_as_depth = (subpass.pDepthStencilAttachment != NULL &&
subpass.pDepthStencilAttachment->attachment == attachment_index);
bool used_as_color = false;
for (uint32_t k = 0; !used_as_depth && !used_as_color && k < subpass.colorAttachmentCount; ++k) {
used_as_color = (subpass.pColorAttachments[k].attachment == attachment_index);
}
if (!used_as_depth && !used_as_color &&
pCreateInfo->pAttachments[attachment_index].loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-loadOp-03064" : "VUID-VkSubpassDescription-loadOp-00846";
skip |= LogError(device, vuid,
"%s: attachment %u is first used as an input attachment in %s with loadOp set to "
"VK_ATTACHMENT_LOAD_OP_CLEAR.",
function_name, attachment_index, error_type.c_str());
}
}
attach_first_use[attachment_index] = false;
const VkFormatFeatureFlags valid_flags =
VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
if ((format_features & valid_flags) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pInputAttachments-02897"
: "VUID-VkSubpassDescription-pInputAttachments-02647";
skip |=
LogError(device, vuid,
"%s: Input attachment %s format (%s) does not contain VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT "
"| VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
function_name, error_type.c_str(), string_VkFormat(attachment_format));
}
}
if (rp_version == RENDER_PASS_VERSION_2) {
// These are validated automatically as part of parameter validation for create renderpass 1
// as they are in a struct that only applies to input attachments - not so for v2.
// Check for 0
if (aspect_mask == 0) {
skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02800",
"%s: Input attachment %s aspect mask must not be 0.", function_name, error_type.c_str());
} else {
const VkImageAspectFlags valid_bits =
(VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
VK_IMAGE_ASPECT_METADATA_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT |
VK_IMAGE_ASPECT_PLANE_2_BIT | VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT);
// Check for valid aspect mask bits
if (aspect_mask & ~valid_bits) {
skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02799",
"%s: Input attachment %s aspect mask (0x%" PRIx32 ")is invalid.", function_name,
error_type.c_str(), aspect_mask);
}
}
}
// Validate layout
vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437";
switch (attachment_ref.layout) {
case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_GENERAL:
case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
break; // valid layouts
default:
skip |= LogError(device, vuid,
"%s: %s layout is %s but input attachments must be "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL, or "
"VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR",
function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout));
break;
}
}
}
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pPreserveAttachments[" + std::to_string(j) + "]";
uint32_t attachment = subpass.pPreserveAttachments[j];
if (attachment == VK_ATTACHMENT_UNUSED) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-03073" : "VUID-VkSubpassDescription-attachment-00853";
skip |= LogError(device, vuid, "%s: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", function_name, j);
} else {
skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(),
function_name);
if (attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_PRESERVE,
VkImageLayout(0) /* preserve doesn't have any layout */);
}
}
}
bool subpass_performs_resolve = false;
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (subpass.pResolveAttachments) {
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pResolveAttachments[" + std::to_string(j) + "]";
auto const &attachment_ref = subpass.pResolveAttachments[j];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount,
error_type.c_str(), function_name);
// safe to dereference pCreateInfo->pAttachments[]
if (attachment_ref.attachment < pCreateInfo->attachmentCount) {
const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_ref.attachment].format;
skip |= ValidateAttachmentReference(rp_version, attachment_ref, attachment_format, false,
error_type.c_str(), function_name);
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_RESOLVE, attachment_ref.layout);
subpass_performs_resolve = true;
if (pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03067"
: "VUID-VkSubpassDescription-pResolveAttachments-00849";
skip |= LogError(
device, vuid,
"%s: Subpass %u requests multisample resolve into attachment %u, which must "
"have VK_SAMPLE_COUNT_1_BIT but has %s.",
function_name, i, attachment_ref.attachment,
string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples));
}
const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
if ((format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-02899"
: "VUID-VkSubpassDescription-pResolveAttachments-02649";
skip |= LogError(device, vuid,
"%s: Resolve attachment %s format (%s) does not contain "
"VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.",
function_name, error_type.c_str(), string_VkFormat(attachment_format));
}
// VK_QCOM_render_pass_shader_resolve check of resolve attachments
if ((subpass.flags & VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM) != 0) {
vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-flags-04907" : "VUID-VkSubpassDescription-flags-03341";
skip |= LogError(
device, vuid,
"%s: Subpass %u enables shader resolve, which requires every element of pResolve attachments"
" must be VK_ATTACHMENT_UNUSED, but element %u contains a reference to attachment %u instead.",
function_name, i, j, attachment_ref.attachment);
}
}
}
}
}
if (subpass.pDepthStencilAttachment) {
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pDepthStencilAttachment";
const uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
const VkImageLayout image_layout = subpass.pDepthStencilAttachment->layout;
if (attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(),
function_name);
// safe to dereference pCreateInfo->pAttachments[]
if (attachment < pCreateInfo->attachmentCount) {
const VkFormat attachment_format = pCreateInfo->pAttachments[attachment].format;
skip |= ValidateAttachmentReference(rp_version, *subpass.pDepthStencilAttachment, attachment_format, false,
error_type.c_str(), function_name);
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_DEPTH,
image_layout);
if (attach_first_use[attachment]) {
skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, image_layout, attachment,
pCreateInfo->pAttachments[attachment]);
}
attach_first_use[attachment] = false;
const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
if ((format_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-02900"
: "VUID-VkSubpassDescription-pDepthStencilAttachment-02650";
skip |= LogError(device, vuid,
"%s: Depth Stencil %s format (%s) does not contain "
"VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
function_name, error_type.c_str(), string_VkFormat(attachment_format));
}
}
// Check for valid imageLayout
vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437";
switch (image_layout) {
case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_GENERAL:
break; // valid layouts
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR:
case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR:
if (input_attachments.find(attachment) != input_attachments.end()) {
skip |= LogError(
device, vuid,
"%s: %s is also an input attachment so the layout (%s) must not be "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR "
"or VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.",
function_name, error_type.c_str(), string_VkImageLayout(image_layout));
}
break;
default:
skip |= LogError(device, vuid,
"%s: %s layout is %s but depth/stencil attachments must be "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_GENERAL, "
"VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR or"
"VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.",
function_name, error_type.c_str(), string_VkImageLayout(image_layout));
break;
}
}
}
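// All color attachments in a subpass must share the same sample count; remember the last color attachment
// reference seen so each subsequent one can be compared against it.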
uint32_t last_sample_count_attachment = VK_ATTACHMENT_UNUSED;
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pColorAttachments[" + std::to_string(j) + "]";
auto const &attachment_ref = subpass.pColorAttachments[j];
const uint32_t attachment_index = attachment_ref.attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(),
function_name);
// safe to dereference pCreateInfo->pAttachments[]
if (attachment_index < pCreateInfo->attachmentCount) {
const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format;
skip |= ValidateAttachmentReference(rp_version, attachment_ref, attachment_format, false, error_type.c_str(),
function_name);
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_COLOR,
attachment_ref.layout);
VkSampleCountFlagBits current_sample_count = pCreateInfo->pAttachments[attachment_index].samples;
if (last_sample_count_attachment != VK_ATTACHMENT_UNUSED) {
VkSampleCountFlagBits last_sample_count =
pCreateInfo->pAttachments[subpass.pColorAttachments[last_sample_count_attachment].attachment].samples;
if (current_sample_count != last_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-03069"
: "VUID-VkSubpassDescription-pColorAttachments-01417";
skip |= LogError(
device, vuid,
"%s: Subpass %u attempts to render to color attachments with inconsistent sample counts."
"Color attachment ref %u has sample count %s, whereas previous color attachment ref %u has "
"sample count %s.",
function_name, i, j, string_VkSampleCountFlagBits(current_sample_count),
last_sample_count_attachment, string_VkSampleCountFlagBits(last_sample_count));
}
}
last_sample_count_attachment = j;
if (subpass_performs_resolve && current_sample_count == VK_SAMPLE_COUNT_1_BIT) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03066"
: "VUID-VkSubpassDescription-pResolveAttachments-00848";
skip |= LogError(device, vuid,
"%s: Subpass %u requests multisample resolve from attachment %u which has "
"VK_SAMPLE_COUNT_1_BIT.",
function_name, i, attachment_index);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED &&
subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) {
const auto depth_stencil_sample_count =
pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
if (IsExtEnabled(device_extensions.vk_amd_mixed_attachment_samples)) {
if (current_sample_count > depth_stencil_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-03070"
: "VUID-VkSubpassDescription-pColorAttachments-01506";
skip |=
LogError(device, vuid, "%s: %s has %s which is larger than depth/stencil attachment %s.",
function_name, error_type.c_str(), string_VkSampleCountFlagBits(current_sample_count),
string_VkSampleCountFlagBits(depth_stencil_sample_count));
break;
}
}
if (!IsExtEnabled(device_extensions.vk_amd_mixed_attachment_samples) &&
!IsExtEnabled(device_extensions.vk_nv_framebuffer_mixed_samples) &&
current_sample_count != depth_stencil_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-03071"
: "VUID-VkSubpassDescription-pDepthStencilAttachment-01418";
skip |= LogError(device, vuid,
"%s: Subpass %u attempts to render to use a depth/stencil attachment with sample "
"count that differs "
"from color attachment %u."
"The depth attachment ref has sample count %s, whereas color attachment ref %u has "
"sample count %s.",
function_name, i, j, string_VkSampleCountFlagBits(depth_stencil_sample_count), j,
string_VkSampleCountFlagBits(current_sample_count));
break;
}
}
const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
if ((format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-02898"
: "VUID-VkSubpassDescription-pColorAttachments-02648";
skip |= LogError(device, vuid,
"%s: Color attachment %s format (%s) does not contain "
"VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.",
function_name, error_type.c_str(), string_VkFormat(attachment_format));
}
if (attach_first_use[attachment_index]) {
skip |=
ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pColorAttachments[j].layout,
attachment_index, pCreateInfo->pAttachments[attachment_index]);
}
attach_first_use[attachment_index] = false;
}
// Check for valid imageLayout
vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437";
switch (attachment_ref.layout) {
case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
case VK_IMAGE_LAYOUT_GENERAL:
break; // valid layouts
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR:
if (input_attachments.find(attachment_index) != input_attachments.end()) {
skip |= LogError(device, vuid,
"%s: %s is also an input attachment so the layout (%s) must not be "
"VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.",
function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout));
}
break;
default:
skip |= LogError(device, vuid,
"%s: %s layout is %s but color attachments must be "
"VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR, "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or "
"VK_IMAGE_LAYOUT_GENERAL.",
function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout));
break;
}
}
if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
subpass.pResolveAttachments[j].attachment < pCreateInfo->attachmentCount) {
if (attachment_index == VK_ATTACHMENT_UNUSED) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03065"
: "VUID-VkSubpassDescription-pResolveAttachments-00847";
skip |= LogError(device, vuid,
"%s: Subpass %u requests multisample resolve from attachment %u which has "
"attachment=VK_ATTACHMENT_UNUSED.",
function_name, i, attachment_index);
} else {
const auto &color_desc = pCreateInfo->pAttachments[attachment_index];
const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
if (color_desc.format != resolve_desc.format) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03068"
: "VUID-VkSubpassDescription-pResolveAttachments-00850";
skip |= LogError(device, vuid,
"%s: %s resolves to an attachment with a "
"different format. color format: %u, resolve format: %u.",
function_name, error_type.c_str(), color_desc.format, resolve_desc.format);
}
}
}
}
}
return skip;
}
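// Validation shared by vkCreateRenderPass and vkCreateRenderPass2: attachment usage, the subpass dependency DAG,
// multiview view masks and correlated view masks, and each entry of pDependencies.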
bool CoreChecks::ValidateCreateRenderPass(VkDevice device, RenderPassCreateVersion rp_version,
const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
skip |= ValidateRenderpassAttachmentUsage(rp_version, pCreateInfo, function_name);
skip |= ValidateRenderPassDAG(rp_version, pCreateInfo);
// Validate multiview correlation and view masks
bool view_mask_zero = false;
bool view_mask_non_zero = false;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i];
if (subpass.viewMask != 0) {
view_mask_non_zero = true;
} else {
view_mask_zero = true;
}
if ((subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX) != 0 &&
(subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-flags-03076" : "VUID-VkSubpassDescription-flags-00856";
skip |= LogError(device, vuid,
"%s: The flags parameter of subpass description %u includes "
"VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX but does not also include "
"VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX.",
function_name, i);
}
}
if (rp_version == RENDER_PASS_VERSION_2) {
if (view_mask_non_zero && view_mask_zero) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03058",
"%s: Some view masks are non-zero whilst others are zero.", function_name);
}
if (view_mask_zero && pCreateInfo->correlatedViewMaskCount != 0) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03057",
"%s: Multiview is not enabled but correlation masks are still provided", function_name);
}
}
uint32_t aggregated_cvms = 0;
for (uint32_t i = 0; i < pCreateInfo->correlatedViewMaskCount; ++i) {
if (aggregated_cvms & pCreateInfo->pCorrelatedViewMasks[i]) {
vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pCorrelatedViewMasks-03056"
: "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841";
skip |=
LogError(device, vuid, "%s: pCorrelatedViewMasks[%u] contains a previously appearing view bit.", function_name, i);
}
aggregated_cvms |= pCreateInfo->pCorrelatedViewMasks[i];
}
LogObjectList objects(device);
auto func_name = use_rp2 ? Func::vkCreateRenderPass2 : Func::vkCreateRenderPass;
auto structure = use_rp2 ? Struct::VkSubpassDependency2 : Struct::VkSubpassDependency;
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
auto const &dependency = pCreateInfo->pDependencies[i];
Location loc(func_name, structure, Field::pDependencies, i);
skip |= ValidateSubpassDependency(objects, loc, dependency);
}
return skip;
}
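// vkCreateRenderPass (v1) entry point: validate the pNext structs that only exist in the v1 path, then convert the
// create info to VkRenderPassCreateInfo2 and run the shared validation.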
bool CoreChecks::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
bool skip = false;
// Handle extension structs from KHR_multiview and KHR_maintenance2 that can only be validated for RP1 (indices out of bounds)
const VkRenderPassMultiviewCreateInfo *multiview_info = LvlFindInChain<VkRenderPassMultiviewCreateInfo>(pCreateInfo->pNext);
if (multiview_info) {
if (multiview_info->subpassCount && multiview_info->subpassCount != pCreateInfo->subpassCount) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01928",
"vkCreateRenderPass(): Subpass count is %u but multiview info has a subpass count of %u.",
pCreateInfo->subpassCount, multiview_info->subpassCount);
} else if (multiview_info->dependencyCount && multiview_info->dependencyCount != pCreateInfo->dependencyCount) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01929",
"vkCreateRenderPass(): Dependency count is %u but multiview info has a dependency count of %u.",
pCreateInfo->dependencyCount, multiview_info->dependencyCount);
}
bool all_zero = true;
bool all_not_zero = true;
for (uint32_t i = 0; i < multiview_info->subpassCount; ++i) {
all_zero &= multiview_info->pViewMasks[i] == 0;
all_not_zero &= !(multiview_info->pViewMasks[i] == 0);
}
if (!all_zero && !all_not_zero) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo-pNext-02513",
"vkCreateRenderPass(): elements of VkRenderPassMultiviewCreateInfo pViewMasks must all be either 0 or not 0.");
}
}
const VkRenderPassInputAttachmentAspectCreateInfo *input_attachment_aspect_info =
LvlFindInChain<VkRenderPassInputAttachmentAspectCreateInfo>(pCreateInfo->pNext);
if (input_attachment_aspect_info) {
for (uint32_t i = 0; i < input_attachment_aspect_info->aspectReferenceCount; ++i) {
uint32_t subpass = input_attachment_aspect_info->pAspectReferences[i].subpass;
uint32_t attachment = input_attachment_aspect_info->pAspectReferences[i].inputAttachmentIndex;
if (subpass >= pCreateInfo->subpassCount) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01926",
"vkCreateRenderPass(): Subpass index %u specified by input attachment aspect info %u is greater "
"than the subpass "
"count of %u for this render pass.",
subpass, i, pCreateInfo->subpassCount);
} else if (pCreateInfo->pSubpasses && attachment >= pCreateInfo->pSubpasses[subpass].inputAttachmentCount) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01927",
"vkCreateRenderPass(): Input attachment index %u specified by input attachment aspect info %u is "
"greater than the "
"input attachment count of %u for this subpass.",
attachment, i, pCreateInfo->pSubpasses[subpass].inputAttachmentCount);
}
}
}
const VkRenderPassFragmentDensityMapCreateInfoEXT *fragment_density_map_info =
LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(pCreateInfo->pNext);
if (fragment_density_map_info) {
if (fragment_density_map_info->fragmentDensityMapAttachment.attachment != VK_ATTACHMENT_UNUSED) {
if (fragment_density_map_info->fragmentDensityMapAttachment.attachment >= pCreateInfo->attachmentCount) {
skip |= LogError(device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02547",
"vkCreateRenderPass(): fragmentDensityMapAttachment %u must be less than attachmentCount %u of "
"for this render pass.",
fragment_density_map_info->fragmentDensityMapAttachment.attachment, pCreateInfo->attachmentCount);
} else {
if (!(fragment_density_map_info->fragmentDensityMapAttachment.layout ==
VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT ||
fragment_density_map_info->fragmentDensityMapAttachment.layout == VK_IMAGE_LAYOUT_GENERAL)) {
skip |= LogError(device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02549",
"vkCreateRenderPass(): Layout of fragmentDensityMapAttachment %u' must be equal to "
"VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT, or VK_IMAGE_LAYOUT_GENERAL.",
fragment_density_map_info->fragmentDensityMapAttachment.attachment);
}
if (!(pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].loadOp ==
VK_ATTACHMENT_LOAD_OP_LOAD ||
pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].loadOp ==
VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
skip |= LogError(
device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02550",
"vkCreateRenderPass(): FragmentDensityMapAttachment %u' must reference an attachment with a loadOp "
"equal to VK_ATTACHMENT_LOAD_OP_LOAD or VK_ATTACHMENT_LOAD_OP_DONT_CARE.",
fragment_density_map_info->fragmentDensityMapAttachment.attachment);
}
if (pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].storeOp !=
VK_ATTACHMENT_STORE_OP_DONT_CARE) {
skip |= LogError(
device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02551",
"vkCreateRenderPass(): FragmentDensityMapAttachment %u' must reference an attachment with a storeOp "
"equal to VK_ATTACHMENT_STORE_OP_DONT_CARE.",
fragment_density_map_info->fragmentDensityMapAttachment.attachment);
}
}
}
}
if (!skip) {
safe_VkRenderPassCreateInfo2 create_info_2;
ConvertVkRenderPassCreateInfoToV2KHR(*pCreateInfo, &create_info_2);
skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_1, create_info_2.ptr(), "vkCreateRenderPass()");
}
return skip;
}
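// Validate any VkSubpassDescriptionDepthStencilResolve chained to a subpass: the resolve attachment reference, its
// sample count and format, and the depth/stencil resolve modes against the core Vulkan 1.2 properties.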
bool CoreChecks::ValidateDepthStencilResolve(const VkPhysicalDeviceVulkan12Properties &core12_props,
const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const {
bool skip = false;
// If the pNext list of VkSubpassDescription2 includes a VkSubpassDescriptionDepthStencilResolve structure,
// then that structure describes depth/stencil resolve operations for the subpass.
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i];
const auto *resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass.pNext);
if (resolve == nullptr) {
continue;
}
const bool resolve_attachment_not_unused = (resolve->pDepthStencilResolveAttachment != nullptr &&
resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED);
const bool valid_resolve_attachment_index =
(resolve_attachment_not_unused && resolve->pDepthStencilResolveAttachment->attachment < pCreateInfo->attachmentCount);
const bool ds_attachment_not_unused =
(subpass.pDepthStencilAttachment != nullptr && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED);
const bool valid_ds_attachment_index =
(ds_attachment_not_unused && subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount);
if (resolve_attachment_not_unused && subpass.pDepthStencilAttachment != nullptr &&
subpass.pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03177",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u, but pDepthStencilAttachment=VK_ATTACHMENT_UNUSED.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (resolve_attachment_not_unused && resolve->depthResolveMode == VK_RESOLVE_MODE_NONE &&
resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03178",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u, but both depth and stencil resolve modes are "
"VK_RESOLVE_MODE_NONE.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (resolve_attachment_not_unused && valid_ds_attachment_index &&
pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
skip |= LogError(
device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03179",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u. However pDepthStencilAttachment has sample count=VK_SAMPLE_COUNT_1_BIT.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (valid_resolve_attachment_index &&
pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03180",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u which has sample count=VK_SAMPLE_COUNT_1_BIT.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
}
// Only dereference pCreateInfo->pAttachments[] when the resolve attachment index is within range
if (valid_resolve_attachment_index) {
VkFormat resolve_format = pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].format;
const VkFormatFeatureFlags potential_format_features = GetPotentialFormatFeatures(resolve_format);
if ((potential_format_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
skip |= LogError(
device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-02651",
"%s: Subpass %" PRIu32
" includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %" PRIu32
" with a format (%s) whose features do not contain VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment, string_VkFormat(resolve_format));
}
}
VkFormat depth_stencil_attachment_format =
(valid_ds_attachment_index ? pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].format
: VK_FORMAT_UNDEFINED);
VkFormat depth_stencil_resolve_attachment_format =
(valid_resolve_attachment_index ? pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].format
: VK_FORMAT_UNDEFINED);
if (valid_ds_attachment_index && valid_resolve_attachment_index) {
const auto resolve_depth_size = FormatDepthSize(depth_stencil_resolve_attachment_format);
const auto resolve_stencil_size = FormatStencilSize(depth_stencil_resolve_attachment_format);
if (resolve_depth_size > 0 && ((FormatDepthSize(depth_stencil_attachment_format) != resolve_depth_size) ||
(FormatDepthNumericalType(depth_stencil_attachment_format) !=
FormatDepthNumericalType(depth_stencil_resolve_attachment_format)))) {
skip |= LogError(
device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03181",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u which has a depth component (size %u). The depth component "
"of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment, resolve_depth_size,
FormatDepthSize(depth_stencil_attachment_format));
}
if (resolve_stencil_size > 0 && ((FormatStencilSize(depth_stencil_attachment_format) != resolve_stencil_size) ||
(FormatStencilNumericalType(depth_stencil_attachment_format) !=
FormatStencilNumericalType(depth_stencil_resolve_attachment_format)))) {
skip |= LogError(
device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03182",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u which has a stencil component (size %u). The stencil component "
"of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment, resolve_stencil_size,
FormatStencilSize(depth_stencil_attachment_format));
}
}
if (!(resolve->depthResolveMode == VK_RESOLVE_MODE_NONE ||
resolve->depthResolveMode & core12_props.supportedDepthResolveModes)) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-depthResolveMode-03183",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with invalid depthResolveMode=%u.",
function_name, i, resolve->depthResolveMode);
}
if (!(resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE ||
resolve->stencilResolveMode & core12_props.supportedStencilResolveModes)) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-stencilResolveMode-03184",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with invalid stencilResolveMode=%u.",
function_name, i, resolve->stencilResolveMode);
}
if (valid_resolve_attachment_index && FormatIsDepthAndStencil(depth_stencil_resolve_attachment_format) &&
core12_props.independentResolve == VK_FALSE && core12_props.independentResolveNone == VK_FALSE &&
!(resolve->depthResolveMode == resolve->stencilResolveMode)) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03185",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical.",
function_name, i, resolve->depthResolveMode, resolve->stencilResolveMode);
}
if (valid_resolve_attachment_index && FormatIsDepthAndStencil(depth_stencil_resolve_attachment_format) &&
core12_props.independentResolve == VK_FALSE && core12_props.independentResolveNone == VK_TRUE &&
!(resolve->depthResolveMode == resolve->stencilResolveMode || resolve->depthResolveMode == VK_RESOLVE_MODE_NONE ||
resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE)) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03186",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical, or "
"one of them must be %u.",
function_name, i, resolve->depthResolveMode, resolve->stencilResolveMode, VK_RESOLVE_MODE_NONE);
}
// VK_QCOM_render_pass_shader_resolve check of depth/stencil attachment
if (((subpass.flags & VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM) != 0) && (resolve_attachment_not_unused)) {
skip |= LogError(device, "VUID-VkSubpassDescription-flags-03342",
"%s: Subpass %u enables shader resolve, which requires the depth/stencil resolve attachment"
" must be VK_ATTACHMENT_UNUSED, but a reference to attachment %u was found instead.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
}
}
return skip;
}
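// Validation common to vkCreateRenderPass2 and vkCreateRenderPass2KHR: depth/stencil resolve (when
// VK_KHR_depth_stencil_resolve is enabled), fragment shading rate attachments, and the checks shared with v1.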
bool CoreChecks::ValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
const char *function_name) const {
bool skip = false;
if (IsExtEnabled(device_extensions.vk_khr_depth_stencil_resolve)) {
skip |= ValidateDepthStencilResolve(phys_dev_props_core12, pCreateInfo, function_name);
}
skip |= ValidateFragmentShadingRateAttachments(device, pCreateInfo);
safe_VkRenderPassCreateInfo2 create_info_2(pCreateInfo);
skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_2, create_info_2.ptr(), function_name);
return skip;
}
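// For each attachment description, collect the subpasses that use it as a fragment shading rate attachment, validate
// those references (format features, layout, and texel size limits), and flag any subpass that also uses the same
// attachment as a color, resolve, input, or depth/stencil (resolve) attachment.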
bool CoreChecks::ValidateFragmentShadingRateAttachments(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo) const {
bool skip = false;
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
for (uint32_t attachment_description = 0; attachment_description < pCreateInfo->attachmentCount; ++attachment_description) {
std::vector<uint32_t> used_as_fragment_shading_rate_attachment;
// Prepass to find any uses of this attachment as a fragment shading rate attachment and validate those references independently
for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) {
const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment =
LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(pCreateInfo->pSubpasses[subpass].pNext);
if (fragment_shading_rate_attachment && fragment_shading_rate_attachment->pFragmentShadingRateAttachment) {
const VkAttachmentReference2 &attachment_reference =
*(fragment_shading_rate_attachment->pFragmentShadingRateAttachment);
if (attachment_reference.attachment == attachment_description) {
used_as_fragment_shading_rate_attachment.push_back(subpass);
}
if (((pCreateInfo->flags & VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM) != 0) &&
(attachment_reference.attachment != VK_ATTACHMENT_UNUSED)) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-flags-04521",
"vkCreateRenderPass2: Render pass includes VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM but "
"a fragment shading rate attachment is specified in subpass %u.",
subpass);
}
if (attachment_reference.attachment != VK_ATTACHMENT_UNUSED) {
const VkFormatFeatureFlags potential_format_features =
GetPotentialFormatFeatures(pCreateInfo->pAttachments[attachment_reference.attachment].format);
if (!(potential_format_features & VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR)) {
skip |=
LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-04586",
"vkCreateRenderPass2: Attachment description %u is used in subpass %u as a fragment "
"shading rate attachment, but specifies format %s, which does not support "
"VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR.",
attachment_reference.attachment, subpass,
string_VkFormat(pCreateInfo->pAttachments[attachment_reference.attachment].format));
}
if (attachment_reference.layout != VK_IMAGE_LAYOUT_GENERAL &&
attachment_reference.layout != VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR) {
skip |= LogError(
device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04524",
"vkCreateRenderPass2: Fragment shading rate attachment in subpass %u specifies a layout of %s.",
subpass, string_VkImageLayout(attachment_reference.layout));
}
if (!IsPowerOfTwo(fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width)) {
skip |=
LogError(device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04525",
"vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a "
"non-power-of-two texel width of %u.",
subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width);
}
if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width <
phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.width) {
skip |= LogError(
device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04526",
"vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel width of %u which "
"is lower than the advertised minimum width %u.",
subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width,
phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.width);
}
if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width >
phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.width) {
skip |= LogError(
device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04527",
"vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel width of %u which "
"is higher than the advertised maximum width %u.",
subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width,
phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.width);
}
if (!IsPowerOfTwo(fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height)) {
skip |=
LogError(device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04528",
"vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a "
"non-power-of-two texel height of %u.",
subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height);
}
if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height <
phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.height) {
skip |= LogError(
device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04529",
"vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel height of %u "
"which is lower than the advertised minimum height %u.",
subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height,
phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.height);
}
if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height >
phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.height) {
skip |= LogError(
device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04530",
"vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel height of %u "
"which is higher than the advertised maximum height %u.",
subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height,
phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.height);
}
uint32_t aspect_ratio = fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width /
fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height;
uint32_t inverse_aspect_ratio = fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height /
fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width;
if (aspect_ratio >
phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSizeAspectRatio) {
skip |= LogError(
device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04531",
"vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel size of %u by %u, "
"which has an aspect ratio %u, which is higher than the advertised maximum aspect ratio %u.",
subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width,
fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, aspect_ratio,
phys_dev_ext_props.fragment_shading_rate_props
.maxFragmentShadingRateAttachmentTexelSizeAspectRatio);
}
if (inverse_aspect_ratio >
phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSizeAspectRatio) {
skip |= LogError(
device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04532",
"vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel size of %u by %u, "
"which has an inverse aspect ratio of %u, which is higher than the advertised maximum aspect ratio "
"%u.",
subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width,
fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, inverse_aspect_ratio,
phys_dev_ext_props.fragment_shading_rate_props
.maxFragmentShadingRateAttachmentTexelSizeAspectRatio);
}
}
}
}
// Lambda function turning a vector of integers into a string
auto vector_to_string = [&](std::vector<uint32_t> vector) {
std::stringstream ss;
size_t size = vector.size();
for (size_t i = 0; i < size; i++) {
if (size == 2 && i == 1) {
ss << " and ";
} else if (size > 2 && i == size - 2) {
ss << ", and ";
} else if (i != 0) {
ss << ", ";
}
ss << vector[i];
}
return ss.str();
};
// Search for other uses of the same attachment
if (!used_as_fragment_shading_rate_attachment.empty()) {
for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) {
const VkSubpassDescription2 &subpass_info = pCreateInfo->pSubpasses[subpass];
const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve_attachment =
LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_info.pNext);
std::string fsr_attachment_subpasses_string = vector_to_string(used_as_fragment_shading_rate_attachment);
for (uint32_t attachment = 0; attachment < subpass_info.colorAttachmentCount; ++attachment) {
if (subpass_info.pColorAttachments[attachment].attachment == attachment_description) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
"vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
"subpass(es) %s but also as color attachment %u in subpass %u",
attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass);
}
}
for (uint32_t attachment = 0; attachment < subpass_info.colorAttachmentCount; ++attachment) {
if (subpass_info.pResolveAttachments &&
subpass_info.pResolveAttachments[attachment].attachment == attachment_description) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
"vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
"subpass(es) %s but also as color resolve attachment %u in subpass %u",
attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass);
}
}
for (uint32_t attachment = 0; attachment < subpass_info.inputAttachmentCount; ++attachment) {
if (subpass_info.pInputAttachments[attachment].attachment == attachment_description) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
"vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
"subpass(es) %s but also as input attachment %u in subpass %u",
attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass);
}
}
if (subpass_info.pDepthStencilAttachment) {
if (subpass_info.pDepthStencilAttachment->attachment == attachment_description) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
"vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
"subpass(es) %s but also as the depth/stencil attachment in subpass %u",
attachment_description, fsr_attachment_subpasses_string.c_str(), subpass);
}
}
if (depth_stencil_resolve_attachment && depth_stencil_resolve_attachment->pDepthStencilResolveAttachment) {
if (depth_stencil_resolve_attachment->pDepthStencilResolveAttachment->attachment ==
attachment_description) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
"vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
"subpass(es) %s but also as the depth/stencil resolve attachment in subpass %u",
attachment_description, fsr_attachment_subpasses_string.c_str(), subpass);
}
}
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, "vkCreateRenderPass2KHR()");
}
bool CoreChecks::PreCallValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, "vkCreateRenderPass2()");
}
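// Report an error if the given command buffer was not allocated at VK_COMMAND_BUFFER_LEVEL_PRIMARY.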
bool CoreChecks::ValidatePrimaryCommandBuffer(const CMD_BUFFER_STATE *pCB, char const *cmd_name, const char *error_code) const {
bool skip = false;
if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
skip |= LogError(pCB->commandBuffer(), error_code, "Cannot execute command %s on a secondary command buffer.", cmd_name);
}
return skip;
}
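// Check that the render area (or each device render area supplied through VkDeviceGroupRenderPassBeginInfo) has
// non-negative offsets and does not extend past the framebuffer dimensions.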
bool CoreChecks::VerifyRenderAreaBounds(const VkRenderPassBeginInfo *pRenderPassBegin, const char *func_name) const {
bool skip = false;
bool device_group = false;
uint32_t device_group_area_count = 0;
const VkDeviceGroupRenderPassBeginInfo *device_group_render_pass_begin_info =
LvlFindInChain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext);
if (IsExtEnabled(device_extensions.vk_khr_device_group)) {
device_group = true;
if (device_group_render_pass_begin_info) {
device_group_area_count = device_group_render_pass_begin_info->deviceRenderAreaCount;
}
}
const safe_VkFramebufferCreateInfo *framebuffer_info = &GetFramebufferState(pRenderPassBegin->framebuffer)->createInfo;
if (device_group && device_group_area_count > 0) {
for (uint32_t i = 0; i < device_group_render_pass_begin_info->deviceRenderAreaCount; ++i) {
const auto &deviceRenderArea = device_group_render_pass_begin_info->pDeviceRenderAreas[i];
if (deviceRenderArea.offset.x < 0) {
skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02854",
"%s: Cannot execute a render pass with renderArea not within the bound of the framebuffer, "
"VkDeviceGroupRenderPassBeginInfo::pDeviceRenderAreas[%" PRIu32 "].offset.x is negative (%" PRIi32
").",
func_name, i, deviceRenderArea.offset.x);
}
if (deviceRenderArea.offset.y < 0) {
skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02855",
"%s: Cannot execute a render pass with renderArea not within the bound of the framebuffer, "
"VkDeviceGroupRenderPassBeginInfo::pDeviceRenderAreas[%" PRIu32 "].offset.y is negative (%" PRIi32
").",
func_name, i, deviceRenderArea.offset.y);
}
if ((deviceRenderArea.offset.x + deviceRenderArea.extent.width) > framebuffer_info->width) {
skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02856",
"%s: Cannot execute a render pass with renderArea not within the bound of the framebuffer, "
"VkDeviceGroupRenderPassBeginInfo::pDeviceRenderAreas[%" PRIu32 "] offset.x (%" PRIi32
") + extent.width (%" PRIi32 ") is greater than framebuffer width (%" PRIi32 ").",
func_name, i, deviceRenderArea.offset.x, deviceRenderArea.extent.width, framebuffer_info->width);
}
if ((deviceRenderArea.offset.y + deviceRenderArea.extent.height) > framebuffer_info->height) {
skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02857",
"%s: Cannot execute a render pass with renderArea not within the bound of the framebuffer, "
"VkDeviceGroupRenderPassBeginInfo::pDeviceRenderAreas[%" PRIu32 "] offset.y (%" PRIi32
") + extent.height (%" PRIi32 ") is greater than framebuffer height (%" PRIi32 ").",
func_name, i, deviceRenderArea.offset.y, deviceRenderArea.extent.height, framebuffer_info->height);
}
}
} else {
if (pRenderPassBegin->renderArea.offset.x < 0) {
if (device_group) {
skip |=
LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02850",
"%s: Cannot execute a render pass with renderArea not within the bound of the framebuffer and pNext "
"of VkRenderPassBeginInfo does not contain VkDeviceGroupRenderPassBeginInfo or its "
"deviceRenderAreaCount is 0, renderArea.offset.x is negative (%" PRIi32 ") .",
func_name, pRenderPassBegin->renderArea.offset.x);
} else {
skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-renderArea-02846",
"%s: Cannot execute a render pass with renderArea not within the bound of the framebuffer, "
"renderArea.offset.x is negative (%" PRIi32 ") .",
func_name, pRenderPassBegin->renderArea.offset.x);
}
}
if (pRenderPassBegin->renderArea.offset.y < 0) {
if (device_group) {
skip |=
LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02851",
"%s: Cannot execute a render pass with renderArea not within the bound of the framebuffer and pNext "
"of VkRenderPassBeginInfo does not contain VkDeviceGroupRenderPassBeginInfo or its "
"deviceRenderAreaCount is 0, renderArea.offset.y is negative (%" PRIi32 ") .",
func_name, pRenderPassBegin->renderArea.offset.y);
} else {
skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-renderArea-02847",
"%s: Cannot execute a render pass with renderArea not within the bound of the framebuffer, "
"renderArea.offset.y is negative (%" PRIi32 ") .",
func_name, pRenderPassBegin->renderArea.offset.y);
}
}
if ((pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > framebuffer_info->width) {
if (device_group) {
skip |=
LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02852",
"%s: Cannot execute a render pass with renderArea not within the bound of the framebuffer and pNext "
"of VkRenderPassBeginInfo does not contain VkDeviceGroupRenderPassBeginInfo or its "
"deviceRenderAreaCount is 0, renderArea.offset.x (%" PRIi32 ") + renderArea.extent.width (%" PRIi32
") is greater than framebuffer width (%" PRIi32 ").",
func_name, pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.extent.width,
framebuffer_info->width);
} else {
skip |= LogError(
pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-renderArea-02848",
"%s: Cannot execute a render pass with renderArea not within the bound of the framebuffer, renderArea.offset.x "
"(%" PRIi32 ") + renderArea.extent.width (%" PRIi32 ") is greater than framebuffer width (%" PRIi32 ").",
func_name, pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.extent.width,
framebuffer_info->width);
}
}
if ((pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > framebuffer_info->height) {
if (device_group) {
skip |=
LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02853",
"%s: Cannot execute a render pass with renderArea not within the bound of the framebuffer and pNext "
"of VkRenderPassBeginInfo does not contain VkDeviceGroupRenderPassBeginInfo or its "
"deviceRenderAreaCount is 0, renderArea.offset.y (%" PRIi32 ") + renderArea.extent.height (%" PRIi32
") is greater than framebuffer height (%" PRIi32 ").",
func_name, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.height,
framebuffer_info->height);
} else {
skip |= LogError(
pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-renderArea-02849",
"%s: Cannot execute a render pass with renderArea not within the bound of the framebuffer, renderArea.offset.y "
"(%" PRIi32 ") + renderArea.extent.height (%" PRIi32 ") is greater than framebuffer height (%" PRIi32 ").",
func_name, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.height,
framebuffer_info->height);
}
}
}
return skip;
}
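// Validate image views provided via VkRenderPassAttachmentBeginInfo (imageless framebuffer) against both the
// VkFramebufferAttachmentImageInfo the framebuffer was created with and the render pass attachment descriptions.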
bool CoreChecks::VerifyFramebufferAndRenderPassImageViews(const VkRenderPassBeginInfo *pRenderPassBeginInfo,
const char *func_name) const {
bool skip = false;
const VkRenderPassAttachmentBeginInfo *render_pass_attachment_begin_info =
LvlFindInChain<VkRenderPassAttachmentBeginInfo>(pRenderPassBeginInfo->pNext);
if (render_pass_attachment_begin_info && render_pass_attachment_begin_info->attachmentCount != 0) {
const safe_VkFramebufferCreateInfo *framebuffer_create_info =
&GetFramebufferState(pRenderPassBeginInfo->framebuffer)->createInfo;
const VkFramebufferAttachmentsCreateInfo *framebuffer_attachments_create_info =
LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(framebuffer_create_info->pNext);
if ((framebuffer_create_info->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03207",
"%s: Image views specified at render pass begin, but framebuffer not created with "
"VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT",
func_name);
} else if (framebuffer_attachments_create_info) {
if (framebuffer_attachments_create_info->attachmentImageInfoCount !=
render_pass_attachment_begin_info->attachmentCount) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03208",
"%s: %u image views specified at render pass begin, but framebuffer "
"created expecting %u attachments",
func_name, render_pass_attachment_begin_info->attachmentCount,
framebuffer_attachments_create_info->attachmentImageInfoCount);
} else {
const safe_VkRenderPassCreateInfo2 *render_pass_create_info =
&GetRenderPassState(pRenderPassBeginInfo->renderPass)->createInfo;
for (uint32_t i = 0; i < render_pass_attachment_begin_info->attachmentCount; ++i) {
const auto image_view_state = GetImageViewState(render_pass_attachment_begin_info->pAttachments[i]);
const VkImageViewCreateInfo *image_view_create_info = &image_view_state->create_info;
const auto &subresource_range = image_view_state->normalized_subresource_range;
const VkFramebufferAttachmentImageInfo *framebuffer_attachment_image_info =
&framebuffer_attachments_create_info->pAttachmentImageInfos[i];
const VkImageCreateInfo *image_create_info = &GetImageState(image_view_create_info->image)->createInfo;
if (framebuffer_attachment_image_info->flags != image_create_info->flags) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03209",
"%s: Image view #%u created from an image with flags set as 0x%X, "
"but image info #%u used to create the framebuffer had flags set as 0x%X",
func_name, i, image_create_info->flags, i, framebuffer_attachment_image_info->flags);
}
if (framebuffer_attachment_image_info->usage != image_view_state->inherited_usage) {
// Give clearer message if this error is due to the "inherited" part or not
if (image_create_info->usage == image_view_state->inherited_usage) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-04627",
"%s: Image view #%u created from an image with usage set as 0x%X, "
"but image info #%u used to create the framebuffer had usage set as 0x%X",
func_name, i, image_create_info->usage, i, framebuffer_attachment_image_info->usage);
} else {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-04627",
"%s: Image view #%u created from an image with usage set as 0x%X but using "
"VkImageViewUsageCreateInfo the inherited usage is the subset 0x%X "
"and the image info #%u used to create the framebuffer had usage set as 0x%X",
func_name, i, image_create_info->usage, image_view_state->inherited_usage, i,
framebuffer_attachment_image_info->usage);
}
}
uint32_t view_width = image_create_info->extent.width >> subresource_range.baseMipLevel;
if (framebuffer_attachment_image_info->width != view_width) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03211",
"%s: Image view #%u created from an image subresource with width set as %u, "
"but image info #%u used to create the framebuffer had width set as %u",
func_name, i, view_width, i, framebuffer_attachment_image_info->width);
}
uint32_t view_height = image_create_info->extent.height >> subresource_range.baseMipLevel;
if (framebuffer_attachment_image_info->height != view_height) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03212",
"%s: Image view #%u created from an image subresource with height set as %u, "
"but image info #%u used to create the framebuffer had height set as %u",
func_name, i, view_height, i, framebuffer_attachment_image_info->height);
}
if (framebuffer_attachment_image_info->layerCount != subresource_range.layerCount) {
skip |=
LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03213",
"%s: Image view #%u created with a subresource range with a layerCount of %u, "
"but image info #%u used to create the framebuffer had layerCount set as %u",
func_name, i, subresource_range.layerCount, i, framebuffer_attachment_image_info->layerCount);
}
const VkImageFormatListCreateInfo *image_format_list_create_info =
LvlFindInChain<VkImageFormatListCreateInfo>(image_create_info->pNext);
if (image_format_list_create_info) {
if (image_format_list_create_info->viewFormatCount != framebuffer_attachment_image_info->viewFormatCount) {
skip |= LogError(
pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03214",
"VkRenderPassBeginInfo: Image view #%u created with an image with a viewFormatCount of %u, "
"but image info #%u used to create the framebuffer had viewFormatCount set as %u",
i, image_format_list_create_info->viewFormatCount, i,
framebuffer_attachment_image_info->viewFormatCount);
}
for (uint32_t j = 0; j < image_format_list_create_info->viewFormatCount; ++j) {
bool format_found = false;
for (uint32_t k = 0; k < framebuffer_attachment_image_info->viewFormatCount; ++k) {
if (image_format_list_create_info->pViewFormats[j] ==
framebuffer_attachment_image_info->pViewFormats[k]) {
format_found = true;
}
}
if (!format_found) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03215",
"VkRenderPassBeginInfo: Image view #%u created with an image including the format "
"%s in its view format list, "
"but image info #%u used to create the framebuffer does not include this format",
i, string_VkFormat(image_format_list_create_info->pViewFormats[j]), i);
}
}
}
if (render_pass_create_info->pAttachments[i].format != image_view_create_info->format) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03216",
"%s: Image view #%u created with a format of %s, "
"but render pass attachment description #%u created with a format of %s",
func_name, i, string_VkFormat(image_view_create_info->format), i,
string_VkFormat(render_pass_create_info->pAttachments[i].format));
}
if (render_pass_create_info->pAttachments[i].samples != image_create_info->samples) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03217",
"%s: Image view #%u created with an image with %s samples, "
"but render pass attachment description #%u created with %s samples",
func_name, i, string_VkSampleCountFlagBits(image_create_info->samples), i,
string_VkSampleCountFlagBits(render_pass_create_info->pAttachments[i].samples));
}
if (subresource_range.levelCount != 1) {
skip |= LogError(render_pass_attachment_begin_info->pAttachments[i],
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03218",
"%s: Image view #%u created with multiple (%u) mip levels.", func_name, i,
subresource_range.levelCount);
}
if (IsIdentitySwizzle(image_view_create_info->components) == false) {
skip |= LogError(
render_pass_attachment_begin_info->pAttachments[i],
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03219",
"%s: Image view #%u created with non-identity swizzle. All "
"framebuffer attachments must have been created with the identity swizzle. Here are the actual "
"swizzle values:\n"
"r swizzle = %s\n"
"g swizzle = %s\n"
"b swizzle = %s\n"
"a swizzle = %s\n",
func_name, i, string_VkComponentSwizzle(image_view_create_info->components.r),
string_VkComponentSwizzle(image_view_create_info->components.g),
string_VkComponentSwizzle(image_view_create_info->components.b),
string_VkComponentSwizzle(image_view_create_info->components.a));
}
if (image_view_create_info->viewType == VK_IMAGE_VIEW_TYPE_3D) {
skip |= LogError(render_pass_attachment_begin_info->pAttachments[i],
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-04114",
"%s: Image view #%u created with type VK_IMAGE_VIEW_TYPE_3D", func_name, i);
}
}
}
}
}
return skip;
}
// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
// [load|store]Op flag must be checked
// TODO: The memory valid flag in DEVICE_MEMORY_STATE should probably be split to track the validity of stencil memory separately.
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
if (color_depth_op != op && stencil_op != op) {
return false;
}
bool check_color_depth_load_op = !FormatIsStencilOnly(format);
bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
}
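// Shared validation for vkCmdBeginRenderPass/vkCmdBeginRenderPass2[KHR]: checks sample locations, clear values for
// VK_ATTACHMENT_LOAD_OP_CLEAR attachments, render area bounds, imageless framebuffer attachments, attachment layouts,
// render pass/framebuffer compatibility, and any chained VkDeviceGroupRenderPassBeginInfo.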
bool CoreChecks::ValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version,
const VkRenderPassBeginInfo *pRenderPassBegin, CMD_TYPE cmd_type) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;
bool skip = false;
const char *function_name = CommandTypeString(cmd_type);
if (render_pass_state) {
uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
// Handle extension struct from EXT_sample_locations
const VkRenderPassSampleLocationsBeginInfoEXT *sample_locations_begin_info =
LvlFindInChain<VkRenderPassSampleLocationsBeginInfoEXT>(pRenderPassBegin->pNext);
if (sample_locations_begin_info) {
for (uint32_t i = 0; i < sample_locations_begin_info->attachmentInitialSampleLocationsCount; ++i) {
const VkAttachmentSampleLocationsEXT &sample_location =
sample_locations_begin_info->pAttachmentInitialSampleLocations[i];
skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name);
if (sample_location.attachmentIndex >= render_pass_state->createInfo.attachmentCount) {
skip |=
LogError(device, "VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531",
"%s: Attachment index %u specified by attachment sample locations %u is greater than the "
"attachment count of %u for the render pass being begun.",
function_name, sample_location.attachmentIndex, i, render_pass_state->createInfo.attachmentCount);
}
}
for (uint32_t i = 0; i < sample_locations_begin_info->postSubpassSampleLocationsCount; ++i) {
const VkSubpassSampleLocationsEXT &sample_location = sample_locations_begin_info->pPostSubpassSampleLocations[i];
skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name);
if (sample_location.subpassIndex >= render_pass_state->createInfo.subpassCount) {
skip |=
LogError(device, "VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532",
"%s: Subpass index %u specified by subpass sample locations %u is greater than the subpass count "
"of %u for the render pass being begun.",
function_name, sample_location.subpassIndex, i, render_pass_state->createInfo.subpassCount);
}
}
}
for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
auto attachment = &render_pass_state->createInfo.pAttachments[i];
if (FormatSpecificLoadAndStoreOpSettings(attachment->format, attachment->loadOp, attachment->stencilLoadOp,
VK_ATTACHMENT_LOAD_OP_CLEAR)) {
clear_op_size = static_cast<uint32_t>(i) + 1;
if (FormatHasDepth(attachment->format) && pRenderPassBegin->pClearValues) {
skip |= ValidateClearDepthStencilValue(commandBuffer, pRenderPassBegin->pClearValues[i].depthStencil,
function_name);
}
}
}
if (clear_op_size > pRenderPassBegin->clearValueCount) {
skip |= LogError(render_pass_state->renderPass(), "VUID-VkRenderPassBeginInfo-clearValueCount-00902",
"In %s the VkRenderPassBeginInfo struct has a clearValueCount of %u but there "
"must be at least %u entries in pClearValues array to account for the highest index attachment in "
"%s that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array is indexed by "
"attachment number so even if some pClearValues entries between 0 and %u correspond to attachments "
"that aren't cleared they will be ignored.",
function_name, pRenderPassBegin->clearValueCount, clear_op_size,
report_data->FormatHandle(render_pass_state->renderPass()).c_str(), clear_op_size, clear_op_size - 1);
}
skip |= VerifyFramebufferAndRenderPassImageViews(pRenderPassBegin, function_name);
skip |= VerifyRenderAreaBounds(pRenderPassBegin, function_name);
skip |= VerifyFramebufferAndRenderPassLayouts(rp_version, cb_state, pRenderPassBegin,
GetFramebufferState(pRenderPassBegin->framebuffer));
if (framebuffer->rp_state->renderPass() != render_pass_state->renderPass()) {
skip |= ValidateRenderPassCompatibility("render pass", render_pass_state, "framebuffer", framebuffer->rp_state.get(),
function_name, "VUID-VkRenderPassBeginInfo-renderPass-00904");
}
skip |= ValidateDependencies(framebuffer, render_pass_state);
skip |= ValidateCmd(cb_state, cmd_type);
}
auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext);
if (chained_device_group_struct) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
"VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00905");
skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
"VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00906");
skip |= ValidateDeviceMaskToCommandBuffer(cb_state, chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
"VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00907");
if (chained_device_group_struct->deviceRenderAreaCount != 0 &&
chained_device_group_struct->deviceRenderAreaCount != physical_device_count) {
skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceRenderAreaCount-00908",
"%s: deviceRenderAreaCount[%" PRIu32 "] is invaild. Physical device count is %" PRIu32 ".",
function_name, chained_device_group_struct->deviceRenderAreaCount, physical_device_count);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) const {
bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin, CMD_BEGINRENDERPASS);
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo) const {
bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin, CMD_BEGINRENDERPASS2KHR);
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo) const {
bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin, CMD_BEGINRENDERPASS2);
return skip;
}
void CoreChecks::RecordCmdBeginRenderPassLayouts(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassContents contents) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;
if (render_pass_state) {
// transition attachments to the correct layouts for beginning of renderPass and first subpass
TransitionBeginRenderPassLayouts(cb_state, render_pass_state, framebuffer);
}
}
void CoreChecks::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) {
StateTracker::PreCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, contents);
}
void CoreChecks::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo) {
StateTracker::PreCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}
void CoreChecks::PreCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo) {
StateTracker::PreCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}
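// Shared validation for vkCmdNextSubpass/vkCmdNextSubpass2[KHR]: the command buffer must not already be in the final subpass.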
bool CoreChecks::ValidateCmdNextSubpass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer,
CMD_TYPE cmd_type) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *function_name = CommandTypeString(cmd_type);
skip |= ValidateCmd(cb_state, cmd_type);
auto subpass_count = cb_state->activeRenderPass->createInfo.subpassCount;
if (cb_state->activeSubpass == subpass_count - 1) {
vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-None-03102" : "VUID-vkCmdNextSubpass-None-00909";
skip |= LogError(commandBuffer, vuid, "%s: Attempted to advance beyond final subpass.", function_name);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
return ValidateCmdNextSubpass(RENDER_PASS_VERSION_1, commandBuffer, CMD_NEXTSUBPASS);
}
bool CoreChecks::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo) const {
return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer, CMD_NEXTSUBPASS2KHR);
}
bool CoreChecks::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo) const {
return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer, CMD_NEXTSUBPASS2);
}
void CoreChecks::RecordCmdNextSubpassLayouts(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
TransitionSubpassLayouts(cb_state, cb_state->activeRenderPass.get(), cb_state->activeSubpass,
Get<FRAMEBUFFER_STATE>(cb_state->activeRenderPassBeginInfo.framebuffer));
}
void CoreChecks::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
RecordCmdNextSubpassLayouts(commandBuffer, contents);
}
void CoreChecks::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo) {
StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents);
}
void CoreChecks::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo) {
StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents);
}
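// Shared validation for vkCmdEndRenderPass/vkCmdEndRenderPass2[KHR]: the final subpass must have been reached before the
// render pass instance is ended.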
bool CoreChecks::ValidateCmdEndRenderPass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer,
CMD_TYPE cmd_type) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *function_name = CommandTypeString(cmd_type);
RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass.get();
if (rp_state) {
if (cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1) {
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2-None-03103" : "VUID-vkCmdEndRenderPass-None-00910";
skip |= LogError(commandBuffer, vuid, "%s: Called before reaching final subpass.", function_name);
}
}
skip |= ValidateCmd(cb_state, cmd_type);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_1, commandBuffer, CMD_ENDRENDERPASS);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer, CMD_ENDRENDERPASS2KHR);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer, CMD_ENDRENDERPASS2);
return skip;
}
void CoreChecks::RecordCmdEndRenderPassLayouts(VkCommandBuffer commandBuffer) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
TransitionFinalSubpassLayouts(cb_state, cb_state->activeRenderPassBeginInfo.ptr(), cb_state->activeFramebuffer.get());
}
void CoreChecks::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
// Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need.
RecordCmdEndRenderPassLayouts(commandBuffer);
StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
}
void CoreChecks::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
// Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need.
RecordCmdEndRenderPassLayouts(commandBuffer);
StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
}
void CoreChecks::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
RecordCmdEndRenderPassLayouts(commandBuffer);
StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
}
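// For vkCmdExecuteCommands(): if the secondary command buffer inherits a framebuffer, that framebuffer must be valid and
// must match the primary command buffer's currently active framebuffer.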
bool CoreChecks::ValidateFramebuffer(VkCommandBuffer primaryBuffer, const CMD_BUFFER_STATE *pCB, VkCommandBuffer secondaryBuffer,
const CMD_BUFFER_STATE *pSubCB, const char *caller) const {
bool skip = false;
if (!pSubCB->beginInfo.pInheritanceInfo) {
return skip;
}
VkFramebuffer primary_fb = pCB->activeFramebuffer ? pCB->activeFramebuffer->framebuffer() : VK_NULL_HANDLE;
VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
if (secondary_fb != VK_NULL_HANDLE) {
if (primary_fb != secondary_fb) {
LogObjectList objlist(primaryBuffer);
objlist.add(secondaryBuffer);
objlist.add(secondary_fb);
objlist.add(primary_fb);
skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00099",
"vkCmdExecuteCommands() called w/ invalid secondary %s which has a %s"
" that is not the same as the primary command buffer's current active %s.",
report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str(),
report_data->FormatHandle(primary_fb).c_str());
}
auto fb = GetFramebufferState(secondary_fb);
if (!fb) {
LogObjectList objlist(primaryBuffer);
objlist.add(secondaryBuffer);
objlist.add(secondary_fb);
skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
"vkCmdExecuteCommands() called w/ invalid %s which has invalid %s.",
report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str());
return skip;
}
}
return skip;
}
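// For vkCmdExecuteCommands(): validate query inheritance (pipeline statistics flags, no query type active in the primary
// and also started in the secondary) and that both command buffers were allocated from pools of the same queue family.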
bool CoreChecks::ValidateSecondaryCommandBufferState(const CMD_BUFFER_STATE *pCB, const CMD_BUFFER_STATE *pSubCB) const {
bool skip = false;
layer_data::unordered_set<int> active_types;
if (!disabled[query_validation]) {
for (const auto &query_object : pCB->activeQueries) {
auto query_pool_state = GetQueryPoolState(query_object.pool);
if (query_pool_state) {
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
pSubCB->beginInfo.pInheritanceInfo) {
VkQueryPipelineStatisticFlags cmd_buf_statistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
if ((cmd_buf_statistics & query_pool_state->createInfo.pipelineStatistics) != cmd_buf_statistics) {
LogObjectList objlist(pCB->commandBuffer());
objlist.add(query_object.pool);
skip |= LogError(
objlist, "VUID-vkCmdExecuteCommands-commandBuffer-00104",
"vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s"
". Pipeline statistics is being queried so the command buffer must have all bits set on the queryPool.",
report_data->FormatHandle(pCB->commandBuffer()).c_str(),
report_data->FormatHandle(query_object.pool).c_str());
}
}
active_types.insert(query_pool_state->createInfo.queryType);
}
}
for (const auto &query_object : pSubCB->startedQueries) {
auto query_pool_state = GetQueryPoolState(query_object.pool);
if (query_pool_state && active_types.count(query_pool_state->createInfo.queryType)) {
LogObjectList objlist(pCB->commandBuffer());
objlist.add(query_object.pool);
skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
"vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s"
" of type %d but a query of that type has been started on secondary %s.",
report_data->FormatHandle(pCB->commandBuffer()).c_str(),
report_data->FormatHandle(query_object.pool).c_str(), query_pool_state->createInfo.queryType,
report_data->FormatHandle(pSubCB->commandBuffer()).c_str());
}
}
}
auto primary_pool = pCB->command_pool.get();
auto secondary_pool = pSubCB->command_pool.get();
if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
LogObjectList objlist(pSubCB->commandBuffer());
objlist.add(pCB->commandBuffer());
skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00094",
"vkCmdExecuteCommands(): Primary %s created in queue family %d has secondary "
"%s created in queue family %d.",
report_data->FormatHandle(pCB->commandBuffer()).c_str(), primary_pool->queueFamilyIndex,
report_data->FormatHandle(pSubCB->commandBuffer()).c_str(), secondary_pool->queueFamilyIndex);
}
return skip;
}
// Object that simulates the inherited viewport/scissor state as the device executes the called secondary command buffers.
// Visit the calling primary command buffer first, then the called secondaries in order.
// Contact David Zhao Akeley <dakeley@nvidia.com> for clarifications and bug fixes.
class CoreChecks::ViewportScissorInheritanceTracker {
static_assert(4 == sizeof(CMD_BUFFER_STATE::viewportMask), "Adjust max_viewports to match viewportMask bit width");
static constexpr uint32_t kMaxViewports = 32, kNotTrashed = uint32_t(-2), kTrashedByPrimary = uint32_t(-1);
const ValidationObject &validation_;
const CMD_BUFFER_STATE *primary_state_ = nullptr;
uint32_t viewport_mask_;
uint32_t scissor_mask_;
uint32_t viewport_trashed_by_[kMaxViewports]; // filled in VisitPrimary.
uint32_t scissor_trashed_by_[kMaxViewports];
VkViewport viewports_to_inherit_[kMaxViewports];
uint32_t viewport_count_to_inherit_; // 0 if viewport count (EXT state) has never been defined (but not trashed)
uint32_t scissor_count_to_inherit_; // 0 if scissor count (EXT state) has never been defined (but not trashed)
uint32_t viewport_count_trashed_by_;
uint32_t scissor_count_trashed_by_;
public:
ViewportScissorInheritanceTracker(const ValidationObject &validation) : validation_(validation) {}
bool VisitPrimary(const CMD_BUFFER_STATE *primary_state) {
assert(!primary_state_);
primary_state_ = primary_state;
viewport_mask_ = primary_state->viewportMask | primary_state->viewportWithCountMask;
scissor_mask_ = primary_state->scissorMask | primary_state->scissorWithCountMask;
for (uint32_t n = 0; n < kMaxViewports; ++n) {
uint32_t bit = uint32_t(1) << n;
viewport_trashed_by_[n] = primary_state->trashedViewportMask & bit ? kTrashedByPrimary : kNotTrashed;
scissor_trashed_by_[n] = primary_state->trashedScissorMask & bit ? kTrashedByPrimary : kNotTrashed;
if (viewport_mask_ & bit) {
viewports_to_inherit_[n] = primary_state->dynamicViewports[n];
}
}
viewport_count_to_inherit_ = primary_state->viewportWithCountCount;
scissor_count_to_inherit_ = primary_state->scissorWithCountCount;
viewport_count_trashed_by_ = primary_state->trashedViewportCount ? kTrashedByPrimary : kNotTrashed;
scissor_count_trashed_by_ = primary_state->trashedScissorCount ? kTrashedByPrimary : kNotTrashed;
return false;
}
bool VisitSecondary(uint32_t cmd_buffer_idx, const CMD_BUFFER_STATE *secondary_state) {
bool skip = false;
if (secondary_state->inheritedViewportDepths.empty()) {
skip |= VisitSecondaryNoInheritance(cmd_buffer_idx, secondary_state);
} else {
skip |= VisitSecondaryInheritance(cmd_buffer_idx, secondary_state);
}
// See note at end of VisitSecondaryNoInheritance.
if (secondary_state->trashedViewportCount) {
viewport_count_trashed_by_ = cmd_buffer_idx;
}
if (secondary_state->trashedScissorCount) {
scissor_count_trashed_by_ = cmd_buffer_idx;
}
return skip;
}
private:
// Track state inheritance as specified by VK_NV_inherited_scissor_viewport, including states
// overwritten to undefined value by bound pipelines with non-dynamic state.
bool VisitSecondaryNoInheritance(uint32_t cmd_buffer_idx, const CMD_BUFFER_STATE *secondary_state) {
viewport_mask_ |= secondary_state->viewportMask | secondary_state->viewportWithCountMask;
scissor_mask_ |= secondary_state->scissorMask | secondary_state->scissorWithCountMask;
for (uint32_t n = 0; n < kMaxViewports; ++n) {
uint32_t bit = uint32_t(1) << n;
if ((secondary_state->viewportMask | secondary_state->viewportWithCountMask) & bit) {
viewports_to_inherit_[n] = secondary_state->dynamicViewports[n];
viewport_trashed_by_[n] = kNotTrashed;
}
if ((secondary_state->scissorMask | secondary_state->scissorWithCountMask) & bit) {
scissor_trashed_by_[n] = kNotTrashed;
}
if (secondary_state->viewportWithCountCount != 0) {
viewport_count_to_inherit_ = secondary_state->viewportWithCountCount;
viewport_count_trashed_by_ = kNotTrashed;
}
if (secondary_state->scissorWithCountCount != 0) {
scissor_count_to_inherit_ = secondary_state->scissorWithCountCount;
scissor_count_trashed_by_ = kNotTrashed;
}
// Order of above vs below matters here.
if (secondary_state->trashedViewportMask & bit) {
viewport_trashed_by_[n] = cmd_buffer_idx;
}
if (secondary_state->trashedScissorMask & bit) {
scissor_trashed_by_[n] = cmd_buffer_idx;
}
// Check trashing dynamic viewport/scissor count in VisitSecondary (at end) as even secondary command buffers enabling
// viewport/scissor state inheritance may define this state statically in bound graphics pipelines.
}
return false;
}
// Validate needed inherited state as specified by VK_NV_inherited_scissor_viewport.
bool VisitSecondaryInheritance(uint32_t cmd_buffer_idx, const CMD_BUFFER_STATE *secondary_state) {
bool skip = false;
uint32_t check_viewport_count = 0, check_scissor_count = 0;
// Common code for reporting missing inherited state (for a myriad of reasons).
auto check_missing_inherit = [&](uint32_t was_ever_defined, uint32_t trashed_by, VkDynamicState state, uint32_t index = 0,
uint32_t static_use_count = 0, const VkViewport *inherited_viewport = nullptr,
const VkViewport *expected_viewport_depth = nullptr) {
if (was_ever_defined && trashed_by == kNotTrashed) {
if (state != VK_DYNAMIC_STATE_VIEWPORT) return false;
assert(inherited_viewport != nullptr && expected_viewport_depth != nullptr);
if (inherited_viewport->minDepth != expected_viewport_depth->minDepth ||
inherited_viewport->maxDepth != expected_viewport_depth->maxDepth) {
return validation_.LogError(
primary_state_->commandBuffer(), "VUID-vkCmdDraw-commandBuffer-02701",
"vkCmdExecuteCommands(): Draw commands in pCommandBuffers[%u] (%s) consume inherited viewport %u %s"
"but this state was not inherited as its depth range [%f, %f] does not match "
"pViewportDepths[%u] = [%f, %f]",
unsigned(cmd_buffer_idx), validation_.report_data->FormatHandle(secondary_state->commandBuffer()).c_str(),
unsigned(index), index >= static_use_count ? "(with count) " : "", inherited_viewport->minDepth,
inherited_viewport->maxDepth, unsigned(cmd_buffer_idx), expected_viewport_depth->minDepth,
expected_viewport_depth->maxDepth);
// akeley98 note: This VUID is not ideal; however, there isn't a more relevant VUID as
// it isn't illegal in itself to have mismatched inherited viewport depths.
// The error only occurs upon attempting to consume the viewport.
} else {
return false;
}
}
const char *state_name;
bool format_index = false;
switch (state) {
case VK_DYNAMIC_STATE_SCISSOR:
state_name = "scissor";
format_index = true;
break;
case VK_DYNAMIC_STATE_VIEWPORT:
state_name = "viewport";
format_index = true;
break;
case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT:
state_name = "dynamic viewport count";
break;
case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT:
state_name = "dynamic scissor count";
break;
default:
assert(0);
state_name = "<unknown state, report bug>";
break;
}
std::stringstream ss;
ss << "vkCmdExecuteCommands(): Draw commands in pCommandBuffers[" << cmd_buffer_idx << "] ("
<< validation_.report_data->FormatHandle(secondary_state->commandBuffer()).c_str() << ") consume inherited "
<< state_name << " ";
if (format_index) {
if (index >= static_use_count) {
ss << "(with count) ";
}
ss << index << " ";
}
ss << "but this state ";
if (!was_ever_defined) {
ss << "was never defined.";
} else if (trashed_by == kTrashedByPrimary) {
ss << "was left undefined after vkCmdExecuteCommands or vkCmdBindPipeline (with non-dynamic state) in "
"the calling primary command buffer.";
} else {
ss << "was left undefined after vkCmdBindPipeline (with non-dynamic state) in pCommandBuffers[" << trashed_by
<< "].";
}
return validation_.LogError(primary_state_->commandBuffer(), "VUID-vkCmdDraw-commandBuffer-02701", "%s", ss.str().c_str());
};
// Check if secondary command buffer uses viewport/scissor-with-count state, and validate this state if so.
if (secondary_state->usedDynamicViewportCount) {
if (viewport_count_to_inherit_ == 0 || viewport_count_trashed_by_ != kNotTrashed) {
skip |= check_missing_inherit(viewport_count_to_inherit_, viewport_count_trashed_by_,
VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT);
} else {
check_viewport_count = viewport_count_to_inherit_;
}
}
if (secondary_state->usedDynamicScissorCount) {
if (scissor_count_to_inherit_ == 0 || scissor_count_trashed_by_ != kNotTrashed) {
skip |= check_missing_inherit(scissor_count_to_inherit_, scissor_count_trashed_by_,
VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT);
} else {
check_scissor_count = scissor_count_to_inherit_;
}
}
// Check the maximum of (viewports used by pipelines with static viewport count, "" dynamic viewport count)
// but limit to length of inheritedViewportDepths array and uint32_t bit width (validation layer limit).
check_viewport_count = std::min(std::min(kMaxViewports, uint32_t(secondary_state->inheritedViewportDepths.size())),
std::max(check_viewport_count, secondary_state->usedViewportScissorCount));
check_scissor_count = std::min(kMaxViewports, std::max(check_scissor_count, secondary_state->usedViewportScissorCount));
if (secondary_state->usedDynamicViewportCount &&
viewport_count_to_inherit_ > secondary_state->inheritedViewportDepths.size()) {
skip |= validation_.LogError(
primary_state_->commandBuffer(), "VUID-vkCmdDraw-commandBuffer-02701",
"vkCmdExecuteCommands(): "
"Draw commands in pCommandBuffers[%u] (%s) consume inherited dynamic viewport with count state "
"but the dynamic viewport count (%u) exceeds the inheritance limit (viewportDepthCount=%u).",
unsigned(cmd_buffer_idx), validation_.report_data->FormatHandle(secondary_state->commandBuffer()).c_str(),
unsigned(viewport_count_to_inherit_), unsigned(secondary_state->inheritedViewportDepths.size()));
}
for (uint32_t n = 0; n < check_viewport_count; ++n) {
skip |= check_missing_inherit(viewport_mask_ & uint32_t(1) << n, viewport_trashed_by_[n], VK_DYNAMIC_STATE_VIEWPORT, n,
secondary_state->usedViewportScissorCount, &viewports_to_inherit_[n],
&secondary_state->inheritedViewportDepths[n]);
}
for (uint32_t n = 0; n < check_scissor_count; ++n) {
skip |= check_missing_inherit(scissor_mask_ & uint32_t(1) << n, scissor_trashed_by_[n], VK_DYNAMIC_STATE_SCISSOR, n,
secondary_state->usedViewportScissorCount);
}
return skip;
}
};
constexpr uint32_t CoreChecks::ViewportScissorInheritanceTracker::kMaxViewports;
constexpr uint32_t CoreChecks::ViewportScissorInheritanceTracker::kNotTrashed;
constexpr uint32_t CoreChecks::ViewportScissorInheritanceTracker::kTrashedByPrimary;
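// Validate vkCmdExecuteCommands(): every pCommandBuffers element must be a secondary command buffer that is compatible with
// the active render pass, follows the SIMULTANEOUS_USE rules, matches the primary's protected-ness and active occlusion
// query state, and whose initial image layouts are consistent with the primary command buffer's current layouts.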
bool CoreChecks::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
const VkCommandBuffer *pCommandBuffers) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const CMD_BUFFER_STATE *sub_cb_state = NULL;
layer_data::unordered_set<const CMD_BUFFER_STATE *> linked_command_buffers;
ViewportScissorInheritanceTracker viewport_scissor_inheritance{*this};
    if (enabled_features.inherited_viewport_scissor_features.inheritedViewportScissor2D) {
        skip |= viewport_scissor_inheritance.VisitPrimary(cb_state);
    }
bool active_occlusion_query = false;
for (const auto& active_query : cb_state->activeQueries) {
const auto query_pool_state = Get<QUERY_POOL_STATE>(active_query.pool);
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_OCCLUSION) {
active_occlusion_query = true;
break;
}
}
for (uint32_t i = 0; i < commandBuffersCount; i++) {
sub_cb_state = GetCBState(pCommandBuffers[i]);
assert(sub_cb_state);
        if (enabled_features.inherited_viewport_scissor_features.inheritedViewportScissor2D) {
            skip |= viewport_scissor_inheritance.VisitSecondary(i, sub_cb_state);
        }
if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == sub_cb_state->createInfo.level) {
skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00088",
"vkCmdExecuteCommands() called w/ Primary %s in element %u of pCommandBuffers array. All "
"cmd buffers in pCommandBuffers array must be secondary.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(), i);
} else if (VK_COMMAND_BUFFER_LEVEL_SECONDARY == sub_cb_state->createInfo.level) {
if (sub_cb_state->beginInfo.pInheritanceInfo != nullptr) {
const auto secondary_rp_state = GetRenderPassState(sub_cb_state->beginInfo.pInheritanceInfo->renderPass);
if (cb_state->activeRenderPass &&
!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
LogObjectList objlist(pCommandBuffers[i]);
objlist.add(cb_state->activeRenderPass->renderPass());
skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00096",
"vkCmdExecuteCommands(): Secondary %s is executed within a %s "
"instance scope, but the Secondary Command Buffer does not have the "
"VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when "
"the vkBeginCommandBuffer() was called.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(),
report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str());
} else if (!cb_state->activeRenderPass &&
(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00100",
"vkCmdExecuteCommands(): Secondary %s is executed outside a render pass "
"instance scope, but the Secondary Command Buffer does have the "
"VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when "
"the vkBeginCommandBuffer() was called.",
report_data->FormatHandle(pCommandBuffers[i]).c_str());
} else if (cb_state->activeRenderPass &&
(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
// Make sure render pass is compatible with parent command buffer pass if has continue
if (cb_state->activeRenderPass->renderPass() != secondary_rp_state->renderPass()) {
skip |= ValidateRenderPassCompatibility(
"primary command buffer", cb_state->activeRenderPass.get(), "secondary command buffer",
secondary_rp_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pInheritanceInfo-00098");
}
// If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
skip |=
ValidateFramebuffer(commandBuffer, cb_state, pCommandBuffers[i], sub_cb_state, "vkCmdExecuteCommands()");
if (!sub_cb_state->cmd_execute_commands_functions.empty()) {
// Inherit primary's activeFramebuffer and while running validate functions
for (auto &function : sub_cb_state->cmd_execute_commands_functions) {
skip |= function(cb_state, cb_state->activeFramebuffer.get());
}
}
}
}
}
// TODO(mlentine): Move more logic into this method
skip |= ValidateSecondaryCommandBufferState(cb_state, sub_cb_state);
skip |= ValidateCommandBufferState(sub_cb_state, "vkCmdExecuteCommands()", 0,
"VUID-vkCmdExecuteCommands-pCommandBuffers-00089");
if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
if (sub_cb_state->InUse()) {
skip |= LogError(
cb_state->commandBuffer(), "VUID-vkCmdExecuteCommands-pCommandBuffers-00091",
"vkCmdExecuteCommands(): Cannot execute pending %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str());
}
            // We use a const_cast, because one cannot query a container keyed on a non-const pointer using a const pointer
if (cb_state->linkedCommandBuffers.count(const_cast<CMD_BUFFER_STATE *>(sub_cb_state))) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(sub_cb_state->commandBuffer());
skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00092",
"vkCmdExecuteCommands(): Cannot execute %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
"set if previously executed in %s",
report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(cb_state->commandBuffer()).c_str());
}
const auto insert_pair = linked_command_buffers.insert(sub_cb_state);
if (!insert_pair.second) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdExecuteCommands-pCommandBuffers-00093",
"vkCmdExecuteCommands(): Cannot duplicate %s in pCommandBuffers without "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(cb_state->commandBuffer()).c_str());
}
if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
// Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
LogObjectList objlist(pCommandBuffers[i]);
objlist.add(cb_state->commandBuffer());
skip |= LogWarning(objlist, kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse,
"vkCmdExecuteCommands(): Secondary %s does not have "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
"%s to be treated as if it does not have "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even though it does.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(),
report_data->FormatHandle(cb_state->commandBuffer()).c_str());
}
}
if (!cb_state->activeQueries.empty() && !enabled_features.core.inheritedQueries) {
skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-commandBuffer-00101",
"vkCmdExecuteCommands(): Secondary %s cannot be submitted with a query in flight and "
"inherited queries not supported on this device.",
report_data->FormatHandle(pCommandBuffers[i]).c_str());
}
// Validate initial layout uses vs. the primary cmd buffer state
// Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"
// initial layout usage of secondary command buffers resources must match parent command buffer
const auto *const_cb_state = static_cast<const CMD_BUFFER_STATE *>(cb_state);
for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) {
const auto image = sub_layout_map_entry.first;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't set layouts of a dead image
const auto *cb_subres_map = const_cb_state->GetImageSubresourceLayoutMap(image);
// Const getter can be null in which case we have nothing to check against for this image...
if (!cb_subres_map) continue;
const auto *sub_cb_subres_map = &sub_layout_map_entry.second;
// Validate the initial_uses, that they match the current state of the primary cb, or absent a current state,
            // that they match any initial_layout.
for (const auto &subres_layout : *sub_cb_subres_map) {
const auto &sub_layout = subres_layout.initial_layout;
const auto &subresource = subres_layout.subresource;
if (VK_IMAGE_LAYOUT_UNDEFINED == sub_layout) continue; // secondary doesn't care about current or initial
                // Look up the layout to compare to the initial layout of the sub command buffer (current else initial)
const auto *cb_layouts = cb_subres_map->GetSubresourceLayouts(subresource);
auto cb_layout = cb_layouts ? cb_layouts->current_layout : kInvalidLayout;
const char *layout_type = "current";
if (cb_layout == kInvalidLayout) {
cb_layout = cb_layouts ? cb_layouts->initial_layout : kInvalidLayout;
layout_type = "initial";
}
if ((cb_layout != kInvalidLayout) && (cb_layout != sub_layout)) {
skip |= LogError(pCommandBuffers[i], "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
"%s: Executed secondary command buffer using %s (subresource: aspectMask 0x%X array layer %u, "
"mip level %u) which expects layout %s--instead, image %s layout is %s.",
"vkCmdExecuteCommands():", report_data->FormatHandle(image).c_str(), subresource.aspectMask,
subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(sub_layout), layout_type,
string_VkImageLayout(cb_layout));
}
}
}
// All commands buffers involved must be protected or unprotected
if ((cb_state->unprotected == false) && (sub_cb_state->unprotected == true)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(sub_cb_state->commandBuffer());
skip |= LogError(
objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01820",
"vkCmdExecuteCommands(): command buffer %s is protected while secondary command buffer %s is a unprotected",
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str());
} else if ((cb_state->unprotected == true) && (sub_cb_state->unprotected == false)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(sub_cb_state->commandBuffer());
skip |= LogError(
objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01821",
"vkCmdExecuteCommands(): command buffer %s is unprotected while secondary command buffer %s is a protected",
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str());
}
if (active_occlusion_query && sub_cb_state->inheritanceInfo.occlusionQueryEnable != VK_TRUE) {
skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-commandBuffer-00102",
"vkCmdExecuteCommands(): command buffer %s has an active occlusion query, but secondary command "
"buffer %s was recorded with VkCommandBufferInheritanceInfo::occlusionQueryEnable set to VK_FALSE",
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str());
}
}
skip |= ValidateCmd(cb_state, CMD_EXECUTECOMMANDS);
return skip;
}
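// vkMapMemory(): the memory must be host visible and must not be a multi-instance allocation; the requested
// offset/size is further checked by ValidateMapMemRange().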
bool CoreChecks::PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
VkFlags flags, void **ppData) const {
bool skip = false;
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
if (mem_info) {
if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
skip = LogError(mem, "VUID-vkMapMemory-memory-00682",
"Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: %s.",
report_data->FormatHandle(mem).c_str());
}
if (mem_info->multi_instance) {
skip = LogError(mem, "VUID-vkMapMemory-memory-00683",
"Memory (%s) must not have been allocated with multiple instances -- either by supplying a deviceMask "
"with more than one bit set, or by allocation from a heap with the MULTI_INSTANCE heap flag set.",
report_data->FormatHandle(mem).c_str());
}
skip |= ValidateMapMemRange(mem_info, offset, size);
}
return skip;
}
bool CoreChecks::PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) const {
bool skip = false;
const auto mem_info = GetDevMemState(mem);
if (mem_info && !mem_info->mapped_range.size) {
// Valid Usage: memory must currently be mapped
skip |= LogError(mem, "VUID-vkUnmapMemory-memory-00689", "Unmapping Memory without memory being mapped: %s.",
report_data->FormatHandle(mem).c_str());
}
return skip;
}
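// For vkFlushMappedMemoryRanges()/vkInvalidateMappedMemoryRanges(): each range must lie within a region of its memory
// object that is currently host mapped.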
bool CoreChecks::ValidateMemoryIsMapped(const char *funcName, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) const {
bool skip = false;
for (uint32_t i = 0; i < memRangeCount; ++i) {
auto mem_info = GetDevMemState(pMemRanges[i].memory);
if (mem_info) {
// Makes sure the memory is already mapped
if (mem_info->mapped_range.size == 0) {
skip = LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-memory-00684",
"%s: Attempting to use memory (%s) that is not currently host mapped.", funcName,
report_data->FormatHandle(pMemRanges[i].memory).c_str());
}
if (pMemRanges[i].size == VK_WHOLE_SIZE) {
if (mem_info->mapped_range.offset > pMemRanges[i].offset) {
skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00686",
"%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").",
funcName, static_cast<size_t>(pMemRanges[i].offset),
static_cast<size_t>(mem_info->mapped_range.offset));
}
} else {
const uint64_t data_end = (mem_info->mapped_range.size == VK_WHOLE_SIZE)
? mem_info->alloc_info.allocationSize
: (mem_info->mapped_range.offset + mem_info->mapped_range.size);
if ((mem_info->mapped_range.offset > pMemRanges[i].offset) ||
(data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00685",
"%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").",
funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
}
}
}
}
return skip;
}
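// For vkFlushMappedMemoryRanges()/vkInvalidateMappedMemoryRanges(): offsets and sizes must be multiples of
// VkPhysicalDeviceLimits::nonCoherentAtomSize, except where the range or mapping ends at the end of the allocation.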
bool CoreChecks::ValidateMappedMemoryRangeDeviceLimits(const char *func_name, uint32_t mem_range_count,
const VkMappedMemoryRange *mem_ranges) const {
bool skip = false;
for (uint32_t i = 0; i < mem_range_count; ++i) {
const uint64_t atom_size = phys_dev_props.limits.nonCoherentAtomSize;
const VkDeviceSize offset = mem_ranges[i].offset;
const VkDeviceSize size = mem_ranges[i].size;
if (SafeModulo(offset, atom_size) != 0) {
skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-offset-00687",
"%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
func_name, i, offset, atom_size);
}
auto mem_info = GetDevMemState(mem_ranges[i].memory);
if (mem_info) {
const auto allocation_size = mem_info->alloc_info.allocationSize;
if (size == VK_WHOLE_SIZE) {
const auto mapping_offset = mem_info->mapped_range.offset;
const auto mapping_size = mem_info->mapped_range.size;
const auto mapping_end = ((mapping_size == VK_WHOLE_SIZE) ? allocation_size : mapping_offset + mapping_size);
if (SafeModulo(mapping_end, atom_size) != 0 && mapping_end != allocation_size) {
skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-size-01389",
"%s: Size in pMemRanges[%d] is VK_WHOLE_SIZE and the mapping end (0x%" PRIxLEAST64
" = 0x%" PRIxLEAST64 " + 0x%" PRIxLEAST64
") not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64
") and not equal to the end of the memory object (0x%" PRIxLEAST64 ").",
func_name, i, mapping_end, mapping_offset, mapping_size, atom_size, allocation_size);
}
} else {
const auto range_end = size + offset;
if (range_end != allocation_size && SafeModulo(size, atom_size) != 0) {
skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-size-01390",
"%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64
") and offset + size (0x%" PRIxLEAST64 " + 0x%" PRIxLEAST64 " = 0x%" PRIxLEAST64
") not equal to the memory size (0x%" PRIxLEAST64 ").",
func_name, i, size, atom_size, offset, size, range_end, allocation_size);
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) const {
bool skip = false;
skip |= ValidateMappedMemoryRangeDeviceLimits("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
skip |= ValidateMemoryIsMapped("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
return skip;
}
bool CoreChecks::PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) const {
bool skip = false;
skip |= ValidateMappedMemoryRangeDeviceLimits("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
skip |= ValidateMemoryIsMapped("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
return skip;
}
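// vkGetDeviceMemoryCommitment(): only valid to query memory allocated from a memory type with
// VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT.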
bool CoreChecks::PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize *pCommittedMem) const {
bool skip = false;
const auto mem_info = GetDevMemState(mem);
if (mem_info) {
if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) {
skip = LogError(mem, "VUID-vkGetDeviceMemoryCommitment-memory-00690",
"vkGetDeviceMemoryCommitment(): Querying commitment for memory without "
"VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT set: %s.",
report_data->FormatHandle(mem).c_str());
}
}
return skip;
}
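// Shared validation for vkBindImageMemory() and vkBindImageMemory2(): checks memory offset, size, and memory type against
// the image's VkMemoryRequirements for both non-disjoint and disjoint (per-plane) bindings, and rejects binding the same
// non-disjoint image more than once in a single vkBindImageMemory2() call.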
bool CoreChecks::ValidateBindImageMemory(uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos,
const char *api_name) const {
bool skip = false;
bool bind_image_mem_2 = strcmp(api_name, "vkBindImageMemory()") != 0;
char error_prefix[128];
strcpy(error_prefix, api_name);
    // Track all image subresources when they are bound via vkBindImageMemory2
    // The uint32_t[3] records which pBindInfos index bound each of up to 3 planes
    // Non-disjoint images act as a single plane
layer_data::unordered_map<VkImage, std::array<uint32_t, 3>> resources_bound;
for (uint32_t i = 0; i < bindInfoCount; i++) {
if (bind_image_mem_2 == true) {
sprintf(error_prefix, "%s pBindInfos[%u]", api_name, i);
}
const VkBindImageMemoryInfo &bind_info = pBindInfos[i];
const IMAGE_STATE *image_state = GetImageState(bind_info.image);
if (image_state) {
// Track objects tied to memory
skip |=
ValidateSetMemBinding(bind_info.memory, image_state->Handle(), error_prefix);
const auto plane_info = LvlFindInChain<VkBindImagePlaneMemoryInfo>(bind_info.pNext);
const auto mem_info = GetDevMemState(bind_info.memory);
            // Need extra check for the disjoint flag in case this was called without vkBindImageMemory2, to avoid false positive errors
            // no 'else' case: if that happens, another VUID is already being triggered for it being invalid
if ((plane_info == nullptr) && (image_state->disjoint == false)) {
// Check non-disjoint images VkMemoryRequirements
// All validation using the image_state->requirements for external AHB is check in android only section
if (image_state->IsExternalAHB() == false) {
const VkMemoryRequirements &mem_req = image_state->requirements[0];
// Validate memory requirements alignment
if (SafeModulo(bind_info.memoryOffset, mem_req.alignment) != 0) {
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-memoryOffset-01048";
} else if (IsExtEnabled(device_extensions.vk_khr_sampler_ycbcr_conversion)) {
validation_error = "VUID-VkBindImageMemoryInfo-pNext-01616";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memoryOffset-01613";
}
skip |=
LogError(bind_info.image, validation_error,
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with image.",
error_prefix, bind_info.memoryOffset, mem_req.alignment);
}
if (mem_info) {
safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info;
// Validate memory requirements size
if (mem_req.size > alloc_info.allocationSize - bind_info.memoryOffset) {
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-size-01049";
} else if (IsExtEnabled(device_extensions.vk_khr_sampler_ycbcr_conversion)) {
validation_error = "VUID-VkBindImageMemoryInfo-pNext-01617";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-01614";
}
skip |= LogError(bind_info.image, validation_error,
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with image.",
error_prefix, alloc_info.allocationSize - bind_info.memoryOffset, mem_req.size);
}
// Validate memory type used
{
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-memory-01047";
} else if (IsExtEnabled(device_extensions.vk_khr_sampler_ycbcr_conversion)) {
validation_error = "VUID-VkBindImageMemoryInfo-pNext-01615";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-01612";
}
skip |= ValidateMemoryTypes(mem_info, mem_req.memoryTypeBits, error_prefix, validation_error);
}
}
}
if (bind_image_mem_2 == true) {
                    // since it's a non-disjoint image, finding the VkImage already in the map means it is bound twice
auto it = resources_bound.find(image_state->image());
if (it == resources_bound.end()) {
std::array<uint32_t, 3> bound_index = {i, UINT32_MAX, UINT32_MAX};
resources_bound.emplace(image_state->image(), bound_index);
} else {
skip |= LogError(
bind_info.image, "VUID-vkBindImageMemory2-pBindInfos-04006",
"%s: The same non-disjoint image resource is being bound twice at pBindInfos[%d] and pBindInfos[%d]",
error_prefix, it->second[0], i);
}
}
} else if ((plane_info != nullptr) && (image_state->disjoint == true)) {
// Check disjoint images VkMemoryRequirements for given plane
int plane = 0;
                // All validation using image_state->plane*_requirements for external AHB is checked in the Android-only section
if (image_state->IsExternalAHB() == false) {
const VkImageAspectFlagBits aspect = plane_info->planeAspect;
switch (aspect) {
case VK_IMAGE_ASPECT_PLANE_0_BIT:
plane = 0;
break;
case VK_IMAGE_ASPECT_PLANE_1_BIT:
plane = 1;
break;
case VK_IMAGE_ASPECT_PLANE_2_BIT:
plane = 2;
break;
default:
assert(false); // parameter validation should have caught this
break;
}
const VkMemoryRequirements &disjoint_mem_req = image_state->requirements[plane];
// Validate memory requirements alignment
if (SafeModulo(bind_info.memoryOffset, disjoint_mem_req.alignment) != 0) {
skip |= LogError(
bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01620",
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements2 with disjoint image for aspect plane %s.",
error_prefix, bind_info.memoryOffset, disjoint_mem_req.alignment, string_VkImageAspectFlagBits(aspect));
}
if (mem_info) {
safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info;
// Validate memory requirements size
if (disjoint_mem_req.size > alloc_info.allocationSize - bind_info.memoryOffset) {
skip |= LogError(
bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01621",
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with disjoint image for aspect plane %s.",
error_prefix, alloc_info.allocationSize - bind_info.memoryOffset, disjoint_mem_req.size,
string_VkImageAspectFlagBits(aspect));
}
// Validate memory type used
{
skip |= ValidateMemoryTypes(mem_info, disjoint_mem_req.memoryTypeBits, error_prefix,
"VUID-VkBindImageMemoryInfo-pNext-01619");
}
}
}
auto it = resources_bound.find(image_state->image());
if (it == resources_bound.end()) {
std::array<uint32_t, 3> bound_index = {UINT32_MAX, UINT32_MAX, UINT32_MAX};
bound_index[plane] = i;
resources_bound.emplace(image_state->image(), bound_index);
} else {
if (it->second[plane] == UINT32_MAX) {
it->second[plane] = i;
} else {
skip |= LogError(bind_info.image, "VUID-vkBindImageMemory2-pBindInfos-04006",
"%s: The same disjoint image sub-resource for plane %d is being bound twice at "
"pBindInfos[%d] and pBindInfos[%d]",
error_prefix, plane, it->second[plane], i);
}
}
}
if (mem_info) {
// Validate bound memory range information
// if memory is exported to an AHB then the mem_info->allocationSize must be zero and this check is not needed
if ((mem_info->IsExport() == false) ||
((mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) ==
0)) {
skip |= ValidateInsertImageMemoryRange(bind_info.image, mem_info, bind_info.memoryOffset, error_prefix);
}
// Validate dedicated allocation
if (mem_info->IsDedicatedImage()) {
if (enabled_features.dedicated_allocation_image_aliasing_features.dedicatedAllocationImageAliasing) {
const auto current_image_state = GetImageState(bind_info.image);
if ((bind_info.memoryOffset != 0) || !current_image_state ||
!current_image_state->IsCreateInfoDedicatedAllocationImageAliasingCompatible(
mem_info->dedicated->create_info.image)) {
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-memory-02629";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-02629";
}
LogObjectList objlist(bind_info.image);
objlist.add(bind_info.memory);
objlist.add(mem_info->dedicated->handle);
skip |= LogError(
objlist, validation_error,
"%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfo:: %s must compatible "
"with %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
error_prefix, report_data->FormatHandle(bind_info.memory).c_str(),
report_data->FormatHandle(mem_info->dedicated->handle).c_str(),
report_data->FormatHandle(bind_info.image).c_str(), bind_info.memoryOffset);
}
} else {
if ((bind_info.memoryOffset != 0) || (mem_info->dedicated->handle.Cast<VkImage>() != bind_info.image)) {
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-memory-01509";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-01509";
}
LogObjectList objlist(bind_info.image);
objlist.add(bind_info.memory);
objlist.add(mem_info->dedicated->handle);
skip |=
LogError(objlist, validation_error,
"%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfo:: %s must be equal "
"to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
error_prefix, report_data->FormatHandle(bind_info.memory).c_str(),
report_data->FormatHandle(mem_info->dedicated->handle).c_str(),
report_data->FormatHandle(bind_info.image).c_str(), bind_info.memoryOffset);
}
}
}
// Validate export memory handles
if ((mem_info->export_handle_type_flags != 0) &&
((mem_info->export_handle_type_flags & image_state->external_memory_handle) == 0)) {
const char *vuid =
bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-memory-02728" : "VUID-vkBindImageMemory-memory-02728";
LogObjectList objlist(bind_info.image);
objlist.add(bind_info.memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least "
"one handle from VkImage (%s) handleType %s.",
error_prefix, report_data->FormatHandle(bind_info.memory).c_str(),
string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(),
report_data->FormatHandle(bind_info.image).c_str(),
string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str());
}
// Validate import memory handles
if (mem_info->IsImportAHB() == true) {
skip |= ValidateImageImportedHandleANDROID(api_name, image_state->external_memory_handle, bind_info.memory,
bind_info.image);
} else if (mem_info->IsImport() == true) {
if ((mem_info->import_handle_type_flags & image_state->external_memory_handle) == 0) {
const char *vuid = nullptr;
if ((bind_image_mem_2) &&
IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-VkBindImageMemoryInfo-memory-02989";
} else if ((!bind_image_mem_2) &&
IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-vkBindImageMemory-memory-02989";
} else if ((bind_image_mem_2) &&
!IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-VkBindImageMemoryInfo-memory-02729";
} else if ((!bind_image_mem_2) &&
!IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-vkBindImageMemory-memory-02729";
}
LogObjectList objlist(bind_info.image);
objlist.add(bind_info.memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s "
"which is not set in the VkImage (%s) VkExternalMemoryImageCreateInfo::handleType (%s)",
api_name, report_data->FormatHandle(bind_info.memory).c_str(),
string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(),
report_data->FormatHandle(bind_info.image).c_str(),
string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str());
}
}
// Validate mix of protected buffer and memory
if ((image_state->unprotected == false) && (mem_info->unprotected == true)) {
const char *vuid =
bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-None-01901" : "VUID-vkBindImageMemory-None-01901";
LogObjectList objlist(bind_info.image);
objlist.add(bind_info.memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was not created with protected memory but the VkImage (%s) was "
"set to use protected memory.",
api_name, report_data->FormatHandle(bind_info.memory).c_str(),
report_data->FormatHandle(bind_info.image).c_str());
} else if ((image_state->unprotected == true) && (mem_info->unprotected == false)) {
const char *vuid =
bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-None-01902" : "VUID-vkBindImageMemory-None-01902";
LogObjectList objlist(bind_info.image);
objlist.add(bind_info.memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with protected memory but the VkImage (%s) was not "
"set to use protected memory.",
api_name, report_data->FormatHandle(bind_info.memory).c_str(),
report_data->FormatHandle(bind_info.image).c_str());
}
}
const auto swapchain_info = LvlFindInChain<VkBindImageMemorySwapchainInfoKHR>(bind_info.pNext);
if (swapchain_info) {
if (bind_info.memory != VK_NULL_HANDLE) {
skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01631", "%s: %s is not VK_NULL_HANDLE.",
error_prefix, report_data->FormatHandle(bind_info.memory).c_str());
}
if (image_state->create_from_swapchain != swapchain_info->swapchain) {
LogObjectList objlist(image_state->image());
objlist.add(image_state->create_from_swapchain);
objlist.add(swapchain_info->swapchain);
skip |= LogError(
objlist, kVUID_Core_BindImageMemory_Swapchain,
"%s: %s is created by %s, but the image is bound by %s. The image should be created and bound by the same "
"swapchain",
error_prefix, report_data->FormatHandle(image_state->image()).c_str(),
report_data->FormatHandle(image_state->create_from_swapchain).c_str(),
report_data->FormatHandle(swapchain_info->swapchain).c_str());
}
const auto swapchain_state = GetSwapchainState(swapchain_info->swapchain);
if (swapchain_state && swapchain_state->images.size() <= swapchain_info->imageIndex) {
skip |= LogError(bind_info.image, "VUID-VkBindImageMemorySwapchainInfoKHR-imageIndex-01644",
"%s: imageIndex (%i) is out of bounds of %s images (size: %i)", error_prefix,
swapchain_info->imageIndex, report_data->FormatHandle(swapchain_info->swapchain).c_str(),
static_cast<int>(swapchain_state->images.size()));
}
} else {
if (image_state->create_from_swapchain) {
skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-image-01630",
"%s: pNext of VkBindImageMemoryInfo doesn't include VkBindImageMemorySwapchainInfoKHR.",
error_prefix);
}
if (!mem_info) {
skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01632", "%s: %s is invalid.", error_prefix,
report_data->FormatHandle(bind_info.memory).c_str());
}
}
const auto bind_image_memory_device_group_info = LvlFindInChain<VkBindImageMemoryDeviceGroupInfo>(bind_info.pNext);
if (bind_image_memory_device_group_info && bind_image_memory_device_group_info->splitInstanceBindRegionCount != 0) {
if (!(image_state->createInfo.flags & VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT)) {
skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01627",
"%s: pNext of VkBindImageMemoryInfo contains VkBindImageMemoryDeviceGroupInfo with "
"splitInstanceBindRegionCount (%" PRIi32
") not equal to 0 and %s is not created with "
"VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT.",
error_prefix, bind_image_memory_device_group_info->splitInstanceBindRegionCount,
report_data->FormatHandle(image_state->image()).c_str());
}
uint32_t phy_dev_square = 1;
if (device_group_create_info.physicalDeviceCount > 0) {
phy_dev_square = device_group_create_info.physicalDeviceCount * device_group_create_info.physicalDeviceCount;
}
if (bind_image_memory_device_group_info->splitInstanceBindRegionCount != phy_dev_square) {
skip |= LogError(
bind_info.image, "VUID-VkBindImageMemoryDeviceGroupInfo-splitInstanceBindRegionCount-01636",
"%s: pNext of VkBindImageMemoryInfo contains VkBindImageMemoryDeviceGroupInfo with "
"splitInstanceBindRegionCount (%" PRIi32
") which is not 0 and different from the number of physical devices in the logical device squared (%" PRIu32
").",
error_prefix, bind_image_memory_device_group_info->splitInstanceBindRegionCount, phy_dev_square);
}
}
if (plane_info) {
// Checks for disjoint bit in image
if (image_state->disjoint == false) {
skip |= LogError(
bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01618",
"%s: pNext of VkBindImageMemoryInfo contains VkBindImagePlaneMemoryInfo and %s is not created with "
"VK_IMAGE_CREATE_DISJOINT_BIT.",
error_prefix, report_data->FormatHandle(image_state->image()).c_str());
}
// Make sure planeAspect is only a single, valid plane
uint32_t planes = FormatPlaneCount(image_state->createInfo.format);
VkImageAspectFlags aspect = plane_info->planeAspect;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) {
skip |= LogError(
bind_info.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283",
"%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
"or VK_IMAGE_ASPECT_PLANE_1_BIT.",
error_prefix, report_data->FormatHandle(image_state->image()).c_str(),
string_VkImageAspectFlags(aspect).c_str());
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) {
skip |= LogError(
bind_info.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283",
"%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
"or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.",
error_prefix, report_data->FormatHandle(image_state->image()).c_str(),
string_VkImageAspectFlags(aspect).c_str());
}
}
}
const auto bind_image_memory_device_group = LvlFindInChain<VkBindImageMemoryDeviceGroupInfo>(bind_info.pNext);
if (bind_image_memory_device_group) {
if (bind_image_memory_device_group->deviceIndexCount > 0 &&
bind_image_memory_device_group->splitInstanceBindRegionCount > 0) {
skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryDeviceGroupInfo-deviceIndexCount-01633",
"%s: VkBindImageMemoryDeviceGroupInfo in pNext of pBindInfos[%" PRIu32
"] has both deviceIndexCount and splitInstanceBindRegionCount greater than 0.",
error_prefix, i);
}
}
}
// Check to make sure all disjoint planes were bound
for (auto &resource : resources_bound) {
const IMAGE_STATE *image_state = GetImageState(resource.first);
if (image_state->disjoint == true) {
uint32_t total_planes = FormatPlaneCount(image_state->createInfo.format);
for (uint32_t i = 0; i < total_planes; i++) {
if (resource.second[i] == UINT32_MAX) {
skip |= LogError(resource.first, "VUID-vkBindImageMemory2-pBindInfos-02858",
"%s: Plane %u of the disjoint image was not bound. All %d planes need to bound individually "
"in separate pBindInfos in a single call.",
api_name, i, total_planes);
}
}
}
}
return skip;
}
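// vkBindImageMemory: disjoint images must be bound with vkBindImageMemory2; otherwise forward to the shared
// bind-image-memory validation above.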
bool CoreChecks::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem,
VkDeviceSize memoryOffset) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state) {
// Checks for no disjoint bit
if (image_state->disjoint == true) {
skip |=
LogError(image, "VUID-vkBindImageMemory-image-01608",
"%s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT (need to use vkBindImageMemory2).",
report_data->FormatHandle(image).c_str());
}
}
auto bind_info = LvlInitStruct<VkBindImageMemoryInfo>();
bind_info.image = image;
bind_info.memory = mem;
bind_info.memoryOffset = memoryOffset;
skip |= ValidateBindImageMemory(1, &bind_info, "vkBindImageMemory()");
return skip;
}
bool CoreChecks::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfo *pBindInfos) const {
return ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2()");
}
bool CoreChecks::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfo *pBindInfos) const {
return ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2KHR()");
}
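// Host-side event operations (set/reset/query) are not allowed on events created with
// VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.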
bool CoreChecks::PreCallValidateSetEvent(VkDevice device, VkEvent event) const {
bool skip = false;
const auto event_state = GetEventState(event);
if (event_state) {
if (event_state->write_in_use) {
skip |=
LogError(event, kVUID_Core_DrawState_QueueForwardProgress,
"vkSetEvent(): %s that is already in use by a command buffer.", report_data->FormatHandle(event).c_str());
}
if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) {
skip |= LogError(event, "VUID-vkSetEvent-event-03941",
"vkSetEvent(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.",
report_data->FormatHandle(event).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateResetEvent(VkDevice device, VkEvent event) const {
bool skip = false;
const auto event_state = GetEventState(event);
if (event_state) {
if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) {
skip |= LogError(event, "VUID-vkResetEvent-event-03823",
"vkResetEvent(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.",
report_data->FormatHandle(event).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetEventStatus(VkDevice device, VkEvent event) const {
bool skip = false;
const auto event_state = GetEventState(event);
if (event_state) {
if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) {
skip |= LogError(event, "VUID-vkGetEventStatus-event-03940",
"vkGetEventStatus(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.",
report_data->FormatHandle(event).c_str());
}
}
return skip;
}
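// VkSparseMemoryBind: the bound memory must not come from a lazily-allocated memory type.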
bool CoreChecks::ValidateSparseMemoryBind(const VkSparseMemoryBind *bind, const char *func_name, const char *parameter_name) const {
bool skip = false;
if (bind) {
const auto *mem_info = Get<DEVICE_MEMORY_STATE>(bind->memory);
if (mem_info) {
if (phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) {
skip |=
LogError(bind->memory, "VUID-VkSparseMemoryBind-memory-01097",
"%s: %s memory type has VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT bit set.", func_name, parameter_name);
}
}
}
return skip;
}
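// vkQueueBindSparse: validates the fence, queue sparse-binding capability, wait/signal semaphore usage
// (binary and timeline), and every buffer, opaque-image and image sparse memory bind in pBindInfo.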
bool CoreChecks::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
VkFence fence) const {
const auto queue_data = GetQueueState(queue);
const auto fence_state = GetFenceState(fence);
bool skip = ValidateFenceForSubmit(fence_state, "VUID-vkQueueBindSparse-fence-01114", "VUID-vkQueueBindSparse-fence-01113",
"VkQueueBindSparse()");
if (skip) {
return true;
}
const auto queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_data->queueFamilyIndex].queueFlags;
if (!(queue_flags & VK_QUEUE_SPARSE_BINDING_BIT)) {
skip |= LogError(queue, "VUID-vkQueueBindSparse-queuetype",
"vkQueueBindSparse(): a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set.");
}
layer_data::unordered_set<VkSemaphore> signaled_semaphores;
layer_data::unordered_set<VkSemaphore> unsignaled_semaphores;
layer_data::unordered_set<VkSemaphore> internal_semaphores;
auto *vuid_error = IsExtEnabled(device_extensions.vk_khr_timeline_semaphore) ? "VUID-vkQueueBindSparse-pWaitSemaphores-03245"
: kVUID_Core_DrawState_QueueForwardProgress;
for (uint32_t bind_idx = 0; bind_idx < bindInfoCount; ++bind_idx) {
const VkBindSparseInfo &bind_info = pBindInfo[bind_idx];
        auto timeline_semaphore_submit_info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(bind_info.pNext);
std::vector<SEMAPHORE_WAIT> semaphore_waits;
std::vector<VkSemaphore> semaphore_signals;
for (uint32_t i = 0; i < bind_info.waitSemaphoreCount; ++i) {
VkSemaphore semaphore = bind_info.pWaitSemaphores[i];
const auto semaphore_state = GetSemaphoreState(semaphore);
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && !timeline_semaphore_submit_info) {
skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
"VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, but "
"pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfo",
bind_idx, i, report_data->FormatHandle(semaphore).c_str(), bind_idx);
}
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
bind_info.waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pNext-03247",
"VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, it contains "
"an instance of VkTimelineSemaphoreSubmitInfo, but waitSemaphoreValueCount (%u) is different "
"than pBindInfo[%u].waitSemaphoreCount (%u)",
bind_idx, i, report_data->FormatHandle(semaphore).c_str(),
timeline_semaphore_submit_info->waitSemaphoreValueCount, bind_idx, bind_info.waitSemaphoreCount);
}
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
(semaphore_state->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (unsignaled_semaphores.count(semaphore) ||
(!(signaled_semaphores.count(semaphore)) && !(semaphore_state->signaled) && !SemaphoreWasSignaled(semaphore))) {
LogObjectList objlist(semaphore);
objlist.add(queue);
skip |= LogError(
objlist,
semaphore_state->scope == kSyncScopeInternal ? vuid_error : kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueBindSparse(): Queue %s is waiting on pBindInfo[%u].pWaitSemaphores[%u] (%s) that has no way to be "
"signaled.",
report_data->FormatHandle(queue).c_str(), bind_idx, i, report_data->FormatHandle(semaphore).c_str());
} else {
signaled_semaphores.erase(semaphore);
unsignaled_semaphores.insert(semaphore);
}
}
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
semaphore_state->scope == kSyncScopeExternalTemporary) {
internal_semaphores.insert(semaphore);
}
}
for (uint32_t i = 0; i < bind_info.signalSemaphoreCount; ++i) {
VkSemaphore semaphore = bind_info.pSignalSemaphores[i];
const auto semaphore_state = GetSemaphoreState(semaphore);
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && !timeline_semaphore_submit_info) {
skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
"VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, but "
"pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfo",
bind_idx, i, report_data->FormatHandle(semaphore).c_str(), bind_idx);
}
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
timeline_semaphore_submit_info->pSignalSemaphoreValues[i] <= semaphore_state->payload) {
LogObjectList objlist(semaphore);
objlist.add(queue);
skip |= LogError(objlist, "VUID-VkBindSparseInfo-pSignalSemaphores-03249",
"VkQueueBindSparse: signal value (0x%" PRIx64
") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64
") in pBindInfo[%u].pSignalSemaphores[%u]",
                                 timeline_semaphore_submit_info->pSignalSemaphoreValues[i],
                                 report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(),
                                 semaphore_state->payload, bind_idx, i);
}
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
bind_info.signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
skip |=
LogError(semaphore, "VUID-VkBindSparseInfo-pNext-03248",
"VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, it contains "
"an instance of VkTimelineSemaphoreSubmitInfo, but signalSemaphoreValueCount (%u) is different "
"than pBindInfo[%u].signalSemaphoreCount (%u)",
bind_idx, i, report_data->FormatHandle(semaphore).c_str(),
timeline_semaphore_submit_info->signalSemaphoreValueCount, bind_idx, bind_info.signalSemaphoreCount);
}
if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
semaphore_state->scope == kSyncScopeInternal) {
if (signaled_semaphores.count(semaphore) ||
(!(unsignaled_semaphores.count(semaphore)) && semaphore_state->signaled)) {
LogObjectList objlist(semaphore);
objlist.add(queue);
objlist.add(semaphore_state->signaler.first);
skip |= LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueBindSparse(): %s is signaling pBindInfo[%u].pSignalSemaphores[%u] (%s) that was "
"previously signaled by %s but has not since been waited on by any queue.",
report_data->FormatHandle(queue).c_str(), bind_idx, i,
report_data->FormatHandle(semaphore).c_str(),
report_data->FormatHandle(semaphore_state->signaler.first).c_str());
} else {
unsignaled_semaphores.erase(semaphore);
signaled_semaphores.insert(semaphore);
}
}
}
if (bind_info.pBufferBinds) {
for (uint32_t buffer_idx = 0; buffer_idx < bind_info.bufferBindCount; ++buffer_idx) {
const VkSparseBufferMemoryBindInfo &buffer_bind = bind_info.pBufferBinds[buffer_idx];
if (buffer_bind.pBinds) {
for (uint32_t buffer_bind_idx = 0; buffer_bind_idx < buffer_bind.bindCount; ++buffer_bind_idx) {
const VkSparseMemoryBind &memory_bind = buffer_bind.pBinds[buffer_bind_idx];
std::stringstream parameter_name;
parameter_name << "pBindInfo[" << bind_idx << "].pBufferBinds[" << buffer_idx << " ].pBinds["
<< buffer_bind_idx << "]";
                        skip |= ValidateSparseMemoryBind(&memory_bind, "vkQueueBindSparse()", parameter_name.str().c_str());
const auto *mem_info = Get<DEVICE_MEMORY_STATE>(memory_bind.memory);
if (mem_info) {
if (memory_bind.memoryOffset >= mem_info->alloc_info.allocationSize) {
skip |=
LogError(buffer_bind.buffer, "VUID-VkSparseMemoryBind-memoryOffset-01101",
"vkQueueBindSparse(): pBindInfo[%u].pBufferBinds[%u]: memoryOffset is not less than "
"the size of memory",
bind_idx, buffer_idx);
}
}
}
}
}
}
if (bind_info.pImageOpaqueBinds) {
            for (uint32_t image_opaque_idx = 0; image_opaque_idx < bind_info.imageOpaqueBindCount; ++image_opaque_idx) {
const VkSparseImageOpaqueMemoryBindInfo &image_opaque_bind = bind_info.pImageOpaqueBinds[image_opaque_idx];
if (image_opaque_bind.pBinds) {
for (uint32_t image_opaque_bind_idx = 0; image_opaque_bind_idx < image_opaque_bind.bindCount;
++image_opaque_bind_idx) {
const VkSparseMemoryBind &memory_bind = image_opaque_bind.pBinds[image_opaque_bind_idx];
std::stringstream parameter_name;
parameter_name << "pBindInfo[" << bind_idx << "].pImageOpaqueBinds[" << image_opaque_idx << " ].pBinds["
<< image_opaque_bind_idx << "]";
                        skip |= ValidateSparseMemoryBind(&memory_bind, "vkQueueBindSparse()", parameter_name.str().c_str());
const auto *mem_info = Get<DEVICE_MEMORY_STATE>(memory_bind.memory);
if (mem_info) {
if (memory_bind.memoryOffset >= mem_info->alloc_info.allocationSize) {
skip |= LogError(
image_opaque_bind.image, "VUID-VkSparseMemoryBind-memoryOffset-01101",
"vkQueueBindSparse(): pBindInfo[%u].pImageOpaqueBinds[%u]: memoryOffset is not less than "
"the size of memory",
bind_idx, image_opaque_idx);
}
}
}
}
}
}
if (bind_info.pImageBinds) {
for (uint32_t image_idx = 0; image_idx < bind_info.imageBindCount; ++image_idx) {
const VkSparseImageMemoryBindInfo &image_bind = bind_info.pImageBinds[image_idx];
const auto image_state = Get<IMAGE_STATE>(image_bind.image);
if (image_state && !(image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT)) {
skip |= LogError(image_bind.image, "VUID-VkSparseImageMemoryBindInfo-image-02901",
"vkQueueBindSparse(): pBindInfo[%u].pImageBinds[%u]: image must have been created with "
"VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT set",
bind_idx, image_idx);
}
if (image_bind.pBinds) {
for (uint32_t image_bind_idx = 0; image_bind_idx < image_bind.bindCount; ++image_bind_idx) {
const VkSparseImageMemoryBind &memory_bind = image_bind.pBinds[image_bind_idx];
const auto *mem_info = Get<DEVICE_MEMORY_STATE>(memory_bind.memory);
if (mem_info) {
if (memory_bind.memoryOffset >= mem_info->alloc_info.allocationSize) {
skip |=
LogError(image_bind.image, "VUID-VkSparseMemoryBind-memoryOffset-01101",
"vkQueueBindSparse(): pBindInfo[%u].pImageBinds[%u]: memoryOffset is not less than "
"the size of memory",
bind_idx, image_idx);
}
}
if (image_state) {
if (memory_bind.subresource.mipLevel >= image_state->createInfo.mipLevels) {
skip |= LogError(image_bind.image, "VUID-VkSparseImageMemoryBindInfo-subresource-01722",
"vkQueueBindSparse(): pBindInfo[%" PRIu32 "].pImageBinds[%" PRIu32
"].subresource.mipLevel (%" PRIu32 ") is not less than mipLevels (%" PRIu32
") of image pBindInfo[%" PRIu32 "].pImageBinds[%" PRIu32 "].image.",
bind_idx, image_idx, memory_bind.subresource.mipLevel, image_state->createInfo.mipLevels,
bind_idx, image_idx);
}
if (memory_bind.subresource.arrayLayer >= image_state->createInfo.arrayLayers) {
skip |= LogError(image_bind.image, "VUID-VkSparseImageMemoryBindInfo-subresource-01723",
"vkQueueBindSparse(): pBindInfo[%" PRIu32 "].pImageBinds[%" PRIu32
"].subresource.arrayLayer (%" PRIu32 ") is not less than arrayLayers (%" PRIu32
") of image pBindInfo[%" PRIu32 "].pImageBinds[%" PRIu32 "].image.",
bind_idx, image_idx, memory_bind.subresource.arrayLayer,
image_state->createInfo.arrayLayers,
bind_idx, image_idx);
}
}
}
}
}
}
}
if (skip) return skip;
// Now verify maxTimelineSemaphoreValueDifference
for (uint32_t bind_idx = 0; bind_idx < bindInfoCount; ++bind_idx) {
Location outer_loc(Func::vkQueueBindSparse, Struct::VkBindSparseInfo);
const VkBindSparseInfo *bind_info = &pBindInfo[bind_idx];
auto *info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(bind_info->pNext);
if (info) {
// If there are any timeline semaphores, this condition gets checked before the early return above
if (info->waitSemaphoreValueCount) {
for (uint32_t i = 0; i < bind_info->waitSemaphoreCount; ++i) {
auto loc = outer_loc.dot(Field::pWaitSemaphoreValues, i);
VkSemaphore semaphore = bind_info->pWaitSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc, semaphore, info->pWaitSemaphoreValues[i]);
}
}
// If there are any timeline semaphores, this condition gets checked before the early return above
if (info->signalSemaphoreValueCount) {
for (uint32_t i = 0; i < bind_info->signalSemaphoreCount; ++i) {
auto loc = outer_loc.dot(Field::pSignalSemaphoreValues, i);
VkSemaphore semaphore = bind_info->pSignalSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc, semaphore, info->pSignalSemaphoreValues[i]);
}
}
}
}
return skip;
}
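// vkSignalSemaphore(KHR): the semaphore must be a timeline semaphore and the new value must exceed both the
// current payload and any pending signal operations, within maxTimelineSemaphoreValueDifference.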
bool CoreChecks::ValidateSignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo, const char *api_name) const {
bool skip = false;
const auto semaphore_state = GetSemaphoreState(pSignalInfo->semaphore);
if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) {
skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-semaphore-03257",
"%s(): semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE type", api_name,
report_data->FormatHandle(pSignalInfo->semaphore).c_str());
return skip;
}
if (semaphore_state && semaphore_state->payload >= pSignalInfo->value) {
skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03258",
"%s(): value must be greater than current semaphore %s value", api_name,
report_data->FormatHandle(pSignalInfo->semaphore).c_str());
}
for (auto &pair : queueMap) {
const auto &queue_state = pair.second;
for (const auto &submission : queue_state->submissions) {
for (const auto &signal_semaphore : submission.signalSemaphores) {
if (signal_semaphore.semaphore == pSignalInfo->semaphore && pSignalInfo->value >= signal_semaphore.payload) {
skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03259",
"%s(): value must be greater than value of pending signal operation "
"for semaphore %s",
api_name, report_data->FormatHandle(pSignalInfo->semaphore).c_str());
}
}
}
}
if (!skip) {
Location loc(Func::vkSignalSemaphore, Struct::VkSemaphoreSignalInfo, Field::value);
skip |= ValidateMaxTimelineSemaphoreValueDifference(loc, pSignalInfo->semaphore, pSignalInfo->value);
}
return skip;
}
bool CoreChecks::PreCallValidateSignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo) const {
return ValidateSignalSemaphore(device, pSignalInfo, "vkSignalSemaphore");
}
bool CoreChecks::PreCallValidateSignalSemaphoreKHR(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo) const {
return ValidateSignalSemaphore(device, pSignalInfo, "vkSignalSemaphoreKHR");
}
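// Importing a payload into a semaphore that is still in use by a command buffer is flagged
// (no dedicated VUID, hence kVUIDUndefined below).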
bool CoreChecks::ValidateImportSemaphore(VkSemaphore semaphore, const char *caller_name) const {
bool skip = false;
const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
if (sema_node) {
skip |= ValidateObjectNotInUse(sema_node, caller_name, kVUIDUndefined);
}
return skip;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportSemaphoreWin32HandleKHR(
VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) const {
return ValidateImportSemaphore(pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
}
#endif // VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportSemaphoreFdKHR(VkDevice device,
const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) const {
return ValidateImportSemaphore(pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
}
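// Importing a payload into a fence that is in flight with internal sync scope is an error.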
bool CoreChecks::ValidateImportFence(VkFence fence, const char *vuid, const char *caller_name) const {
const FENCE_STATE *fence_node = GetFenceState(fence);
bool skip = false;
if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
skip |=
LogError(fence, vuid, "%s: Fence %s that is currently in use.", caller_name, report_data->FormatHandle(fence).c_str());
}
return skip;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportFenceWin32HandleKHR(
VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) const {
return ValidateImportFence(pImportFenceWin32HandleInfo->fence, "VUID-vkImportFenceWin32HandleKHR-fence-04448",
"vkImportFenceWin32HandleKHR()");
}
#endif // VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) const {
return ValidateImportFence(pImportFenceFdInfo->fence, "VUID-vkImportFenceFdKHR-fence-01463", "vkImportFenceFdKHR()");
}
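// Derives the VkImageCreateInfo implied by a VkSwapchainCreateInfoKHR so that the swapchain parameters can be
// checked against vkGetPhysicalDeviceImageFormatProperties below.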
static VkImageCreateInfo GetSwapchainImpliedImageCreateInfo(VkSwapchainCreateInfoKHR const *pCreateInfo) {
auto result = LvlInitStruct<VkImageCreateInfo>();
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) {
result.flags |= VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT;
}
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR) result.flags |= VK_IMAGE_CREATE_PROTECTED_BIT;
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
result.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT;
}
result.imageType = VK_IMAGE_TYPE_2D;
result.format = pCreateInfo->imageFormat;
result.extent.width = pCreateInfo->imageExtent.width;
result.extent.height = pCreateInfo->imageExtent.height;
result.extent.depth = 1;
result.mipLevels = 1;
result.arrayLayers = pCreateInfo->imageArrayLayers;
result.samples = VK_SAMPLE_COUNT_1_BIT;
result.tiling = VK_IMAGE_TILING_OPTIMAL;
result.usage = pCreateInfo->imageUsage;
result.sharingMode = pCreateInfo->imageSharingMode;
result.queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount;
result.pQueueFamilyIndices = pCreateInfo->pQueueFamilyIndices;
result.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
return result;
}
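// Core swapchain creation checks: surface presentation support, oldSwapchain state, image count/extent/usage/
// transform/alpha against the surface capabilities, format/color space and present mode against the surface,
// shared-present and protected capabilities, and the implied image creation parameters.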
bool CoreChecks::ValidateCreateSwapchain(const char *func_name, VkSwapchainCreateInfoKHR const *pCreateInfo,
const SURFACE_STATE *surface_state, const SWAPCHAIN_NODE *old_swapchain_state) const {
// All physical devices and queue families are required to be able to present to any native window on Android; require the
// application to have established support on any other platform.
if (!instance_extensions.vk_khr_android_surface) {
auto support_predicate = [this](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
// TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
return (qs.first.gpu == physical_device) && qs.second;
};
const auto &support = surface_state->gpu_queue_support;
bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
if (!is_supported) {
if (LogError(
device, "VUID-VkSwapchainCreateInfoKHR-surface-01270",
"%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. The "
"vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with "
"this surface for at least one queue family of this device.",
func_name)) {
return true;
}
}
}
if (old_swapchain_state) {
if (old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
"%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name)) {
return true;
}
}
if (old_swapchain_state->retired) {
if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
"%s: pCreateInfo->oldSwapchain is retired", func_name)) {
return true;
}
}
}
if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01689",
"%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width,
pCreateInfo->imageExtent.height)) {
return true;
}
}
auto physical_device_state = GetPhysicalDeviceState();
VkSurfaceCapabilitiesKHR capabilities{};
DispatchGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_state->PhysDev(), pCreateInfo->surface, &capabilities);
bool skip = false;
VkSurfaceTransformFlagBitsKHR current_transform = capabilities.currentTransform;
if ((pCreateInfo->preTransform & current_transform) != pCreateInfo->preTransform) {
skip |= LogPerformanceWarning(physical_device, kVUID_Core_Swapchain_PreTransform,
"%s: pCreateInfo->preTransform (%s) doesn't match the currentTransform (%s) returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR, the presentation engine will transform the image "
"content as part of the presentation operation.",
func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform),
string_VkSurfaceTransformFlagBitsKHR(current_transform));
}
const VkPresentModeKHR present_mode = pCreateInfo->presentMode;
const bool shared_present_mode = (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == present_mode ||
VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == present_mode);
// Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
// Shared Present Mode must have a minImageCount of 1
if ((pCreateInfo->minImageCount < capabilities.minImageCount) && !shared_present_mode) {
const char *vuid = IsExtEnabled(device_extensions.vk_khr_shared_presentable_image)
? "VUID-VkSwapchainCreateInfoKHR-presentMode-02839"
: "VUID-VkSwapchainCreateInfoKHR-minImageCount-01271";
if (LogError(device, vuid,
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) {
return true;
}
}
if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01272",
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) {
return true;
}
}
// Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
(pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
(pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
(pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274",
"%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
"maxImageExtent = (%d,%d).",
func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, capabilities.currentExtent.width,
capabilities.currentExtent.height, capabilities.minImageExtent.width, capabilities.minImageExtent.height,
capabilities.maxImageExtent.width, capabilities.maxImageExtent.height)) {
return true;
}
}
// pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedTransforms.
if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
!(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string error_string = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name,
string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
error_string += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & capabilities.supportedTransforms) {
const char *new_str = string_VkSurfaceTransformFlagBitsKHR(static_cast<VkSurfaceTransformFlagBitsKHR>(1 << i));
sprintf(str, " %s\n", new_str);
error_string += str;
}
}
// Log the message that we've built up:
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.", error_string.c_str())) return true;
}
// pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
!((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string error_string = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n", func_name,
string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
error_string += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & capabilities.supportedCompositeAlpha) {
const char *new_str = string_VkCompositeAlphaFlagBitsKHR(static_cast<VkCompositeAlphaFlagBitsKHR>(1 << i));
sprintf(str, " %s\n", new_str);
error_string += str;
}
}
// Log the message that we've built up:
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.", error_string.c_str())) return true;
}
// Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275",
"%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name,
pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers)) {
return true;
}
}
const VkImageUsageFlags image_usage = pCreateInfo->imageUsage;
// Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
// Shared Present Mode uses different set of capabilities to check imageUsage support
if ((image_usage != (image_usage & capabilities.supportedUsageFlags)) && !shared_present_mode) {
const char *vuid = IsExtEnabled(device_extensions.vk_khr_shared_presentable_image)
? "VUID-VkSwapchainCreateInfoKHR-presentMode-01427"
: "VUID-VkSwapchainCreateInfoKHR-imageUsage-01276";
if (LogError(device, vuid,
"%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x.",
func_name, image_usage, capabilities.supportedUsageFlags)) {
return true;
}
}
if (IsExtEnabled(device_extensions.vk_khr_surface_protected_capabilities) &&
(pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR)) {
VkPhysicalDeviceSurfaceInfo2KHR surface_info = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR};
surface_info.surface = pCreateInfo->surface;
VkSurfaceProtectedCapabilitiesKHR surface_protected_capabilities = {VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR};
VkSurfaceCapabilities2KHR surface_capabilities = {VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR};
surface_capabilities.pNext = &surface_protected_capabilities;
DispatchGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device_state->PhysDev(), &surface_info, &surface_capabilities);
if (!surface_protected_capabilities.supportsProtected) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03187",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR but the surface "
"capabilities does not have VkSurfaceProtectedCapabilitiesKHR.supportsProtected set to VK_TRUE.",
func_name)) {
return true;
}
}
}
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
std::vector<VkSurfaceFormatKHR> surface_formats;
uint32_t surface_format_count = 0;
DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count, nullptr);
surface_formats.resize(surface_format_count);
DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count, &surface_formats[0]);
{
// Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
bool found_format = false;
bool found_color_space = false;
bool found_match = false;
for (const auto &format : surface_formats) {
if (pCreateInfo->imageFormat == format.format) {
// Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
found_format = true;
if (pCreateInfo->imageColorSpace == format.colorSpace) {
found_match = true;
break;
}
} else {
if (pCreateInfo->imageColorSpace == format.colorSpace) {
found_color_space = true;
}
}
}
if (!found_match) {
if (!found_format) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageFormat (%s).", func_name,
string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
}
if (!found_color_space) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageColorSpace (%s).", func_name,
string_VkColorSpaceKHR(pCreateInfo->imageColorSpace))) {
return true;
}
}
}
}
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
std::vector<VkPresentModeKHR> present_modes;
uint32_t present_mode_count = 0;
DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->PhysDev(), pCreateInfo->surface, &present_mode_count,
nullptr);
present_modes.resize(present_mode_count);
DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->PhysDev(), pCreateInfo->surface, &present_mode_count,
&present_modes[0]);
// Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
bool found_match = std::find(present_modes.begin(), present_modes.end(), present_mode) != present_modes.end();
if (!found_match) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-presentMode-01281",
"%s called with a non-supported presentMode (i.e. %s).", func_name, string_VkPresentModeKHR(present_mode))) {
return true;
}
}
// Validate state for shared presentable case
if (shared_present_mode) {
if (!IsExtEnabled(device_extensions.vk_khr_shared_presentable_image)) {
if (LogError(
device, kVUID_Core_DrawState_ExtensionNotEnabled,
"%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
"been enabled.",
func_name, string_VkPresentModeKHR(present_mode))) {
return true;
}
} else if (pCreateInfo->minImageCount != 1) {
if (LogError(
device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01383",
"%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
"must be 1.",
func_name, string_VkPresentModeKHR(present_mode), pCreateInfo->minImageCount)) {
return true;
}
}
VkSharedPresentSurfaceCapabilitiesKHR shared_present_capabilities = LvlInitStruct<VkSharedPresentSurfaceCapabilitiesKHR>();
VkSurfaceCapabilities2KHR capabilities2 = LvlInitStruct<VkSurfaceCapabilities2KHR>(&shared_present_capabilities);
VkPhysicalDeviceSurfaceInfo2KHR surface_info = LvlInitStruct<VkPhysicalDeviceSurfaceInfo2KHR>();
surface_info.surface = pCreateInfo->surface;
DispatchGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device_state->PhysDev(), &surface_info, &capabilities2);
if (image_usage != (image_usage & shared_present_capabilities.sharedPresentSupportedUsageFlags)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageUsage-01384",
"%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits for %s "
"present mode are 0x%08x.",
func_name, image_usage, string_VkPresentModeKHR(pCreateInfo->presentMode),
shared_present_capabilities.sharedPresentSupportedUsageFlags)) {
return true;
}
}
}
if ((pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) && pCreateInfo->pQueueFamilyIndices) {
bool skip1 = ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
"vkCreateBuffer", "pCreateInfo->pQueueFamilyIndices",
"VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428");
if (skip1) return true;
}
// Validate pCreateInfo->imageUsage against GetPhysicalDeviceFormatProperties
const VkFormatProperties format_properties = GetPDFormatProperties(pCreateInfo->imageFormat);
const VkFormatFeatureFlags tiling_features = format_properties.optimalTilingFeatures;
if (tiling_features == 0) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL has no supported format features on this "
"physical device.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(tiling_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_SAMPLED_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((image_usage & VK_IMAGE_USAGE_STORAGE_BIT) && !(tiling_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_STORAGE_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) && !(tiling_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((image_usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) &&
!(tiling_features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT or VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
}
const VkImageCreateInfo image_create_info = GetSwapchainImpliedImageCreateInfo(pCreateInfo);
VkImageFormatProperties image_properties = {};
const VkResult image_properties_result = DispatchGetPhysicalDeviceImageFormatProperties(
physical_device, image_create_info.format, image_create_info.imageType, image_create_info.tiling, image_create_info.usage,
image_create_info.flags, &image_properties);
if (image_properties_result != VK_SUCCESS) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"vkGetPhysicalDeviceImageFormatProperties() unexpectedly failed, "
"when called for %s validation with following params: "
"format: %s, imageType: %s, "
"tiling: %s, usage: %s, "
"flags: %s.",
func_name, string_VkFormat(image_create_info.format), string_VkImageType(image_create_info.imageType),
string_VkImageTiling(image_create_info.tiling), string_VkImageUsageFlags(image_create_info.usage).c_str(),
string_VkImageCreateFlags(image_create_info.flags).c_str())) {
return true;
}
}
// Validate pCreateInfo->imageArrayLayers against VkImageFormatProperties::maxArrayLayers
if (pCreateInfo->imageArrayLayers > image_properties.maxArrayLayers) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s called with a non-supported imageArrayLayers (i.e. %d). "
"Maximum value returned by vkGetPhysicalDeviceImageFormatProperties() is %d "
"for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL",
func_name, pCreateInfo->imageArrayLayers, image_properties.maxArrayLayers,
string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
}
// Validate pCreateInfo->imageExtent against VkImageFormatProperties::maxExtent
if ((pCreateInfo->imageExtent.width > image_properties.maxExtent.width) ||
(pCreateInfo->imageExtent.height > image_properties.maxExtent.height)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s called with imageExtent = (%d,%d), which is bigger than max extent (%d,%d)"
"returned by vkGetPhysicalDeviceImageFormatProperties(): "
"for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL",
func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, image_properties.maxExtent.width,
image_properties.maxExtent.height, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
}
if ((pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) &&
device_group_create_info.physicalDeviceCount == 1) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-physicalDeviceCount-01429",
"%s called with flags containing VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR"
"but logical device was created with VkDeviceGroupDeviceCreateInfo::physicalDeviceCount equal to 1",
func_name)) {
return true;
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) const {
const auto surface_state = GetSurfaceState(pCreateInfo->surface);
const auto old_swapchain_state = GetSwapchainState(pCreateInfo->oldSwapchain);
return ValidateCreateSwapchain("vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state);
}
void CoreChecks::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain,
const VkAllocationCallbacks *pAllocator) {
if (swapchain) {
auto swapchain_data = GetSwapchainState(swapchain);
if (swapchain_data) {
for (const auto &swapchain_image : swapchain_data->images) {
if (!swapchain_image.image_state) continue;
imageLayoutMap.erase(swapchain_image.image_state->image());
qfo_release_image_barrier_map.erase(swapchain_image.image_state->image());
}
}
}
StateTracker::PreCallRecordDestroySwapchainKHR(device, swapchain, pAllocator);
}
void CoreChecks::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages, VkResult result) {
// This function will run twice. The first is to get pSwapchainImageCount. The second is to get pSwapchainImages.
// The first time in StateTracker::PostCallRecordGetSwapchainImagesKHR only generates the container's size.
// The second time in StateTracker::PostCallRecordGetSwapchainImagesKHR will create VKImage and IMAGE_STATE.
// So GlobalImageLayoutMap saving new IMAGE_STATEs has to run in the second time.
// pSwapchainImages is not nullptr and it needs to wait until StateTracker::PostCallRecordGetSwapchainImagesKHR.
uint32_t new_swapchain_image_index = 0;
if (((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && pSwapchainImages) {
auto swapchain_state = GetSwapchainState(swapchain);
const auto image_vector_size = swapchain_state->images.size();
for (; new_swapchain_image_index < *pSwapchainImageCount; ++new_swapchain_image_index) {
if ((new_swapchain_image_index >= image_vector_size) ||
!swapchain_state->images[new_swapchain_image_index].image_state) {
break;
            }
}
}
StateTracker::PostCallRecordGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages, result);
if (((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && pSwapchainImages) {
for (; new_swapchain_image_index < *pSwapchainImageCount; ++new_swapchain_image_index) {
auto image_state = Get<IMAGE_STATE>(pSwapchainImages[new_swapchain_image_index]);
AddInitialLayoutintoImageLayoutMap(*image_state, imageLayoutMap);
}
}
}
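// Validates the wait semaphores, the per-swapchain image indices/layouts, and any pNext structs chained to VkPresentInfoKHR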
bool CoreChecks::PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) const {
bool skip = false;
const auto queue_state = Get<QUEUE_STATE>(queue);
for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
const auto semaphore_state = GetSemaphoreState(pPresentInfo->pWaitSemaphores[i]);
if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_BINARY) {
skip |= LogError(pPresentInfo->pWaitSemaphores[i], "VUID-vkQueuePresentKHR-pWaitSemaphores-03267",
"vkQueuePresentKHR: pWaitSemaphores[%u] (%s) is not a VK_SEMAPHORE_TYPE_BINARY", i,
report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
}
if (semaphore_state && !semaphore_state->signaled && !SemaphoreWasSignaled(pPresentInfo->pWaitSemaphores[i])) {
LogObjectList objlist(queue);
objlist.add(pPresentInfo->pWaitSemaphores[i]);
skip |= LogError(objlist, "VUID-vkQueuePresentKHR-pWaitSemaphores-03268",
"vkQueuePresentKHR: Queue %s is waiting on pWaitSemaphores[%u] (%s) that has no way to be signaled.",
report_data->FormatHandle(queue).c_str(), i,
report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
}
}
for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
if (swapchain_data) {
// VU currently is 2-in-1, covers being a valid index and valid layout
const char *validation_error = IsExtEnabled(device_extensions.vk_khr_shared_presentable_image)
? "VUID-VkPresentInfoKHR-pImageIndices-01430"
: "VUID-VkPresentInfoKHR-pImageIndices-01296";
// Check if index is even possible to be acquired to give better error message
if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
skip |= LogError(
pPresentInfo->pSwapchains[i], validation_error,
"vkQueuePresentKHR: pSwapchains[%u] image index is too large (%u). There are only %u images in this swapchain.",
i, pPresentInfo->pImageIndices[i], static_cast<uint32_t>(swapchain_data->images.size()));
} else if (!swapchain_data->images[pPresentInfo->pImageIndices[i]].image_state ||
!swapchain_data->images[pPresentInfo->pImageIndices[i]].acquired) {
skip |= LogError(pPresentInfo->pSwapchains[i], validation_error,
"vkQueuePresentKHR: pSwapchains[%" PRIu32 "] image at index %" PRIu32
" was not acquired from the swapchain.",
i, pPresentInfo->pImageIndices[i]);
} else {
const auto *image_state = swapchain_data->images[pPresentInfo->pImageIndices[i]].image_state;
assert(image_state);
vector<VkImageLayout> layouts;
if (FindLayouts(*image_state, layouts)) {
for (auto layout : layouts) {
if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) &&
(!IsExtEnabled(device_extensions.vk_khr_shared_presentable_image) ||
(layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
skip |= LogError(queue, validation_error,
"vkQueuePresentKHR(): pSwapchains[%u] images passed to present must be in layout "
"VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
i, string_VkImageLayout(layout));
}
}
}
const auto *display_present_info = LvlFindInChain<VkDisplayPresentInfoKHR>(pPresentInfo->pNext);
if (display_present_info) {
if (display_present_info->srcRect.offset.x < 0 || display_present_info->srcRect.offset.y < 0 ||
display_present_info->srcRect.offset.x + display_present_info->srcRect.extent.width >
image_state->createInfo.extent.width ||
display_present_info->srcRect.offset.y + display_present_info->srcRect.extent.height >
image_state->createInfo.extent.height) {
skip |= LogError(queue, "VUID-VkDisplayPresentInfoKHR-srcRect-01257",
"vkQueuePresentKHR(): VkDisplayPresentInfoKHR::srcRect (offset (%" PRIu32 ", %" PRIu32
"), extent (%" PRIu32 ", %" PRIu32
")) in the pNext chain of VkPresentInfoKHR is not a subset of the image begin presented "
"(extent (%" PRIu32 ", %" PRIu32 ")).",
display_present_info->srcRect.offset.x, display_present_info->srcRect.offset.y,
display_present_info->srcRect.extent.width, display_present_info->srcRect.extent.height,
image_state->createInfo.extent.width, image_state->createInfo.extent.height);
}
}
}
// All physical devices and queue families are required to be able to present to any native window on Android; require
// the application to have established support on any other platform.
if (!instance_extensions.vk_khr_android_surface) {
const auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface);
auto support_it = surface_state->gpu_queue_support.find({physical_device, queue_state->queueFamilyIndex});
if (support_it == surface_state->gpu_queue_support.end()) {
skip |= LogError(
pPresentInfo->pSwapchains[i], kVUID_Core_DrawState_SwapchainUnsupportedQueue,
"vkQueuePresentKHR: Presenting pSwapchains[%u] image without calling vkGetPhysicalDeviceSurfaceSupportKHR",
i);
} else if (!support_it->second) {
skip |= LogError(
pPresentInfo->pSwapchains[i], "VUID-vkQueuePresentKHR-pSwapchains-01292",
"vkQueuePresentKHR: Presenting pSwapchains[%u] image on queue that cannot present to this surface.", i);
}
}
}
}
if (pPresentInfo->pNext) {
// Verify ext struct
const auto *present_regions = LvlFindInChain<VkPresentRegionsKHR>(pPresentInfo->pNext);
if (present_regions) {
for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
assert(swapchain_data);
VkPresentRegionKHR region = present_regions->pRegions[i];
for (uint32_t j = 0; j < region.rectangleCount; ++j) {
VkRectLayerKHR rect = region.pRectangles[j];
// Swap offsets and extents for 90 or 270 degree preTransform rotation
if (swapchain_data->createInfo.preTransform &
(VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR | VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR |
VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR |
VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR)) {
std::swap(rect.offset.x, rect.offset.y);
std::swap(rect.extent.width, rect.extent.height);
}
if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
skip |=
LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-04864",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], "
"the sum of offset.x (%i) and extent.width (%i) after applying preTransform (%s) is greater "
"than the corresponding swapchain's imageExtent.width (%i).",
i, j, rect.offset.x, rect.extent.width,
string_VkSurfaceTransformFlagBitsKHR(swapchain_data->createInfo.preTransform),
swapchain_data->createInfo.imageExtent.width);
}
if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
skip |=
LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-04864",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], "
"the sum of offset.y (%i) and extent.height (%i) after applying preTransform (%s) is greater "
"than the corresponding swapchain's imageExtent.height (%i).",
i, j, rect.offset.y, rect.extent.height,
string_VkSurfaceTransformFlagBitsKHR(swapchain_data->createInfo.preTransform),
swapchain_data->createInfo.imageExtent.height);
}
if (rect.layer > swapchain_data->createInfo.imageArrayLayers) {
skip |= LogError(
pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-layer-01262",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
"(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).",
i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
}
}
}
}
const auto *present_times_info = LvlFindInChain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
if (present_times_info) {
if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
skip |=
LogError(pPresentInfo->pSwapchains[0], "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247",
"vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
"is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, "
"VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
present_times_info->swapchainCount, pPresentInfo->swapchainCount);
}
}
const auto *present_id_info = LvlFindInChain<VkPresentIdKHR>(pPresentInfo->pNext);
if (present_id_info) {
if (!enabled_features.present_id_features.presentId) {
for (uint32_t i = 0; i < present_id_info->swapchainCount; i++) {
if (present_id_info->pPresentIds[i] != 0) {
skip |=
LogError(pPresentInfo->pSwapchains[0], "VUID-VkPresentInfoKHR-pNext-06235",
"vkQueuePresentKHR(): presentId feature is not enabled and VkPresentIdKHR::pPresentId[%" PRIu32
"] = %" PRIu64 " when only NULL values are allowed",
i, present_id_info->pPresentIds[i]);
}
}
}
if (pPresentInfo->swapchainCount != present_id_info->swapchainCount) {
skip |= LogError(pPresentInfo->pSwapchains[0], "VUID-VkPresentIdKHR-swapchainCount-04998",
"vkQueuePresentKHR(): VkPresentIdKHR.swapchainCount is %" PRIu32
" but pPresentInfo->swapchainCount is %" PRIu32
". VkPresentIdKHR.swapchainCount must be the same value as VkPresentInfoKHR::swapchainCount",
present_id_info->swapchainCount, pPresentInfo->swapchainCount);
}
for (uint32_t i = 0; i < present_id_info->swapchainCount; i++) {
const auto swapchain_state = GetSwapchainState(pPresentInfo->pSwapchains[i]);
if ((present_id_info->pPresentIds[i] != 0) &&
(present_id_info->pPresentIds[i] <= swapchain_state->max_present_id)) {
skip |= LogError(pPresentInfo->pSwapchains[i], "VUID-VkPresentIdKHR-presentIds-04999",
"vkQueuePresentKHR(): VkPresentIdKHR.pPresentId[%" PRIu32 "] is %" PRIu64
" and the largest presentId sent for this swapchain is %" PRIu64
". Each presentIds entry must be greater than any previous presentIds entry passed for the "
"associated pSwapchains entry",
i, present_id_info->pPresentIds[i], swapchain_state->max_present_id);
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
const VkSwapchainCreateInfoKHR *pCreateInfos,
const VkAllocationCallbacks *pAllocator,
VkSwapchainKHR *pSwapchains) const {
bool skip = false;
if (pCreateInfos) {
for (uint32_t i = 0; i < swapchainCount; i++) {
const auto surface_state = GetSurfaceState(pCreateInfos[i].surface);
const auto old_swapchain_state = GetSwapchainState(pCreateInfos[i].oldSwapchain);
std::stringstream func_name;
func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]()";
skip |= ValidateCreateSwapchain(func_name.str().c_str(), &pCreateInfos[i], surface_state, old_swapchain_state);
}
}
return skip;
}
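// Shared validation for vkAcquireNextImageKHR and vkAcquireNextImage2KHR (distinguished by AcquireVersion)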
bool CoreChecks::ValidateAcquireNextImage(VkDevice device, const AcquireVersion version, VkSwapchainKHR swapchain, uint64_t timeout,
VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex, const char *func_name,
const char *semaphore_type_vuid) const {
bool skip = false;
auto semaphore_state = GetSemaphoreState(semaphore);
if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_BINARY) {
skip |= LogError(semaphore, semaphore_type_vuid, "%s: %s is not a VK_SEMAPHORE_TYPE_BINARY", func_name,
report_data->FormatHandle(semaphore).c_str());
}
if (semaphore_state && semaphore_state->scope == kSyncScopeInternal && semaphore_state->signaled) {
const char *vuid = version == ACQUIRE_VERSION_2 ? "VUID-VkAcquireNextImageInfoKHR-semaphore-01288"
: "VUID-vkAcquireNextImageKHR-semaphore-01286";
skip |= LogError(semaphore, vuid, "%s: Semaphore must not be currently signaled or in a wait state.", func_name);
}
auto fence_state = GetFenceState(fence);
if (fence_state) {
skip |= ValidateFenceForSubmit(fence_state, "VUID-vkAcquireNextImageKHR-fence-01287",
"VUID-vkAcquireNextImageKHR-fence-01287", "vkAcquireNextImageKHR()");
}
const auto swapchain_data = GetSwapchainState(swapchain);
if (swapchain_data) {
if (swapchain_data->retired) {
const char *vuid = version == ACQUIRE_VERSION_2 ? "VUID-VkAcquireNextImageInfoKHR-swapchain-01675"
: "VUID-vkAcquireNextImageKHR-swapchain-01285";
skip |= LogError(swapchain, vuid,
"%s: This swapchain has been retired. The application can still present any images it "
"has acquired, but cannot acquire any more.",
func_name);
}
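        // With a timeout of UINT64_MAX the application must leave at least minImageCount images unacquired,
        // otherwise the acquire may never be able to complete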
const uint32_t acquired_images = swapchain_data->acquired_images;
const uint32_t swapchain_image_count = static_cast<uint32_t>(swapchain_data->images.size());
const auto min_image_count = swapchain_data->surface_capabilities.minImageCount;
const bool too_many_already_acquired = acquired_images > swapchain_image_count - min_image_count;
if (timeout == UINT64_MAX && too_many_already_acquired) {
const char *vuid = version == ACQUIRE_VERSION_2 ? "VUID-vkAcquireNextImage2KHR-swapchain-01803"
: "VUID-vkAcquireNextImageKHR-swapchain-01802";
const uint32_t acquirable = swapchain_image_count - min_image_count + 1;
skip |= LogError(swapchain, vuid,
"%s: Application has already previously acquired %" PRIu32 " image%s from swapchain. Only %" PRIu32
" %s available to be acquired using a timeout of UINT64_MAX (given the swapchain has %" PRIu32
", and VkSurfaceCapabilitiesKHR::minImageCount is %" PRIu32 ").",
func_name, acquired_images, acquired_images > 1 ? "s" : "", acquirable, acquirable > 1 ? "are" : "is",
swapchain_image_count, min_image_count);
}
}
return skip;
}
bool CoreChecks::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) const {
return ValidateAcquireNextImage(device, ACQUIRE_VERSION_1, swapchain, timeout, semaphore, fence, pImageIndex,
"vkAcquireNextImageKHR", "VUID-vkAcquireNextImageKHR-semaphore-03265");
}
bool CoreChecks::PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
uint32_t *pImageIndex) const {
bool skip = false;
skip |= ValidateDeviceMaskToPhysicalDeviceCount(pAcquireInfo->deviceMask, pAcquireInfo->swapchain,
"VUID-VkAcquireNextImageInfoKHR-deviceMask-01290");
skip |= ValidateDeviceMaskToZero(pAcquireInfo->deviceMask, pAcquireInfo->swapchain,
"VUID-VkAcquireNextImageInfoKHR-deviceMask-01291");
skip |= ValidateAcquireNextImage(device, ACQUIRE_VERSION_2, pAcquireInfo->swapchain, pAcquireInfo->timeout,
pAcquireInfo->semaphore, pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR",
"VUID-VkAcquireNextImageInfoKHR-semaphore-03266");
return skip;
}
bool CoreChecks::PreCallValidateWaitForPresentKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t presentId, uint64_t timeout) const {
bool skip = false;
if (!enabled_features.present_wait_features.presentWait) {
skip |= LogError(swapchain, "VUID-vkWaitForPresentKHR-presentWait-06234",
"vkWaitForPresentKHR(): VkWaitForPresent called but presentWait feature is not enabled");
}
const auto swapchain_state = GetSwapchainState(swapchain);
if (swapchain_state) {
if (swapchain_state->retired) {
skip |= LogError(swapchain, "VUID-vkWaitForPresentKHR-swapchain-04997",
"vkWaitForPresentKHR() called with a retired swapchain.");
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface,
const VkAllocationCallbacks *pAllocator) const {
const auto surface_state = GetSurfaceState(surface);
bool skip = false;
if ((surface_state) && (surface_state->swapchain)) {
skip |= LogError(instance, "VUID-vkDestroySurfaceKHR-surface-01266",
"vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed.");
}
return skip;
}
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct wl_display *display) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306",
"vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309",
"vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, xcb_connection_t *connection,
xcb_visualid_t visual_id) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312",
"vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, Display *dpy,
VisualID visualID) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315",
"vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_XLIB_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
VkSurfaceKHR surface, VkBool32 *pSupported) const {
const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(physical_device_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269",
"vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
}
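// Shared validation for vkCreateDescriptorUpdateTemplate and vkCreateDescriptorUpdateTemplateKHR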
bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name,
const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo) const {
bool skip = false;
const auto layout = GetDescriptorSetLayoutShared(pCreateInfo->descriptorSetLayout);
if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
skip |= LogError(pCreateInfo->descriptorSetLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350",
"%s: Invalid pCreateInfo->descriptorSetLayout (%s)", func_name,
report_data->FormatHandle(pCreateInfo->descriptorSetLayout).c_str());
} else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) {
auto bind_point = pCreateInfo->pipelineBindPoint;
bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) ||
(bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR);
if (!valid_bp) {
skip |=
LogError(device, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351",
"%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point));
}
const auto pipeline_layout = GetPipelineLayout(pCreateInfo->pipelineLayout);
if (!pipeline_layout) {
skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352",
"%s: Invalid pCreateInfo->pipelineLayout (%s)", func_name,
report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str());
} else {
const uint32_t pd_set = pCreateInfo->set;
if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] ||
!pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) {
skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353",
"%s: pCreateInfo->set (%" PRIu32
") does not refer to the push descriptor set layout for pCreateInfo->pipelineLayout (%s).",
func_name, pd_set, report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str());
}
}
}
for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
const auto &descriptor_update = pCreateInfo->pDescriptorUpdateEntries[i];
if (descriptor_update.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
if (descriptor_update.dstArrayElement & 3) {
skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateEntry-descriptor-02226",
"%s: pCreateInfo->pDescriptorUpdateEntries[%" PRIu32
"] has descriptorType VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT, but dstArrayElement (%" PRIu32 ") is not a "
"multiple of 4).",
func_name, i, descriptor_update.dstArrayElement);
}
if (descriptor_update.descriptorCount & 3) {
skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateEntry-descriptor-02227",
"%s: pCreateInfo->pDescriptorUpdateEntries[%" PRIu32
"] has descriptorType VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT, but descriptorCount (%" PRIu32 ")is not a "
"multiple of 4).",
func_name, i, descriptor_update.descriptorCount);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) const {
bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", pCreateInfo);
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) const {
bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", pCreateInfo);
return skip;
}
bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData) const {
bool skip = false;
auto const template_map_entry = desc_template_map.find(descriptorUpdateTemplate);
if ((template_map_entry == desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) {
// Object tracker will report errors for invalid descriptorUpdateTemplate values, avoiding a crash in release builds
// but retaining the assert as template support is new enough to want to investigate these in debug builds.
assert(0);
} else {
const TEMPLATE_STATE *template_state = template_map_entry->second.get();
// TODO: Validate template push descriptor updates
if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
skip = ValidateUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state, pData);
}
}
return skip;
}
bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData) const {
return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}
bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData) const {
return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}
bool CoreChecks::PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
VkPipelineLayout layout, uint32_t set,
const void *pData) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const char *const func_name = "vkPushDescriptorSetWithTemplateKHR()";
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR);
const auto layout_data = GetPipelineLayout(layout);
const auto dsl = layout_data ? layout_data->GetDsl(set) : nullptr;
// Validate the set index points to a push descriptor set and is in range
if (dsl) {
if (!dsl->IsPushDescriptor()) {
skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
"%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set,
report_data->FormatHandle(layout).c_str());
}
} else if (layout_data && (set >= layout_data->set_layouts.size())) {
skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
"%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set,
report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(layout_data->set_layouts.size()));
}
const auto template_state = GetDescriptorTemplateState(descriptorUpdateTemplate);
if (template_state) {
const auto &template_ci = template_state->create_info;
static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR,
"VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366")};
skip |= ValidatePipelineBindPoint(cb_state, template_ci.pipelineBindPoint, func_name, bind_errors);
if (template_ci.templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
skip |= LogError(cb_state->commandBuffer(), kVUID_Core_PushDescriptorUpdate_TemplateType,
"%s: descriptorUpdateTemplate %s was not created with flag "
"VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR.",
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str());
}
if (template_ci.set != set) {
skip |= LogError(cb_state->commandBuffer(), kVUID_Core_PushDescriptorUpdate_Template_SetMismatched,
"%s: descriptorUpdateTemplate %s created with set %" PRIu32
" does not match command parameter set %" PRIu32 ".",
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), template_ci.set, set);
}
if (!CompatForSet(set, layout_data, GetPipelineLayout(template_ci.pipelineLayout))) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(descriptorUpdateTemplate);
objlist.add(template_ci.pipelineLayout);
objlist.add(layout);
skip |= LogError(objlist, kVUID_Core_PushDescriptorUpdate_Template_LayoutMismatched,
"%s: descriptorUpdateTemplate %s created with %s is incompatible with command parameter "
"%s for set %" PRIu32,
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(),
report_data->FormatHandle(template_ci.pipelineLayout).c_str(),
report_data->FormatHandle(layout).c_str(), set);
}
}
if (dsl && template_state) {
// Create an empty proxy in order to use the existing descriptor set update validation
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this);
// Decode the template into a set of write updates
cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state, pData,
dsl->GetDescriptorSetLayout());
// Validate the decoded update against the proxy_ds
skip |= ValidatePushDescriptorsUpdate(&proxy_ds, static_cast<uint32_t>(decoded_template.desc_writes.size()),
decoded_template.desc_writes.data(), func_name);
}
return skip;
}
bool CoreChecks::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
const char *api_name) const {
bool skip = false;
const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) {
if (planeIndex >= physical_device_state->display_plane_property_count) {
skip |= LogError(physicalDevice, "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249",
"%s(): planeIndex (%u) must be in the range [0, %d] that was returned by "
"vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
"or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?",
api_name, planeIndex, physical_device_state->display_plane_property_count - 1);
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) const {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex,
"vkGetDisplayPlaneSupportedDisplaysKHR");
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
uint32_t planeIndex,
VkDisplayPlaneCapabilitiesKHR *pCapabilities) const {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneCapabilitiesKHR");
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
VkDisplayPlaneCapabilities2KHR *pCapabilities) const {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, pDisplayPlaneInfo->planeIndex,
"vkGetDisplayPlaneCapabilities2KHR");
return skip;
}
bool CoreChecks::PreCallValidateCreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSurfaceKHR *pSurface) const {
bool skip = false;
const VkDisplayModeKHR display_mode = pCreateInfo->displayMode;
const uint32_t plane_index = pCreateInfo->planeIndex;
if (pCreateInfo->alphaMode == VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR) {
const float global_alpha = pCreateInfo->globalAlpha;
if ((global_alpha > 1.0f) || (global_alpha < 0.0f)) {
skip |= LogError(
display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-alphaMode-01254",
"vkCreateDisplayPlaneSurfaceKHR(): alphaMode is VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR but globalAlpha is %f.",
global_alpha);
}
}
const DISPLAY_MODE_STATE *dm_state = GetDisplayModeState(display_mode);
if (dm_state != nullptr) {
// Get physical device from VkDisplayModeKHR state tracking
const VkPhysicalDevice physical_device = dm_state->physical_device;
const auto physical_device_state = GetPhysicalDeviceState(physical_device);
VkPhysicalDeviceProperties device_properties = {};
DispatchGetPhysicalDeviceProperties(physical_device, &device_properties);
const uint32_t width = pCreateInfo->imageExtent.width;
const uint32_t height = pCreateInfo->imageExtent.height;
if (width >= device_properties.limits.maxImageDimension2D) {
skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-width-01256",
"vkCreateDisplayPlaneSurfaceKHR(): width (%" PRIu32
") exceeds device limit maxImageDimension2D (%" PRIu32 ").",
width, device_properties.limits.maxImageDimension2D);
}
if (height >= device_properties.limits.maxImageDimension2D) {
skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-width-01256",
"vkCreateDisplayPlaneSurfaceKHR(): height (%" PRIu32
") exceeds device limit maxImageDimension2D (%" PRIu32 ").",
height, device_properties.limits.maxImageDimension2D);
}
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) {
if (plane_index >= physical_device_state->display_plane_property_count) {
skip |=
LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-planeIndex-01252",
"vkCreateDisplayPlaneSurfaceKHR(): planeIndex (%u) must be in the range [0, %d] that was returned by "
"vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
"or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?",
plane_index, physical_device_state->display_plane_property_count - 1);
} else {
// call here once we know the plane index used is a valid plane index
VkDisplayPlaneCapabilitiesKHR plane_capabilities;
DispatchGetDisplayPlaneCapabilitiesKHR(physical_device, display_mode, plane_index, &plane_capabilities);
if ((pCreateInfo->alphaMode & plane_capabilities.supportedAlpha) == 0) {
skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-alphaMode-01255",
"vkCreateDisplayPlaneSurfaceKHR(): alphaMode is %s but planeIndex %u supportedAlpha (0x%x) "
"does not support the mode.",
string_VkDisplayPlaneAlphaFlagBitsKHR(pCreateInfo->alphaMode), plane_index,
plane_capabilities.supportedAlpha);
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer,
const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmd(cb_state, CMD_DEBUGMARKERBEGINEXT);
}
bool CoreChecks::PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmd(cb_state, CMD_DEBUGMARKERENDEXT);
}
bool CoreChecks::PreCallValidateCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
VkQueryControlFlags flags, uint32_t index) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
QueryObject query_obj(queryPool, query, index);
const char *cmd_name = "vkCmdBeginQueryIndexedEXT()";
struct BeginQueryIndexedVuids : ValidateBeginQueryVuids {
BeginQueryIndexedVuids() : ValidateBeginQueryVuids() {
vuid_queue_flags = "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-cmdpool";
vuid_queue_feedback = "VUID-vkCmdBeginQueryIndexedEXT-queryType-02338";
vuid_queue_occlusion = "VUID-vkCmdBeginQueryIndexedEXT-queryType-00803";
vuid_precise = "VUID-vkCmdBeginQueryIndexedEXT-queryType-00800";
vuid_query_count = "VUID-vkCmdBeginQueryIndexedEXT-query-00802";
vuid_profile_lock = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03223";
vuid_scope_not_first = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03224";
vuid_scope_in_rp = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03225";
vuid_dup_query_type = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-04753";
vuid_protected_cb = "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-01885";
}
};
BeginQueryIndexedVuids vuids;
bool skip = ValidateBeginQuery(cb_state, query_obj, flags, index, CMD_BEGINQUERYINDEXEDEXT, &vuids);
// Extension specific VU's
const auto &query_pool_ci = GetQueryPoolState(query_obj.pool)->createInfo;
if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
if (IsExtEnabled(device_extensions.vk_ext_transform_feedback) &&
(index >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams)) {
skip |= LogError(
cb_state->commandBuffer(), "VUID-vkCmdBeginQueryIndexedEXT-queryType-02339",
"%s: index %" PRIu32
" must be less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams %" PRIu32 ".",
cmd_name, index, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams);
}
} else if (index != 0) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBeginQueryIndexedEXT-queryType-02340",
"%s: index %" PRIu32
" must be zero if %s was not created with type VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT.",
cmd_name, index, report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
void CoreChecks::PreCallRecordCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
VkQueryControlFlags flags, uint32_t index) {
if (disabled[query_validation]) return;
QueryObject query_obj = {queryPool, query, index};
EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQueryIndexedEXT()");
}
void CoreChecks::PreCallRecordCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
uint32_t index) {
if (disabled[query_validation]) return;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
QueryObject query_obj = {queryPool, query, index};
query_obj.endCommandIndex = cb_state->commandCount - 1;
EnqueueVerifyEndQuery(commandBuffer, query_obj);
}
bool CoreChecks::PreCallValidateCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
uint32_t index) const {
if (disabled[query_validation]) return false;
QueryObject query_obj = {queryPool, query, index};
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
struct EndQueryIndexedVuids : ValidateEndQueryVuids {
EndQueryIndexedVuids() : ValidateEndQueryVuids() {
vuid_queue_flags = "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-cmdpool";
vuid_active_queries = "VUID-vkCmdEndQueryIndexedEXT-None-02342";
vuid_protected_cb = "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-02344";
}
};
EndQueryIndexedVuids vuids;
bool skip = false;
skip |= ValidateCmdEndQuery(cb_state, query_obj, index, CMD_ENDQUERYINDEXEDEXT, &vuids);
const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
const auto &query_pool_ci = query_pool_state->createInfo;
const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
if (query >= available_query_count) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdEndQueryIndexedEXT-query-02343",
"vkCmdEndQueryIndexedEXT(): query index (%" PRIu32
") is greater or equal to the queryPool size (%" PRIu32 ").",
index, available_query_count);
}
if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
if (IsExtEnabled(device_extensions.vk_ext_transform_feedback) &&
(index >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams)) {
skip |= LogError(
cb_state->commandBuffer(), "VUID-vkCmdEndQueryIndexedEXT-queryType-02346",
"vkCmdEndQueryIndexedEXT(): index %" PRIu32
" must be less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams %" PRIu32 ".",
index, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams);
}
} else if (index != 0) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdEndQueryIndexedEXT-queryType-02347",
"vkCmdEndQueryIndexedEXT(): index %" PRIu32
" must be zero if %s was not created with type VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT.",
index, report_data->FormatHandle(queryPool).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
uint32_t discardRectangleCount,
const VkRect2D *pDiscardRectangles) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
// Minimal validation for command buffer state
skip |= ValidateCmd(cb_state, CMD_SETDISCARDRECTANGLEEXT);
skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetDiscardRectangleEXT-viewportScissor2D-04788",
"vkCmdSetDiscardRectangleEXT");
for (uint32_t i = 0; i < discardRectangleCount; ++i) {
if (pDiscardRectangles[i].offset.x < 0) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetDiscardRectangleEXT-x-00587",
"vkCmdSetDiscardRectangleEXT(): pDiscardRectangles[%" PRIu32 "].x (%" PRIi32 ") is negative.", i,
pDiscardRectangles[i].offset.x);
}
if (pDiscardRectangles[i].offset.y < 0) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetDiscardRectangleEXT-x-00587",
"vkCmdSetDiscardRectangleEXT(): pDiscardRectangles[%" PRIu32 "].y (%" PRIi32 ") is negative.", i,
pDiscardRectangles[i].offset.y);
}
}
if (firstDiscardRectangle + discardRectangleCount > phys_dev_ext_props.discard_rectangle_props.maxDiscardRectangles) {
skip |=
LogError(cb_state->commandBuffer(), "VUID-vkCmdSetDiscardRectangleEXT-firstDiscardRectangle-00585",
"vkCmdSetDiscardRectangleEXT(): firstDiscardRectangle (%" PRIu32 ") + discardRectangleCount (%" PRIu32
") is not less than VkPhysicalDeviceDiscardRectanglePropertiesEXT::maxDiscardRectangles (%" PRIu32 ".",
firstDiscardRectangle, discardRectangleCount, phys_dev_ext_props.discard_rectangle_props.maxDiscardRectangles);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
const VkSampleLocationsInfoEXT *pSampleLocationsInfo) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
// Minimal validation for command buffer state
skip |= ValidateCmd(cb_state, CMD_SETSAMPLELOCATIONSEXT);
skip |= ValidateSampleLocationsInfo(pSampleLocationsInfo, "vkCmdSetSampleLocationsEXT");
const auto lv_bind_point = ConvertToLvlBindPoint(VK_PIPELINE_BIND_POINT_GRAPHICS);
const auto *pipe = cb_state->lastBound[lv_bind_point].pipeline_state;
if (pipe != nullptr) {
// Check same error with different log messages
const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = pipe->create_info.graphics.pMultisampleState;
if (multisample_state == nullptr) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529",
"vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel must be equal to "
"rasterizationSamples, but the bound graphics pipeline was created without a multisample state");
} else if (multisample_state->rasterizationSamples != pSampleLocationsInfo->sampleLocationsPerPixel) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529",
"vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel (%s) is not equal to "
"the last bound pipeline's rasterizationSamples (%s)",
string_VkSampleCountFlagBits(pSampleLocationsInfo->sampleLocationsPerPixel),
string_VkSampleCountFlagBits(multisample_state->rasterizationSamples));
}
}
return skip;
}
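// Shared validation for vkCreateSamplerYcbcrConversion and vkCreateSamplerYcbcrConversionKHR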
bool CoreChecks::ValidateCreateSamplerYcbcrConversion(const char *func_name,
const VkSamplerYcbcrConversionCreateInfo *create_info) const {
bool skip = false;
const VkFormat conversion_format = create_info->format;
// Need to check for external format conversion first as it allows for non-UNORM format
bool external_format = false;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
const VkExternalFormatANDROID *ext_format_android = LvlFindInChain<VkExternalFormatANDROID>(create_info->pNext);
if ((nullptr != ext_format_android) && (0 != ext_format_android->externalFormat)) {
external_format = true;
if (VK_FORMAT_UNDEFINED != create_info->format) {
return LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904",
"%s: CreateInfo format is not VK_FORMAT_UNDEFINED while "
"there is a chained VkExternalFormatANDROID struct with a non-zero externalFormat.",
func_name);
}
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
if ((external_format == false) && (FormatIsUNorm(conversion_format) == false)) {
const char *vuid = IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)
? "VUID-VkSamplerYcbcrConversionCreateInfo-format-04061"
: "VUID-VkSamplerYcbcrConversionCreateInfo-format-04060";
skip |=
LogError(device, vuid,
"%s: CreateInfo format (%s) is not an UNORM format and there is no external format conversion being created.",
func_name, string_VkFormat(conversion_format));
}
// Gets VkFormatFeatureFlags according to Sampler Ycbcr Conversion Format Features
// (vkspec.html#potential-format-features)
VkFormatFeatureFlags format_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM;
if (conversion_format == VK_FORMAT_UNDEFINED) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// only check for external format inside VK_FORMAT_UNDEFINED check to prevent unnecessary extra errors from no format
// features being supported
if (external_format == true) {
auto it = ahb_ext_formats_map.find(ext_format_android->externalFormat);
if (it != ahb_ext_formats_map.end()) {
format_features = it->second;
}
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
} else {
format_features = GetPotentialFormatFeatures(conversion_format);
}
// Check all VUID that are based off of VkFormatFeatureFlags
// These can't be in StatelessValidation due to needing possible External AHB state for feature support
if (((format_features & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT) == 0) &&
((format_features & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) == 0)) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01650",
"%s: Format %s does not support either VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT or "
"VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT",
func_name, string_VkFormat(conversion_format));
}
if ((format_features & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) == 0) {
if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651",
"%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so xChromaOffset can't "
"be VK_CHROMA_LOCATION_COSITED_EVEN",
func_name, string_VkFormat(conversion_format));
}
if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651",
"%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so yChromaOffset can't "
"be VK_CHROMA_LOCATION_COSITED_EVEN",
func_name, string_VkFormat(conversion_format));
}
}
if ((format_features & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT) == 0) {
if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652",
"%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so xChromaOffset can't "
"be VK_CHROMA_LOCATION_MIDPOINT",
func_name, string_VkFormat(conversion_format));
}
if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652",
"%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so yChromaOffset can't "
"be VK_CHROMA_LOCATION_MIDPOINT",
func_name, string_VkFormat(conversion_format));
}
}
if (((format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT) == 0) &&
(create_info->forceExplicitReconstruction == VK_TRUE)) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-forceExplicitReconstruction-01656",
"%s: Format %s does not support "
"VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT so "
"forceExplicitReconstruction must be VK_FALSE",
func_name, string_VkFormat(conversion_format));
}
if (((format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT) == 0) &&
(create_info->chromaFilter == VK_FILTER_LINEAR)) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-chromaFilter-01657",
"%s: Format %s does not support VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT so "
"chromaFilter must not be VK_FILTER_LINEAR",
func_name, string_VkFormat(conversion_format));
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion) const {
return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversion()", pCreateInfo);
}
bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device,
const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion) const {
return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversionKHR()", pCreateInfo);
}
bool CoreChecks::PreCallValidateCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) const {
bool skip = false;
if (samplerMap.size() >= phys_dev_props.limits.maxSamplerAllocationCount) {
skip |= LogError(
device, "VUID-vkCreateSampler-maxSamplerAllocationCount-04110",
"vkCreateSampler(): Number of currently valid sampler objects (%zu) is not less than the maximum allowed (%u).",
samplerMap.size(), phys_dev_props.limits.maxSamplerAllocationCount);
}
if (enabled_features.core11.samplerYcbcrConversion == VK_TRUE) {
const VkSamplerYcbcrConversionInfo *conversion_info = LvlFindInChain<VkSamplerYcbcrConversionInfo>(pCreateInfo->pNext);
if (conversion_info != nullptr) {
const VkSamplerYcbcrConversion sampler_ycbcr_conversion = conversion_info->conversion;
const SAMPLER_YCBCR_CONVERSION_STATE *ycbcr_state = GetSamplerYcbcrConversionState(sampler_ycbcr_conversion);
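            // Without SEPARATE_RECONSTRUCTION_FILTER support, both minFilter and magFilter must equal the conversion's chromaFilter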
if ((ycbcr_state->format_features &
VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT) == 0) {
const VkFilter chroma_filter = ycbcr_state->chromaFilter;
if (pCreateInfo->minFilter != chroma_filter) {
skip |= LogError(
device, "VUID-VkSamplerCreateInfo-minFilter-01645",
"VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is "
"not supported for SamplerYcbcrConversion's (%s) format %s so minFilter (%s) needs to be equal to "
"chromaFilter (%s)",
report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format),
string_VkFilter(pCreateInfo->minFilter), string_VkFilter(chroma_filter));
}
if (pCreateInfo->magFilter != chroma_filter) {
skip |= LogError(
device, "VUID-VkSamplerCreateInfo-minFilter-01645",
"VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is "
"not supported for SamplerYcbcrConversion's (%s) format %s so minFilter (%s) needs to be equal to "
"chromaFilter (%s)",
report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format),
string_VkFilter(pCreateInfo->minFilter), string_VkFilter(chroma_filter));
}
}
// At this point there is a known sampler YCbCr conversion enabled
const auto *sampler_reduction = LvlFindInChain<VkSamplerReductionModeCreateInfo>(pCreateInfo->pNext);
if (sampler_reduction != nullptr) {
if (sampler_reduction->reductionMode != VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE) {
skip |= LogError(device, "VUID-VkSamplerCreateInfo-None-01647",
"A sampler YCbCr Conversion is being used creating this sampler so the sampler reduction mode "
"must be VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE.");
}
}
}
}
if (pCreateInfo->borderColor == VK_BORDER_COLOR_INT_CUSTOM_EXT ||
pCreateInfo->borderColor == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT) {
if (!enabled_features.custom_border_color_features.customBorderColors) {
skip |=
LogError(device, "VUID-VkSamplerCreateInfo-customBorderColors-04085",
"vkCreateSampler(): A custom border color was specified without enabling the custom border color feature");
}
auto custom_create_info = LvlFindInChain<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo->pNext);
if (custom_create_info) {
if (custom_create_info->format == VK_FORMAT_UNDEFINED &&
!enabled_features.custom_border_color_features.customBorderColorWithoutFormat) {
skip |= LogError(device, "VUID-VkSamplerCustomBorderColorCreateInfoEXT-format-04014",
"vkCreateSampler(): A custom border color was specified as VK_FORMAT_UNDEFINED without the "
"customBorderColorWithoutFormat feature being enabled");
}
}
if (custom_border_color_sampler_count >= phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers) {
skip |= LogError(device, "VUID-VkSamplerCreateInfo-None-04012",
"vkCreateSampler(): Creating a sampler with a custom border color will exceed the "
"maxCustomBorderColorSamplers limit of %d",
phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers);
}
}
if (IsExtEnabled(device_extensions.vk_khr_portability_subset)) {
if ((VK_FALSE == enabled_features.portability_subset_features.samplerMipLodBias) && pCreateInfo->mipLodBias != 0) {
skip |= LogError(device, "VUID-VkSamplerCreateInfo-samplerMipLodBias-04467",
"vkCreateSampler (portability error): mip LOD bias not supported.");
}
}
// If any of addressModeU, addressModeV or addressModeW are VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE, the
// VK_KHR_sampler_mirror_clamp_to_edge extension or promoted feature must be enabled
if ((device_extensions.vk_khr_sampler_mirror_clamp_to_edge != kEnabledByCreateinfo) &&
(enabled_features.core12.samplerMirrorClampToEdge == VK_FALSE)) {
// Use 'else' because getting 3 large error messages is redundant and assume developer, if set all 3, will notice and fix
// all at once
if (pCreateInfo->addressModeU == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) {
skip |=
LogError(device, "VUID-VkSamplerCreateInfo-addressModeU-01079",
"vkCreateSampler(): addressModeU is set to VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE but the "
"VK_KHR_sampler_mirror_clamp_to_edge extension or samplerMirrorClampToEdge feature has not been enabled.");
} else if (pCreateInfo->addressModeV == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) {
skip |=
LogError(device, "VUID-VkSamplerCreateInfo-addressModeU-01079",
"vkCreateSampler(): addressModeV is set to VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE but the "
"VK_KHR_sampler_mirror_clamp_to_edge extension or samplerMirrorClampToEdge feature has not been enabled.");
} else if (pCreateInfo->addressModeW == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) {
skip |=
LogError(device, "VUID-VkSamplerCreateInfo-addressModeU-01079",
"vkCreateSampler(): addressModeW is set to VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE but the "
"VK_KHR_sampler_mirror_clamp_to_edge extension or samplerMirrorClampToEdge feature has not been enabled.");
}
}
return skip;
}
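// Shared validation for vkGetBufferDeviceAddress, vkGetBufferDeviceAddressKHR and vkGetBufferDeviceAddressEXT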
bool CoreChecks::ValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo,
const char *apiName) const {
bool skip = false;
if (!enabled_features.core12.bufferDeviceAddress && !enabled_features.buffer_device_address_ext_features.bufferDeviceAddress) {
skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-bufferDeviceAddress-03324",
"%s: The bufferDeviceAddress feature must: be enabled.", apiName);
}
if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice &&
!enabled_features.buffer_device_address_ext_features.bufferDeviceAddressMultiDevice) {
skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-device-03325",
"%s: If device was created with multiple physical devices, then the "
"bufferDeviceAddressMultiDevice feature must: be enabled.",
apiName);
}
const auto buffer_state = GetBufferState(pInfo->buffer);
if (buffer_state) {
if (!(buffer_state->createInfo.flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)) {
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, apiName, "VUID-VkBufferDeviceAddressInfo-buffer-02600");
}
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, true,
"VUID-VkBufferDeviceAddressInfo-buffer-02601", apiName,
"VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT");
}
return skip;
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo),
"vkGetBufferDeviceAddressEXT");
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo),
"vkGetBufferDeviceAddressKHR");
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo),
"vkGetBufferDeviceAddress");
}
bool CoreChecks::ValidateGetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo,
const char *apiName) const {
bool skip = false;
if (!enabled_features.core12.bufferDeviceAddress) {
skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-None-03326",
"%s(): The bufferDeviceAddress feature must: be enabled.", apiName);
}
if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) {
skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-device-03327",
"%s(): If device was created with multiple physical devices, then the "
"bufferDeviceAddressMultiDevice feature must: be enabled.",
apiName);
}
return skip;
}
bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
return ValidateGetBufferOpaqueCaptureAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo),
"vkGetBufferOpaqueCaptureAddressKHR");
}
bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
return ValidateGetBufferOpaqueCaptureAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo),
"vkGetBufferOpaqueCaptureAddress");
}
bool CoreChecks::ValidateGetDeviceMemoryOpaqueCaptureAddress(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo,
const char *apiName) const {
bool skip = false;
if (!enabled_features.core12.bufferDeviceAddress) {
skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-None-03334",
"%s(): The bufferDeviceAddress feature must: be enabled.", apiName);
}
if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) {
skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-device-03335",
"%s(): If device was created with multiple physical devices, then the "
"bufferDeviceAddressMultiDevice feature must: be enabled.",
apiName);
}
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory);
if (mem_info) {
auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext);
if (!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT)) {
skip |= LogError(pInfo->memory, "VUID-VkDeviceMemoryOpaqueCaptureAddressInfo-memory-03336",
"%s(): memory must have been allocated with VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT.", apiName);
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddressKHR(VkDevice device,
const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo) const {
return ValidateGetDeviceMemoryOpaqueCaptureAddress(device, static_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>(pInfo),
"vkGetDeviceMemoryOpaqueCaptureAddressKHR");
}
bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddress(VkDevice device,
const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo) const {
return ValidateGetDeviceMemoryOpaqueCaptureAddress(device, static_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>(pInfo),
"vkGetDeviceMemoryOpaqueCaptureAddress");
}
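// Shared helper: firstQuery must index into the pool and the full [firstQuery, firstQuery + queryCount) range must stay
// within totalCount.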
bool CoreChecks::ValidateQueryRange(VkDevice device, VkQueryPool queryPool, uint32_t totalCount, uint32_t firstQuery,
uint32_t queryCount, const char *vuid_badfirst, const char *vuid_badrange,
const char *apiName) const {
bool skip = false;
if (firstQuery >= totalCount) {
skip |= LogError(device, vuid_badfirst,
"%s(): firstQuery (%" PRIu32 ") greater than or equal to query pool count (%" PRIu32 ") for %s", apiName,
firstQuery, totalCount, report_data->FormatHandle(queryPool).c_str());
}
if ((firstQuery + queryCount) > totalCount) {
skip |= LogError(device, vuid_badrange,
"%s(): Query range [%" PRIu32 ", %" PRIu32 ") goes beyond query pool count (%" PRIu32 ") for %s", apiName,
firstQuery, firstQuery + queryCount, totalCount, report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
bool CoreChecks::ValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
const char *apiName) const {
if (disabled[query_validation]) return false;
bool skip = false;
if (!enabled_features.core12.hostQueryReset) {
skip |= LogError(device, "VUID-vkResetQueryPool-None-02665", "%s(): Host query reset not enabled for device", apiName);
}
const auto query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
skip |= ValidateQueryRange(device, queryPool, query_pool_state->createInfo.queryCount, firstQuery, queryCount,
"VUID-vkResetQueryPool-firstQuery-02666", "VUID-vkResetQueryPool-firstQuery-02667", apiName);
}
return skip;
}
bool CoreChecks::PreCallValidateResetQueryPoolEXT(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) const {
return ValidateResetQueryPool(device, queryPool, firstQuery, queryCount, "vkResetQueryPoolEXT");
}
bool CoreChecks::PreCallValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) const {
return ValidateResetQueryPool(device, queryPool, firstQuery, queryCount, "vkResetQueryPool");
}
VkResult CoreChecks::CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkValidationCacheEXT *pValidationCache) {
*pValidationCache = ValidationCache::Create(pCreateInfo);
return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
}
void CoreChecks::CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
const VkAllocationCallbacks *pAllocator) {
delete CastFromHandle<ValidationCache *>(validationCache);
}
VkResult CoreChecks::CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
void *pData) {
size_t in_size = *pDataSize;
CastFromHandle<ValidationCache *>(validationCache)->Write(pDataSize, pData);
return (pData && *pDataSize != in_size) ? VK_INCOMPLETE : VK_SUCCESS;
}
VkResult CoreChecks::CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
const VkValidationCacheEXT *pSrcCaches) {
bool skip = false;
auto dst = CastFromHandle<ValidationCache *>(dstCache);
VkResult result = VK_SUCCESS;
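    // Merging a cache into itself is invalid; once any such source is found, the remaining sources are not merged.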
for (uint32_t i = 0; i < srcCacheCount; i++) {
auto src = CastFromHandle<const ValidationCache *>(pSrcCaches[i]);
if (src == dst) {
skip |= LogError(device, "VUID-vkMergeValidationCachesEXT-dstCache-01536",
"vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
HandleToUint64(dstCache));
result = VK_ERROR_VALIDATION_FAILED_EXT;
}
if (!skip) {
dst->Merge(src);
}
}
return result;
}
bool CoreChecks::ValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask, CMD_TYPE cmd_type) const {
bool skip = false;
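    // deviceMask must be non-zero, reference only existing physical devices, and stay within the command buffer's
    // device mask (and, inside a render pass instance, the render pass's device mask).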
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
skip |= ValidateCmd(cb_state, cmd_type);
skip |= ValidateDeviceMaskToPhysicalDeviceCount(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00108");
skip |= ValidateDeviceMaskToZero(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00109");
skip |= ValidateDeviceMaskToCommandBuffer(cb_state, deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00110");
if (cb_state->activeRenderPass) {
skip |= ValidateDeviceMaskToRenderPass(cb_state, deviceMask, "VUID-vkCmdSetDeviceMask-deviceMask-00111");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask) const {
return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, CMD_SETDEVICEMASK);
}
bool CoreChecks::PreCallValidateCmdSetDeviceMaskKHR(VkCommandBuffer commandBuffer, uint32_t deviceMask) const {
return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, CMD_SETDEVICEMASKKHR);
}
bool CoreChecks::ValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue,
const char *apiName) const {
bool skip = false;
const auto *semaphore_state = GetSemaphoreState(semaphore);
if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) {
skip |= LogError(semaphore, "VUID-vkGetSemaphoreCounterValue-semaphore-03255",
"%s(): semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE type", apiName,
report_data->FormatHandle(semaphore).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateGetSemaphoreCounterValueKHR(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const {
return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValueKHR");
}
bool CoreChecks::PreCallValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const {
return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValue");
}
bool CoreChecks::ValidateQueryPoolStride(const std::string &vuid_not_64, const std::string &vuid_64, const VkDeviceSize stride,
const char *parameter_name, const uint64_t parameter_value,
const VkQueryResultFlags flags) const {
bool skip = false;
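    // Query results are packed as 64-bit or 32-bit values, so stride and the related offset parameter must be
    // 8-byte aligned when VK_QUERY_RESULT_64_BIT is set and 4-byte aligned otherwise.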
if (flags & VK_QUERY_RESULT_64_BIT) {
static const int condition_multiples = 0b0111;
if ((stride & condition_multiples) || (parameter_value & condition_multiples)) {
skip |= LogError(device, vuid_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name,
parameter_value);
}
} else {
static const int condition_multiples = 0b0011;
if ((stride & condition_multiples) || (parameter_value & condition_multiples)) {
skip |= LogError(device, vuid_not_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name,
parameter_value);
}
}
return skip;
}
bool CoreChecks::ValidateCmdDrawStrideWithStruct(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride,
const char *struct_name, const uint32_t struct_size) const {
bool skip = false;
static const int condition_multiples = 0b0011;
if ((stride & condition_multiples) || (stride < struct_size)) {
skip |= LogError(commandBuffer, vuid, "stride %d is invalid or less than sizeof(%s) %d.", stride, struct_name, struct_size);
}
return skip;
}
bool CoreChecks::ValidateCmdDrawStrideWithBuffer(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride,
const char *struct_name, const uint32_t struct_size, const uint32_t drawCount,
const VkDeviceSize offset, const BUFFER_STATE *buffer_state) const {
bool skip = false;
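    // End of the last indirect record: stride covers drawCount - 1 full records plus one final structure at offset.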
    uint64_t validation_value = static_cast<uint64_t>(stride) * (drawCount - 1) + offset + struct_size;
if (validation_value > buffer_state->createInfo.size) {
skip |= LogError(commandBuffer, vuid,
"stride[%d] * (drawCount[%d] - 1) + offset[%" PRIx64 "] + sizeof(%s)[%d] = %" PRIx64
" is greater than the size[%" PRIx64 "] of %s.",
stride, drawCount, offset, struct_name, struct_size, validation_value, buffer_state->createInfo.size,
report_data->FormatHandle(buffer_state->buffer()).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateReleaseProfilingLockKHR(VkDevice device) const {
bool skip = false;
if (!performance_lock_acquired) {
skip |= LogError(device, "VUID-vkReleaseProfilingLockKHR-device-03235",
"vkReleaseProfilingLockKHR(): The profiling lock of device must have been held via a previous successful "
"call to vkAcquireProfilingLockKHR.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetCheckpointNV(VkCommandBuffer commandBuffer, const void *pCheckpointMarker) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_SETCHECKPOINTNV);
    return skip;
}
bool CoreChecks::PreCallValidateWriteAccelerationStructuresPropertiesKHR(VkDevice device, uint32_t accelerationStructureCount,
const VkAccelerationStructureKHR *pAccelerationStructures,
VkQueryType queryType, size_t dataSize, void *pData,
size_t stride) const {
bool skip = false;
for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
        const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(pAccelerationStructures[i]);
        if (!as_state) continue;  // Unknown or destroyed handle; nothing further to validate for this entry.
        const auto &as_info = as_state->build_info_khr;
if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) {
if (!(as_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
skip |= LogError(device, "VUID-vkWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431",
"vkWriteAccelerationStructuresPropertiesKHR: All acceleration structures (%s) in "
"pAccelerationStructures must have been built with"
"VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
"VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR.",
report_data->FormatHandle(as_state->acceleration_structure()).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWriteAccelerationStructuresPropertiesKHR(
VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR *pAccelerationStructures,
VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
skip |= ValidateCmd(cb_state, CMD_WRITEACCELERATIONSTRUCTURESPROPERTIESKHR);
const auto *query_pool_state = GetQueryPoolState(queryPool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType != queryType) {
skip |= LogError(
device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-queryPool-02493",
"vkCmdWriteAccelerationStructuresPropertiesKHR: queryPool must have been created with a queryType matching queryType.");
}
for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) {
            const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(pAccelerationStructures[i]);
            if (as_state && !(as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
skip |= LogError(
device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431",
"vkCmdWriteAccelerationStructuresPropertiesKHR: All acceleration structures in pAccelerationStructures "
"must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
"VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR.");
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer,
uint32_t accelerationStructureCount,
const VkAccelerationStructureNV *pAccelerationStructures,
VkQueryType queryType, VkQueryPool queryPool,
uint32_t firstQuery) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
skip |= ValidateCmd(cb_state, CMD_WRITEACCELERATIONSTRUCTURESPROPERTIESNV);
const auto *query_pool_state = GetQueryPoolState(queryPool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType != queryType) {
skip |= LogError(
device, "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-queryPool-03755",
"vkCmdWriteAccelerationStructuresPropertiesNV: queryPool must have been created with a queryType matching queryType.");
}
for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV) {
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(pAccelerationStructures[i]);
            if (as_state && !(as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
skip |=
LogError(device, "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-pAccelerationStructures-06215",
"vkCmdWriteAccelerationStructuresPropertiesNV: All acceleration structures in pAccelerationStructures "
"must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
"VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV.");
}
}
}
return skip;
}
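// Shader groups pulled in through pipeline libraries count toward the group total, so recurse through pLibraryInfo.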
uint32_t CoreChecks::CalcTotalShaderGroupCount(const PIPELINE_STATE *pipelineState) const {
const auto &create_info = pipelineState->create_info.raytracing;
uint32_t total = create_info.groupCount;
if (create_info.pLibraryInfo) {
for (uint32_t i = 0; i < create_info.pLibraryInfo->libraryCount; ++i) {
const PIPELINE_STATE *library_pipeline_state = GetPipelineState(create_info.pLibraryInfo->pLibraries[i]);
total += CalcTotalShaderGroupCount(library_pipeline_state);
}
}
return total;
}
bool CoreChecks::PreCallValidateGetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup,
uint32_t groupCount, size_t dataSize, void *pData) const {
bool skip = false;
const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
if (pipeline_state->GetPipelineCreateFlags() & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) {
skip |= LogError(
device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-pipeline-03482",
"vkGetRayTracingShaderGroupHandlesKHR: pipeline must have not been created with VK_PIPELINE_CREATE_LIBRARY_BIT_KHR.");
}
if (dataSize < (phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleSize * groupCount)) {
skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-dataSize-02420",
"vkGetRayTracingShaderGroupHandlesKHR: dataSize (%zu) must be at least "
"VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleSize * groupCount.",
dataSize);
}
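    // firstGroup/groupCount are validated against all groups, including those contributed by linked pipeline libraries.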
uint32_t total_group_count = CalcTotalShaderGroupCount(pipeline_state);
if (firstGroup >= total_group_count) {
skip |=
LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-04050",
"vkGetRayTracingShaderGroupHandlesKHR: firstGroup must be less than the number of shader groups in pipeline.");
}
if ((firstGroup + groupCount) > total_group_count) {
skip |= LogError(
device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-02419",
"vkGetRayTracingShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less than or equal the number "
"of shader groups in pipeline.");
}
return skip;
}
bool CoreChecks::PreCallValidateGetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline,
uint32_t firstGroup, uint32_t groupCount,
size_t dataSize, void *pData) const {
bool skip = false;
if (dataSize < (phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleCaptureReplaySize * groupCount)) {
skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-dataSize-03484",
"vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: dataSize (%zu) must be at least "
"VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleCaptureReplaySize * groupCount.",
dataSize);
}
const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
if (!pipeline_state) {
return skip;
}
const auto &create_info = pipeline_state->create_info.raytracing;
if (firstGroup >= create_info.groupCount) {
skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-04051",
"vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: firstGroup must be less than the number of shader "
"groups in pipeline.");
}
if ((firstGroup + groupCount) > create_info.groupCount) {
skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-03483",
"vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less "
"than or equal to the number of shader groups in pipeline.");
}
if (!(create_info.flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR)) {
skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-pipeline-03607",
"pipeline must have been created with a flags that included "
"VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructuresIndirectKHR(VkCommandBuffer commandBuffer, uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
const VkDeviceAddress *pIndirectDeviceAddresses,
const uint32_t *pIndirectStrides,
const uint32_t *const *ppMaxPrimitiveCounts) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURESINDIRECTKHR);
skip |= ValidateCmdRayQueryState(cb_state, CMD_BUILDACCELERATIONSTRUCTURESINDIRECTKHR, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR);
for (uint32_t i = 0; i < infoCount; ++i) {
const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfos[i].srcAccelerationStructure);
const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state = GetAccelerationStructureStateKHR(pInfos[i].dstAccelerationStructure);
if (pInfos[i].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) {
if (src_as_state == nullptr || !src_as_state->built ||
!(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03667",
"vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must have "
"been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in "
"VkAccelerationStructureBuildGeometryInfoKHR::flags.");
}
if (pInfos[i].geometryCount != src_as_state->build_info_khr.geometryCount) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03758",
"vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is "
"VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR,"
" its geometryCount member must have the same value which was specified when "
"srcAccelerationStructure was last built.");
}
if (pInfos[i].flags != src_as_state->build_info_khr.flags) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03759",
"vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is"
" VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which"
" was specified when srcAccelerationStructure was last built.");
}
if (pInfos[i].type != src_as_state->build_info_khr.type) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03760",
"vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is"
" VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which"
" was specified when srcAccelerationStructure was last built.");
}
}
if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) {
if (!dst_as_state ||
(dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR &&
dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03700",
"vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its type member is "
"VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have "
"been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
"VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
}
}
if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) {
if (!dst_as_state ||
(dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR &&
dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03699",
"vkCmdBuildAccelerationStructuresIndirectKHR():For each element of pInfos, if its type member is "
"VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been "
"created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
"VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
}
}
}
return skip;
}
bool CoreChecks::ValidateCopyAccelerationStructureInfoKHR(const VkCopyAccelerationStructureInfoKHR *pInfo,
const char *api_name) const {
bool skip = false;
if (pInfo->mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR) {
        const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfo->src);
        if (src_as_state && !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
            skip |= LogError(device, "VUID-VkCopyAccelerationStructureInfoKHR-src-03411",
                             "(%s): src must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR "
                             "if mode is VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR.",
api_name);
}
}
const auto *src_accel_state = GetAccelerationStructureStateKHR(pInfo->src);
if (src_accel_state) {
const auto *buffer_state = GetBufferState(src_accel_state->create_infoKHR.buffer);
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, api_name, "VUID-VkCopyAccelerationStructureInfoKHR-buffer-03718");
}
const auto *dst_accel_state = GetAccelerationStructureStateKHR(pInfo->dst);
if (dst_accel_state) {
const auto *buffer_state = GetBufferState(dst_accel_state->create_infoKHR.buffer);
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, api_name, "VUID-VkCopyAccelerationStructureInfoKHR-buffer-03719");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer,
const VkCopyAccelerationStructureInfoKHR *pInfo) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTUREKHR);
skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCmdCopyAccelerationStructureKHR");
return skip;
}
bool CoreChecks::PreCallValidateCopyAccelerationStructureKHR(VkDevice device, VkDeferredOperationKHR deferredOperation,
const VkCopyAccelerationStructureInfoKHR *pInfo) const {
bool skip = false;
skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCopyAccelerationStructureKHR");
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureToMemoryKHR(
VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR *pInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURETOMEMORYKHR);
const auto *accel_state = GetAccelerationStructureStateKHR(pInfo->src);
if (accel_state) {
const auto *buffer_state = GetBufferState(accel_state->create_infoKHR.buffer);
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdCopyAccelerationStructureToMemoryKHR",
"VUID-vkCmdCopyAccelerationStructureToMemoryKHR-None-03559");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyMemoryToAccelerationStructureKHR(
VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_COPYMEMORYTOACCELERATIONSTRUCTUREKHR);
return skip;
}
bool CoreChecks::PreCallValidateCmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer, uint32_t firstBinding,
uint32_t bindingCount, const VkBuffer *pBuffers,
const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes) const {
bool skip = false;
char const *const cmd_name = "CmdBindTransformFeedbackBuffersEXT";
if (!enabled_features.transform_feedback_features.transformFeedback) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-transformFeedback-02355",
"%s: transformFeedback feature is not enabled.", cmd_name);
}
{
auto const cb_state = GetCBState(commandBuffer);
if (cb_state->transform_feedback_active) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-None-02365",
"%s: transform feedback is active.", cmd_name);
}
}
for (uint32_t i = 0; i < bindingCount; ++i) {
auto const buffer_state = GetBufferState(pBuffers[i]);
assert(buffer_state != nullptr);
if (pOffsets[i] >= buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02358",
"%s: pOffset[%" PRIu32 "](0x%" PRIxLEAST64
") is greater than or equal to the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").",
cmd_name, i, pOffsets[i], i, buffer_state->createInfo.size);
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT) == 0) {
skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02360",
"%s: pBuffers[%" PRIu32 "] (%s)"
" was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT flag.",
cmd_name, i, report_data->FormatHandle(pBuffers[i]).c_str());
}
// pSizes is optional and may be nullptr. Also might be VK_WHOLE_SIZE which VU don't apply
if ((pSizes != nullptr) && (pSizes[i] != VK_WHOLE_SIZE)) {
// only report one to prevent redundant error if the size is larger since adding offset will be as well
if (pSizes[i] > buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindTransformFeedbackBuffersEXT-pSizes-02362",
"%s: pSizes[%" PRIu32 "](0x%" PRIxLEAST64 ") is greater than the size of pBuffers[%" PRIu32
"](0x%" PRIxLEAST64 ").",
cmd_name, i, pSizes[i], i, buffer_state->createInfo.size);
} else if (pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02363",
"%s: The sum of pOffsets[%" PRIu32 "](Ox%" PRIxLEAST64 ") and pSizes[%" PRIu32 "](0x%" PRIxLEAST64
") is greater than the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").",
cmd_name, i, pOffsets[i], i, pSizes[i], i, buffer_state->createInfo.size);
}
}
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, cmd_name, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02364");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer,
uint32_t counterBufferCount, const VkBuffer *pCounterBuffers,
const VkDeviceSize *pCounterBufferOffsets) const {
bool skip = false;
char const *const cmd_name = "CmdBeginTransformFeedbackEXT";
if (!enabled_features.transform_feedback_features.transformFeedback) {
skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-transformFeedback-02366",
"%s: transformFeedback feature is not enabled.", cmd_name);
}
{
auto const cb_state = GetCBState(commandBuffer);
if (cb_state) {
if (cb_state->transform_feedback_active) {
skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-None-02367",
"%s: transform feedback is active.", cmd_name);
}
if (cb_state->activeRenderPass) {
const auto &rp_ci = cb_state->activeRenderPass->createInfo;
for (uint32_t i = 0; i < rp_ci.subpassCount; ++i) {
// When a subpass uses a non-zero view mask, multiview functionality is considered to be enabled
if (rp_ci.pSubpasses[i].viewMask > 0) {
skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-None-02373",
"%s: active render pass (%s) has multiview enabled.", cmd_name,
report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str());
break;
}
}
}
}
}
    // pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionally, pCounterBufferOffsets must be nullptr
// if pCounterBuffers is nullptr.
if (pCounterBuffers == nullptr) {
if (pCounterBufferOffsets != nullptr) {
skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffer-02371",
"%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name);
}
} else {
for (uint32_t i = 0; i < counterBufferCount; ++i) {
if (pCounterBuffers[i] != VK_NULL_HANDLE) {
auto const buffer_state = GetBufferState(pCounterBuffers[i]);
assert(buffer_state != nullptr);
if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) {
skip |=
LogError(buffer_state->buffer(), "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBufferOffsets-02370",
"%s: pCounterBuffers[%" PRIu32 "](%s) is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIx64 ").",
cmd_name, i, report_data->FormatHandle(pCounterBuffers[i]).c_str(), i, pCounterBufferOffsets[i]);
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) {
skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffers-02372",
"%s: pCounterBuffers[%" PRIu32 "] (%s) was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.",
cmd_name, i, report_data->FormatHandle(pCounterBuffers[i]).c_str());
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer,
uint32_t counterBufferCount, const VkBuffer *pCounterBuffers,
const VkDeviceSize *pCounterBufferOffsets) const {
bool skip = false;
char const *const cmd_name = "CmdEndTransformFeedbackEXT";
if (!enabled_features.transform_feedback_features.transformFeedback) {
skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-transformFeedback-02374",
"%s: transformFeedback feature is not enabled.", cmd_name);
}
{
auto const cb_state = GetCBState(commandBuffer);
if (!cb_state->transform_feedback_active) {
skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-None-02375", "%s: transform feedback is not active.",
cmd_name);
}
}
    // pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionally, pCounterBufferOffsets must be nullptr
// if pCounterBuffers is nullptr.
if (pCounterBuffers == nullptr) {
if (pCounterBufferOffsets != nullptr) {
skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffer-02379",
"%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name);
}
} else {
for (uint32_t i = 0; i < counterBufferCount; ++i) {
if (pCounterBuffers[i] != VK_NULL_HANDLE) {
auto const buffer_state = GetBufferState(pCounterBuffers[i]);
assert(buffer_state != nullptr);
if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) {
skip |=
LogError(buffer_state->buffer(), "VUID-vkCmdEndTransformFeedbackEXT-pCounterBufferOffsets-02378",
"%s: pCounterBuffers[%" PRIu32 "](%s) is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIx64 ").",
cmd_name, i, report_data->FormatHandle(pCounterBuffers[i]).c_str(), i, pCounterBufferOffsets[i]);
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) {
skip |= LogError(buffer_state->buffer(), "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffers-02380",
"%s: pCounterBuffers[%" PRIu32 "] (%s) was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.",
cmd_name, i, report_data->FormatHandle(pCounterBuffers[i]).c_str());
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetLogicOpEXT(VkCommandBuffer commandBuffer, VkLogicOp logicOp) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETLOGICOPEXT);
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2LogicOp) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetLogicOpEXT-None-04867",
"vkCmdSetLogicOpEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetPatchControlPointsEXT(VkCommandBuffer commandBuffer, uint32_t patchControlPoints) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETPATCHCONTROLPOINTSEXT);
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2PatchControlPoints) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetPatchControlPointsEXT-None-04873",
"vkCmdSetPatchControlPointsEXT: extendedDynamicState feature is not enabled.");
}
if (patchControlPoints > phys_dev_props.limits.maxTessellationPatchSize) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetPatchControlPointsEXT-patchControlPoints-04874",
"vkCmdSetPatchControlPointsEXT: The value of patchControlPoints must be less than "
"VkPhysicalDeviceLimits::maxTessellationPatchSize");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetRasterizerDiscardEnableEXT(VkCommandBuffer commandBuffer,
VkBool32 rasterizerDiscardEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETRASTERIZERDISCARDENABLEEXT);
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetRasterizerDiscardEnableEXT-None-04871",
"vkCmdSetRasterizerDiscardEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBiasEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBIASENABLEEXT);
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBiasEnableEXT-None-04872",
"vkCmdSetDepthBiasEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetPrimitiveRestartEnableEXT(VkCommandBuffer commandBuffer,
VkBool32 primitiveRestartEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETPRIMITIVERESTARTENABLEEXT);
if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetPrimitiveRestartEnableEXT-None-04866",
"vkCmdSetPrimitiveRestartEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetCullModeEXT(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETCULLMODEEXT);
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetCullModeEXT-None-03384",
"vkCmdSetCullModeEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetFrontFaceEXT(VkCommandBuffer commandBuffer, VkFrontFace frontFace) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETFRONTFACEEXT);
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetFrontFaceEXT-None-03383",
"vkCmdSetFrontFaceEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetPrimitiveTopologyEXT(VkCommandBuffer commandBuffer,
VkPrimitiveTopology primitiveTopology) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETPRIMITIVETOPOLOGYEXT);
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetPrimitiveTopologyEXT-None-03347",
"vkCmdSetPrimitiveTopologyEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewportWithCountEXT(VkCommandBuffer commandBuffer, uint32_t viewportCount,
const VkViewport *pViewports) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTWITHCOUNTEXT);
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportWithCountEXT-None-03393",
"vkCmdSetViewportWithCountEXT: extendedDynamicState feature is not enabled.");
}
skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetViewportWithCountEXT-commandBuffer-04819",
"vkCmdSetViewportWithCountEXT");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetScissorWithCountEXT(VkCommandBuffer commandBuffer, uint32_t scissorCount,
const VkRect2D *pScissors) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSCISSORWITHCOUNTEXT);
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-None-03396",
"vkCmdSetScissorWithCountEXT: extendedDynamicState feature is not enabled.");
}
skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetScissorWithCountEXT-commandBuffer-04820",
"vkCmdSetScissorWithCountEXT");
return skip;
}
bool CoreChecks::PreCallValidateCmdBindVertexBuffers2EXT(VkCommandBuffer commandBuffer, uint32_t firstBinding,
uint32_t bindingCount, const VkBuffer *pBuffers,
const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes,
const VkDeviceSize *pStrides) const {
const auto cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS2EXT);
for (uint32_t i = 0; i < bindingCount; ++i) {
const auto buffer_state = GetBufferState(pBuffers[i]);
if (buffer_state) {
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true,
"VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03359", "vkCmdBindVertexBuffers2EXT()",
"VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers2EXT()",
"VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03360");
if (pOffsets[i] >= buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindVertexBuffers2EXT-pOffsets-03357",
"vkCmdBindVertexBuffers2EXT() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.",
pOffsets[i]);
}
if (pSizes && pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) {
skip |=
LogError(buffer_state->buffer(), "VUID-vkCmdBindVertexBuffers2EXT-pSizes-03358",
"vkCmdBindVertexBuffers2EXT() size (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pSizes[i]);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHTESTENABLEEXT);
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthTestEnableEXT-None-03352",
"vkCmdSetDepthTestEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthWriteEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHWRITEENABLEEXT);
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthWriteEnableEXT-None-03354",
"vkCmdSetDepthWriteEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthCompareOpEXT(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHCOMPAREOPEXT);
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthCompareOpEXT-None-03353",
"vkCmdSetDepthCompareOpEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBoundsTestEnableEXT(VkCommandBuffer commandBuffer,
VkBool32 depthBoundsTestEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDSTESTENABLEEXT);
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBoundsTestEnableEXT-None-03349",
"vkCmdSetDepthBoundsTestEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSTENCILTESTENABLEEXT);
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilTestEnableEXT-None-03350",
"vkCmdSetStencilTestEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilOpEXT(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp,
VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETSTENCILOPEXT);
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilOpEXT-None-03351",
"vkCmdSetStencilOpEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) const {
bool skip = false;
if (IsExtEnabled(device_extensions.vk_khr_portability_subset)) {
if (VK_FALSE == enabled_features.portability_subset_features.events) {
skip |= LogError(device, "VUID-vkCreateEvent-events-04468",
"vkCreateEvent: events are not supported via VK_KHR_portability_subset");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetRayTracingPipelineStackSizeKHR(VkCommandBuffer commandBuffer,
uint32_t pipelineStackSize) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
skip |= ValidateCmd(cb_state, CMD_SETRAYTRACINGPIPELINESTACKSIZEKHR);
return skip;
}
bool CoreChecks::PreCallValidateGetRayTracingShaderGroupStackSizeKHR(VkDevice device, VkPipeline pipeline, uint32_t group,
VkShaderGroupShaderKHR groupShader) const {
bool skip = false;
const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
if (pipeline_state) {
if (pipeline_state->GetPipelineType() != VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR) {
skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupStackSizeKHR-pipeline-04622",
"vkGetRayTracingShaderGroupStackSizeKHR: Pipeline must be a ray-tracing pipeline, but is a %s pipeline.",
GetPipelineTypeName(pipeline_state->GetPipelineType()));
} else if (group >= pipeline_state->create_info.raytracing.groupCount) {
skip |=
LogError(device, "VUID-vkGetRayTracingShaderGroupStackSizeKHR-group-03608",
"vkGetRayTracingShaderGroupStackSizeKHR: The value of group must be less than the number of shader groups "
"in pipeline.");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetFragmentShadingRateKHR(VkCommandBuffer commandBuffer, const VkExtent2D *pFragmentSize,
const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const char *cmd_name = "vkCmdSetFragmentShadingRateKHR()";
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_SETFRAGMENTSHADINGRATEKHR);
if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate &&
!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate &&
!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
skip |= LogError(
cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04509",
"vkCmdSetFragmentShadingRateKHR: Application called %s, but no fragment shading rate features have been enabled.",
cmd_name);
}
if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && pFragmentSize->width != 1) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04507",
"vkCmdSetFragmentShadingRateKHR: Pipeline fragment width of %u has been specified in %s, but "
"pipelineFragmentShadingRate is not enabled",
pFragmentSize->width, cmd_name);
}
if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && pFragmentSize->height != 1) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04508",
"vkCmdSetFragmentShadingRateKHR: Pipeline fragment height of %u has been specified in %s, but "
"pipelineFragmentShadingRate is not enabled",
pFragmentSize->height, cmd_name);
}
if (!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate &&
combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-primitiveFragmentShadingRate-04510",
"vkCmdSetFragmentShadingRateKHR: First combiner operation of %s has been specified in %s, but "
"primitiveFragmentShadingRate is not enabled",
string_VkFragmentShadingRateCombinerOpKHR(combinerOps[0]), cmd_name);
}
if (!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate &&
combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-attachmentFragmentShadingRate-04511",
"vkCmdSetFragmentShadingRateKHR: Second combiner operation of %s has been specified in %s, but "
"attachmentFragmentShadingRate is not enabled",
string_VkFragmentShadingRateCombinerOpKHR(combinerOps[1]), cmd_name);
}
if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps &&
(combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR &&
combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-fragmentSizeNonTrivialCombinerOps-04512",
"vkCmdSetFragmentShadingRateKHR: First combiner operation of %s has been specified in %s, but "
"fragmentShadingRateNonTrivialCombinerOps is "
"not supported",
string_VkFragmentShadingRateCombinerOpKHR(combinerOps[0]), cmd_name);
}
if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps &&
(combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR &&
combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-fragmentSizeNonTrivialCombinerOps-04512",
"vkCmdSetFragmentShadingRateKHR: Second combiner operation of %s has been specified in %s, but "
"fragmentShadingRateNonTrivialCombinerOps "
"is not supported",
string_VkFragmentShadingRateCombinerOpKHR(combinerOps[1]), cmd_name);
}
if (pFragmentSize->width == 0) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04513",
"vkCmdSetFragmentShadingRateKHR: Fragment width of %u has been specified in %s.", pFragmentSize->width,
cmd_name);
}
if (pFragmentSize->height == 0) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04514",
"vkCmdSetFragmentShadingRateKHR: Fragment height of %u has been specified in %s.", pFragmentSize->height,
cmd_name);
}
if (pFragmentSize->width != 0 && !IsPowerOfTwo(pFragmentSize->width)) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04515",
"vkCmdSetFragmentShadingRateKHR: Non-power-of-two fragment width of %u has been specified in %s.",
pFragmentSize->width, cmd_name);
}
if (pFragmentSize->height != 0 && !IsPowerOfTwo(pFragmentSize->height)) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04516",
"vkCmdSetFragmentShadingRateKHR: Non-power-of-two fragment height of %u has been specified in %s.",
pFragmentSize->height, cmd_name);
}
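    // Fragment shading rate sizes are capped at 4x4 texels in each dimension.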
if (pFragmentSize->width > 4) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04517",
"vkCmdSetFragmentShadingRateKHR: Fragment width of %u specified in %s is too large.", pFragmentSize->width,
cmd_name);
}
if (pFragmentSize->height > 4) {
skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04518",
"vkCmdSetFragmentShadingRateKHR: Fragment height of %u specified in %s is too large",
pFragmentSize->height, cmd_name);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetColorWriteEnableEXT(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
const VkBool32 *pColorWriteEnables) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!enabled_features.color_write_features.colorWriteEnable) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetColorWriteEnableEXT-None-04803",
"vkCmdSetColorWriteEnableEXT: color write is not enabled.");
}
auto graphics_pipeline = cb_state->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
if (graphics_pipeline) {
uint32_t pipeline_attachment_count = graphics_pipeline->create_info.graphics.pColorBlendState->attachmentCount;
if (attachmentCount != pipeline_attachment_count) {
skip |= LogError(
commandBuffer, "VUID-vkCmdSetColorWriteEnableEXT-attachmentCount-04804",
"vkCmdSetColorWriteEnableEXT: attachment count (%" PRIu32
") is not equal to currenly bound pipelines VkPipelineColorBlendStateCreateInfo::attachmentCount (%" PRIu32 ").",
attachmentCount, pipeline_attachment_count);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginConditionalRenderingEXT(
VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT *pConditionalRenderingBegin) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (cb_state && cb_state->conditional_rendering_active) {
skip |= LogError(commandBuffer, "VUID-vkCmdBeginConditionalRenderingEXT-None-01980",
"vkCmdBeginConditionalRenderingEXT(): Conditional rendering is already active.");
}
if (pConditionalRenderingBegin) {
const BUFFER_STATE *buffer_state = GetBufferState(pConditionalRenderingBegin->buffer);
if (buffer_state) {
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT) == 0) {
skip |= LogError(commandBuffer, "VUID-VkConditionalRenderingBeginInfoEXT-buffer-01982",
"vkCmdBeginConditionalRenderingEXT(): pConditionalRenderingBegin->buffer (%s) was not create with "
"VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT bit.",
report_data->FormatHandle(pConditionalRenderingBegin->buffer).c_str());
}
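            // The predicate is a 32-bit value read at offset, so offset + 4 bytes must fit within the buffer.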
if (pConditionalRenderingBegin->offset + 4 > buffer_state->createInfo.size) {
skip |= LogError(commandBuffer, "VUID-VkConditionalRenderingBeginInfoEXT-offset-01983",
"vkCmdBeginConditionalRenderingEXT(): pConditionalRenderingBegin->offset (%" PRIu64
") + 4 bytes is not less than the size of pConditionalRenderingBegin->buffer (%" PRIu64 ").",
pConditionalRenderingBegin->offset, buffer_state->createInfo.size);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdEndConditionalRenderingEXT(VkCommandBuffer commandBuffer) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (cb_state) {
if (!cb_state->conditional_rendering_active) {
skip |= LogError(commandBuffer, "VUID-vkCmdEndConditionalRenderingEXT-None-01985",
"vkCmdBeginConditionalRenderingEXT(): Conditional rendering is not active.");
}
if (!cb_state->conditional_rendering_inside_render_pass && cb_state->activeRenderPass != nullptr) {
skip |= LogError(commandBuffer, "VUID-vkCmdEndConditionalRenderingEXT-None-01986",
"vkCmdBeginConditionalRenderingEXT(): Conditional rendering was begun outside outside of a render "
"pass instance, but a render pass instance is currently active in the command buffer.");
}
if (cb_state->conditional_rendering_inside_render_pass && cb_state->activeRenderPass != nullptr &&
cb_state->conditional_rendering_subpass != cb_state->activeSubpass) {
skip |= LogError(commandBuffer, "VUID-vkCmdEndConditionalRenderingEXT-None-01987",
"vkCmdBeginConditionalRenderingEXT(): Conditional rendering was begun in subpass %" PRIu32
", but the current subpass is %" PRIu32 ".",
cb_state->conditional_rendering_subpass, cb_state->activeSubpass);
}
}
return skip;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateAcquireFullScreenExclusiveModeEXT(VkDevice device, VkSwapchainKHR swapchain) const {
bool skip = false;
const auto swapchain_state = GetSwapchainState(swapchain);
if (swapchain_state) {
if (swapchain_state->retired) {
skip |= LogError(device, "VUID-vkAcquireFullScreenExclusiveModeEXT-swapchain-02674",
"vkAcquireFullScreenExclusiveModeEXT(): swapchain %s is retired.",
report_data->FormatHandle(swapchain).c_str());
}
const auto *surface_full_screen_exclusive_info = LvlFindInChain<VkSurfaceFullScreenExclusiveInfoEXT>(swapchain_state->createInfo.pNext);
if (!surface_full_screen_exclusive_info ||
surface_full_screen_exclusive_info->fullScreenExclusive != VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT) {
skip |= LogError(device, "VUID-vkAcquireFullScreenExclusiveModeEXT-swapchain-02675",
"vkAcquireFullScreenExclusiveModeEXT(): swapchain %s was not created with VkSurfaceFullScreenExclusiveInfoEXT in "
"the pNext chain with fullScreenExclusive equal to VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT.",
report_data->FormatHandle(swapchain).c_str());
}
}
return skip;
}
#endif
bool CoreChecks::ValidatePhysicalDeviceSurfaceSupport(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, const char *vuid,
const char *func_name) const {
bool skip = false;
const auto *pd_state = GetPhysicalDeviceState(physicalDevice);
if (pd_state) {
const auto *surface_state = GetSurfaceState(surface);
VkBool32 supported = VK_FALSE;
for (uint32_t i = 0; i < pd_state->queue_family_known_count; ++i) {
bool checked = false;
if (surface_state) {
const auto support_it = surface_state->gpu_queue_support.find({physicalDevice, i});
if (support_it != surface_state->gpu_queue_support.end()) {
supported = support_it->second;
checked = true;
}
}
if (!checked) {
DispatchGetPhysicalDeviceSurfaceSupportKHR(physicalDevice, i, surface, &supported);
}
if (supported) {
break;
}
}
if (!supported) {
skip |= LogError(physicalDevice, vuid, "%s(): surface is not supported by the physicalDevice.", func_name);
}
}
return skip;
}
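// A minimal sketch of the query this helper mirrors, added for illustration only
// (not part of the original file): before using the surface queries validated
// below, an application is expected to find at least one queue family for which
// vkGetPhysicalDeviceSurfaceSupportKHR reports VK_TRUE.
[[maybe_unused]] static bool ExampleSurfaceIsSupported(VkPhysicalDevice gpu, VkSurfaceKHR surface,
                                                       uint32_t queue_family_count) {
    for (uint32_t i = 0; i < queue_family_count; ++i) {
        VkBool32 supported = VK_FALSE;
        (void)vkGetPhysicalDeviceSurfaceSupportKHR(gpu, i, surface, &supported);
        if (supported == VK_TRUE) {
            return true;
        }
    }
    return false;
}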
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateGetDeviceGroupSurfacePresentModes2EXT(VkDevice device,
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
VkDeviceGroupPresentModeFlagsKHR *pModes) const {
bool skip = false;
if (physical_device_count == 1) {
ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
skip |= ValidatePhysicalDeviceSurfaceSupport(device_object->physical_device, pSurfaceInfo->surface,
"VUID-vkGetDeviceGroupSurfacePresentModes2EXT-pSurfaceInfo-06213",
"vkGetDeviceGroupSurfacePresentModes2EXT");
} else {
for (uint32_t i = 0; i < physical_device_count; ++i) {
skip |= ValidatePhysicalDeviceSurfaceSupport(device_group_create_info.pPhysicalDevices[i], pSurfaceInfo->surface,
"VUID-vkGetDeviceGroupSurfacePresentModes2EXT-pSurfaceInfo-06213",
"vkGetDeviceGroupSurfacePresentModes2EXT");
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfacePresentModes2EXT(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
uint32_t *pPresentModeCount,
VkPresentModeKHR *pPresentModes) const {
bool skip = false;
skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, pSurfaceInfo->surface,
"VUID-vkGetPhysicalDeviceSurfacePresentModes2EXT-pSurfaceInfo-06210",
"vkGetPhysicalDeviceSurfacePresentModes2EXT");
return skip;
}
#endif
bool CoreChecks::PreCallValidateGetDeviceGroupSurfacePresentModesKHR(VkDevice device, VkSurfaceKHR surface,
VkDeviceGroupPresentModeFlagsKHR *pModes) const {
bool skip = false;
if (physical_device_count == 1) {
ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
skip |= ValidatePhysicalDeviceSurfaceSupport(device_object->physical_device, surface,
"VUID-vkGetDeviceGroupSurfacePresentModesKHR-surface-06212",
"vkGetDeviceGroupSurfacePresentModesKHR");
} else {
for (uint32_t i = 0; i < physical_device_count; ++i) {
skip |= ValidatePhysicalDeviceSurfaceSupport(device_group_create_info.pPhysicalDevices[i], surface,
"VUID-vkGetDeviceGroupSurfacePresentModesKHR-surface-06212",
"vkGetDeviceGroupSurfacePresentModesKHR");
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
uint32_t *pRectCount, VkRect2D *pRects) const {
bool skip = false;
skip |=
ValidatePhysicalDeviceSurfaceSupport(physicalDevice, surface, "VUID-vkGetPhysicalDevicePresentRectanglesKHR-surface-06211",
"vkGetPhysicalDevicePresentRectanglesKHR");
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
VkSurfaceCapabilities2EXT *pSurfaceCapabilities) const {
bool skip = false;
skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, surface,
"VUID-vkGetPhysicalDeviceSurfaceCapabilities2EXT-surface-06211",
"vkGetPhysicalDeviceSurfaceCapabilities2EXT");
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
VkSurfaceCapabilities2KHR *pSurfaceCapabilities) const {
bool skip = false;
skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, pSurfaceInfo->surface,
"VUID-vkGetPhysicalDeviceSurfaceCapabilities2KHR-pSurfaceInfo-06210",
"vkGetPhysicalDeviceSurfaceCapabilities2KHR");
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) const {
bool skip = false;
skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, surface,
"VUID-vkGetPhysicalDeviceSurfaceCapabilitiesKHR-surface-06211",
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormat2KHR *pSurfaceFormats) const {
bool skip = false;
skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, pSurfaceInfo->surface,
"VUID-vkGetPhysicalDeviceSurfaceFormats2KHR-pSurfaceInfo-06210",
"vkGetPhysicalDeviceSurfaceFormats2KHR");
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormatKHR *pSurfaceFormats) const {
bool skip = false;
skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, surface, "VUID-vkGetPhysicalDeviceSurfaceFormatsKHR-surface-06211",
"vkGetPhysicalDeviceSurfaceFormatsKHR");
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
uint32_t *pPresentModeCount,
VkPresentModeKHR *pPresentModes) const {
bool skip = false;
skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, surface,
"VUID-vkGetPhysicalDeviceSurfacePresentModesKHR-surface-06211",
"vkGetPhysicalDeviceSurfacePresentModesKHR");
return skip;
}
void CoreChecks::PostCallRecordGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags,
VkResult result) {
if (result != VK_SUCCESS) {
return;
}
if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) == 0) {
        for (uint32_t i = firstQuery; i < firstQuery + queryCount; ++i) {  // mark [firstQuery, firstQuery + queryCount) available
QueryObject obj(queryPool, i);
auto query_pass_iter = queryToStateMap.find(obj);
if (query_pass_iter != queryToStateMap.end()) {
query_pass_iter->second = QUERYSTATE_AVAILABLE;
}
}
}
}
| 1 | 21,711 | Shouldn't this say "a binding with descriptor type VK_DESCRIPTOR_TYPE_MUTABLE_VALVE" | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -12,6 +12,8 @@
#include <wlr/types/wlr_tablet_v2.h>
#include <wlr/util/log.h>
+static const struct wlr_tablet_tool_v2_grab_interface default_tool_interface;
+
static const struct wlr_surface_role tablet_tool_cursor_surface_role = {
.name = "wp_tablet_tool-cursor",
}; | 1 | #ifndef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 200809L
#endif
#include "tablet-unstable-v2-protocol.h"
#include "util/array.h"
#include <assert.h>
#include <stdlib.h>
#include <types/wlr_tablet_v2.h>
#include <wayland-util.h>
#include <wlr/types/wlr_tablet_tool.h>
#include <wlr/types/wlr_tablet_v2.h>
#include <wlr/util/log.h>
static const struct wlr_surface_role tablet_tool_cursor_surface_role = {
.name = "wp_tablet_tool-cursor",
};
static void handle_tablet_tool_v2_set_cursor(struct wl_client *client,
struct wl_resource *resource, uint32_t serial,
struct wl_resource *surface_resource,
int32_t hotspot_x, int32_t hotspot_y) {
struct wlr_tablet_tool_client_v2 *tool = tablet_tool_client_from_resource(resource);
if (!tool) {
return;
}
struct wlr_surface *surface = NULL;
if (surface_resource != NULL) {
surface = wlr_surface_from_resource(surface_resource);
if (!wlr_surface_set_role(surface, &tablet_tool_cursor_surface_role, NULL,
surface_resource, ZWP_TABLET_TOOL_V2_ERROR_ROLE)) {
return;
}
}
struct wlr_tablet_v2_event_cursor evt = {
.surface = surface,
.serial = serial,
.hotspot_x = hotspot_x,
.hotspot_y = hotspot_y,
.seat_client = tool->seat->seat_client,
};
wl_signal_emit(&tool->tool->events.set_cursor, &evt);
}
static void handle_tablet_tool_v2_destroy(struct wl_client *client,
struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static struct zwp_tablet_tool_v2_interface tablet_tool_impl = {
.set_cursor = handle_tablet_tool_v2_set_cursor,
.destroy = handle_tablet_tool_v2_destroy,
};
static enum zwp_tablet_tool_v2_type tablet_type_from_wlr_type(
enum wlr_tablet_tool_type wlr_type) {
switch(wlr_type) {
case WLR_TABLET_TOOL_TYPE_PEN:
return ZWP_TABLET_TOOL_V2_TYPE_PEN;
case WLR_TABLET_TOOL_TYPE_ERASER:
return ZWP_TABLET_TOOL_V2_TYPE_ERASER;
case WLR_TABLET_TOOL_TYPE_BRUSH:
return ZWP_TABLET_TOOL_V2_TYPE_BRUSH;
case WLR_TABLET_TOOL_TYPE_PENCIL:
return ZWP_TABLET_TOOL_V2_TYPE_PENCIL;
case WLR_TABLET_TOOL_TYPE_AIRBRUSH:
return ZWP_TABLET_TOOL_V2_TYPE_AIRBRUSH;
case WLR_TABLET_TOOL_TYPE_MOUSE:
return ZWP_TABLET_TOOL_V2_TYPE_MOUSE;
case WLR_TABLET_TOOL_TYPE_LENS:
return ZWP_TABLET_TOOL_V2_TYPE_LENS;
}
assert(false && "Unreachable");
}
void destroy_tablet_tool_v2(struct wl_resource *resource) {
struct wlr_tablet_tool_client_v2 *client =
tablet_tool_client_from_resource(resource);
if (!client) {
return;
}
if (client->frame_source) {
wl_event_source_remove(client->frame_source);
}
if (client->tool && client->tool->current_client == client) {
client->tool->current_client = NULL;
}
wl_list_remove(&client->seat_link);
wl_list_remove(&client->tool_link);
free(client);
wl_resource_set_user_data(resource, NULL);
}
void add_tablet_tool_client(struct wlr_tablet_seat_client_v2 *seat,
struct wlr_tablet_v2_tablet_tool *tool) {
struct wlr_tablet_tool_client_v2 *client =
calloc(1, sizeof(struct wlr_tablet_tool_client_v2));
if (!client) {
return;
}
client->tool = tool;
client->seat = seat;
client->resource =
wl_resource_create(seat->wl_client, &zwp_tablet_tool_v2_interface, 1, 0);
if (!client->resource) {
free(client);
return;
}
wl_resource_set_implementation(client->resource, &tablet_tool_impl,
client, destroy_tablet_tool_v2);
zwp_tablet_seat_v2_send_tool_added(seat->resource, client->resource);
// Send the expected events
if (tool->wlr_tool->hardware_serial) {
zwp_tablet_tool_v2_send_hardware_serial(
client->resource,
tool->wlr_tool->hardware_serial >> 32,
tool->wlr_tool->hardware_serial & 0xFFFFFFFF);
}
if (tool->wlr_tool->hardware_wacom) {
zwp_tablet_tool_v2_send_hardware_id_wacom(
client->resource,
tool->wlr_tool->hardware_wacom >> 32,
tool->wlr_tool->hardware_wacom & 0xFFFFFFFF);
}
zwp_tablet_tool_v2_send_type(client->resource,
tablet_type_from_wlr_type(tool->wlr_tool->type));
if (tool->wlr_tool->tilt) {
zwp_tablet_tool_v2_send_capability(client->resource,
ZWP_TABLET_TOOL_V2_CAPABILITY_TILT);
}
if (tool->wlr_tool->pressure) {
zwp_tablet_tool_v2_send_capability(client->resource,
ZWP_TABLET_TOOL_V2_CAPABILITY_PRESSURE);
}
if (tool->wlr_tool->distance) {
zwp_tablet_tool_v2_send_capability(client->resource,
ZWP_TABLET_TOOL_V2_CAPABILITY_DISTANCE);
}
if (tool->wlr_tool->rotation) {
zwp_tablet_tool_v2_send_capability(client->resource,
ZWP_TABLET_TOOL_V2_CAPABILITY_ROTATION);
}
if (tool->wlr_tool->slider) {
zwp_tablet_tool_v2_send_capability(client->resource,
ZWP_TABLET_TOOL_V2_CAPABILITY_SLIDER);
}
if (tool->wlr_tool->wheel) {
zwp_tablet_tool_v2_send_capability(client->resource,
ZWP_TABLET_TOOL_V2_CAPABILITY_WHEEL);
}
zwp_tablet_tool_v2_send_done(client->resource);
client->client = seat->wl_client;
wl_list_insert(&seat->tools, &client->seat_link);
wl_list_insert(&tool->clients, &client->tool_link);
}
static void handle_wlr_tablet_tool_destroy(struct wl_listener *listener, void *data) {
struct wlr_tablet_v2_tablet_tool *tool =
wl_container_of(listener, tool, tool_destroy);
struct wlr_tablet_tool_client_v2 *pos;
struct wlr_tablet_tool_client_v2 *tmp;
wl_list_for_each_safe(pos, tmp, &tool->clients, tool_link) {
zwp_tablet_tool_v2_send_removed(pos->resource);
pos->tool = NULL;
}
wl_list_remove(&tool->clients);
wl_list_remove(&tool->link);
wl_list_remove(&tool->tool_destroy.link);
wl_list_remove(&tool->events.set_cursor.listener_list);
free(tool);
}
struct wlr_tablet_v2_tablet_tool *wlr_tablet_tool_create(
struct wlr_tablet_manager_v2 *manager,
struct wlr_seat *wlr_seat,
struct wlr_tablet_tool *wlr_tool) {
struct wlr_tablet_seat_v2 *seat = get_or_create_tablet_seat(manager, wlr_seat);
if (!seat) {
return NULL;
}
struct wlr_tablet_v2_tablet_tool *tool =
calloc(1, sizeof(struct wlr_tablet_v2_tablet_tool));
if (!tool) {
return NULL;
}
tool->wlr_tool = wlr_tool;
wl_list_init(&tool->clients);
tool->tool_destroy.notify = handle_wlr_tablet_tool_destroy;
wl_signal_add(&wlr_tool->events.destroy, &tool->tool_destroy);
wl_list_insert(&seat->tools, &tool->link);
// We need to create a tablet client for all clients on the seat
struct wlr_tablet_seat_client_v2 *pos;
wl_list_for_each(pos, &seat->clients, seat_link) {
// Tell the clients about the new tool
add_tablet_tool_client(pos, tool);
}
wl_signal_init(&tool->events.set_cursor);
return tool;
}
struct wlr_tablet_tool_client_v2 *tablet_tool_client_from_resource(struct wl_resource *resource) {
assert(wl_resource_instance_of(resource, &zwp_tablet_tool_v2_interface,
&tablet_tool_impl));
return wl_resource_get_user_data(resource);
}
/* Actual protocol foo */
// Button 0 is KEY_RESERVED in input-event-codes on linux (and freebsd)
static ssize_t tablet_tool_button_update(struct wlr_tablet_v2_tablet_tool *tool,
uint32_t button, enum zwp_tablet_pad_v2_button_state state) {
bool found = false;
size_t i = 0;
for (; i < tool->num_buttons; ++i) {
if (tool->pressed_buttons[i] == button) {
found = true;
wlr_log(WLR_DEBUG, "Found the button \\o/: %u", button);
break;
}
}
if (state == ZWP_TABLET_PAD_V2_BUTTON_STATE_PRESSED && found) {
/* Already have the button saved, durr */
return -1;
}
if (state == ZWP_TABLET_PAD_V2_BUTTON_STATE_PRESSED && !found) {
if (tool->num_buttons < WLR_TABLET_V2_TOOL_BUTTONS_CAP) {
i = tool->num_buttons++;
tool->pressed_buttons[i] = button;
tool->pressed_serials[i] = -1;
} else {
i = -1;
wlr_log(WLR_ERROR, "You pressed more than %d tablet tool buttons. This is currently not supporte by wlroots. Please report this with a description of your tablet, since this is either a bug, or fancy hardware",
WLR_TABLET_V2_TOOL_BUTTONS_CAP);
}
}
if (state == ZWP_TABLET_PAD_V2_BUTTON_STATE_RELEASED && found) {
wlr_log(WLR_DEBUG, "Removed the button \\o/: %u", button);
tool->pressed_buttons[i] = 0;
tool->pressed_serials[i] = 0;
tool->num_buttons = push_zeroes_to_end(tool->pressed_buttons, WLR_TABLET_V2_TOOL_BUTTONS_CAP);
tool->num_buttons = push_zeroes_to_end(tool->pressed_serials, WLR_TABLET_V2_TOOL_BUTTONS_CAP);
}
assert(tool->num_buttons <= WLR_TABLET_V2_TOOL_BUTTONS_CAP);
return i;
}
static inline int64_t timespec_to_msec(const struct timespec *a) {
return (int64_t)a->tv_sec * 1000 + a->tv_nsec / 1000000;
}
static void send_tool_frame(void *data) {
struct wlr_tablet_tool_client_v2 *tool = data;
struct timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
zwp_tablet_tool_v2_send_frame(tool->resource, timespec_to_msec(&now));
tool->frame_source = NULL;
}
static void queue_tool_frame(struct wlr_tablet_tool_client_v2 *tool) {
struct wl_display *display = wl_client_get_display(tool->client);
struct wl_event_loop *loop = wl_display_get_event_loop(display);
if (!tool->frame_source) {
tool->frame_source =
wl_event_loop_add_idle(loop, send_tool_frame, tool);
}
}
void wlr_send_tablet_v2_tablet_tool_proximity_in(
struct wlr_tablet_v2_tablet_tool *tool,
struct wlr_tablet_v2_tablet *tablet,
struct wlr_surface *surface) {
struct wl_client *client = wl_resource_get_client(surface->resource);
if (tool->focused_surface == surface) {
return;
}
struct wlr_tablet_client_v2 *tablet_tmp;
struct wlr_tablet_client_v2 *tablet_client = NULL;
wl_list_for_each(tablet_tmp, &tablet->clients, tablet_link) {
if (tablet_tmp->client == client) {
tablet_client = tablet_tmp;
break;
}
}
// Couldn't find the client binding for the surface's client. Either
// the client didn't bind tablet_v2 at all, or not for the relevant
// seat
if (!tablet_client) {
return;
}
struct wlr_tablet_tool_client_v2 *tool_tmp = NULL;
struct wlr_tablet_tool_client_v2 *tool_client = NULL;
wl_list_for_each(tool_tmp, &tool->clients, tool_link) {
if (tool_tmp->client == client) {
tool_client = tool_tmp;
break;
}
}
// Couldn't find the client binding for the surface's client. Either
// the client didn't bind tablet_v2 at all, or not for the relevant
// seat
if (!tool_client) {
return;
}
tool->current_client = tool_client;
uint32_t serial = wl_display_next_serial(wl_client_get_display(client));
tool->focused_surface = surface;
tool->proximity_serial = serial;
zwp_tablet_tool_v2_send_proximity_in(tool_client->resource, serial,
tablet_client->resource, surface->resource);
/* Send all the pressed buttons */
for (size_t i = 0; i < tool->num_buttons; ++i) {
wlr_send_tablet_v2_tablet_tool_button(tool,
tool->pressed_buttons[i],
ZWP_TABLET_PAD_V2_BUTTON_STATE_PRESSED);
}
if (tool->is_down) {
wlr_send_tablet_v2_tablet_tool_down(tool);
}
queue_tool_frame(tool_client);
}
void wlr_send_tablet_v2_tablet_tool_motion(
struct wlr_tablet_v2_tablet_tool *tool, double x, double y) {
if (!tool->current_client) {
return;
}
zwp_tablet_tool_v2_send_motion(tool->current_client->resource,
wl_fixed_from_double(x), wl_fixed_from_double(y));
queue_tool_frame(tool->current_client);
}
void wlr_send_tablet_v2_tablet_tool_proximity_out(
struct wlr_tablet_v2_tablet_tool *tool) {
if (tool->current_client) {
for (size_t i = 0; i < tool->num_buttons; ++i) {
zwp_tablet_tool_v2_send_button(tool->current_client->resource,
tool->pressed_serials[i],
tool->pressed_buttons[i],
ZWP_TABLET_PAD_V2_BUTTON_STATE_RELEASED);
}
if (tool->is_down) {
zwp_tablet_tool_v2_send_up(tool->current_client->resource);
}
zwp_tablet_tool_v2_send_proximity_out(tool->current_client->resource);
if (tool->current_client->frame_source) {
wl_event_source_remove(tool->current_client->frame_source);
send_tool_frame(tool->current_client);
}
tool->current_client = NULL;
tool->focused_surface = NULL;
}
}
void wlr_send_tablet_v2_tablet_tool_pressure(
struct wlr_tablet_v2_tablet_tool *tool, double pressure) {
if (tool->current_client) {
zwp_tablet_tool_v2_send_pressure(tool->current_client->resource,
pressure * 65535);
queue_tool_frame(tool->current_client);
}
}
void wlr_send_tablet_v2_tablet_tool_distance(
struct wlr_tablet_v2_tablet_tool *tool, double distance) {
if (tool->current_client) {
zwp_tablet_tool_v2_send_distance(tool->current_client->resource,
distance * 65535);
queue_tool_frame(tool->current_client);
}
}
void wlr_send_tablet_v2_tablet_tool_tilt(
struct wlr_tablet_v2_tablet_tool *tool, double x, double y) {
if (!tool->current_client) {
return;
}
zwp_tablet_tool_v2_send_tilt(tool->current_client->resource,
wl_fixed_from_double(x), wl_fixed_from_double(y));
queue_tool_frame(tool->current_client);
}
void wlr_send_tablet_v2_tablet_tool_rotation(
struct wlr_tablet_v2_tablet_tool *tool, double degrees) {
if (!tool->current_client) {
return;
}
zwp_tablet_tool_v2_send_rotation(tool->current_client->resource,
wl_fixed_from_double(degrees));
queue_tool_frame(tool->current_client);
}
void wlr_send_tablet_v2_tablet_tool_slider(
struct wlr_tablet_v2_tablet_tool *tool, double position) {
if (!tool->current_client) {
return;
}
zwp_tablet_tool_v2_send_slider(tool->current_client->resource,
position * 65535);
queue_tool_frame(tool->current_client);
}
void wlr_send_tablet_v2_tablet_tool_button(
struct wlr_tablet_v2_tablet_tool *tool, uint32_t button,
enum zwp_tablet_pad_v2_button_state state) {
ssize_t index = tablet_tool_button_update(tool, button, state);
if (tool->current_client) {
struct wl_client *client =
wl_resource_get_client(tool->current_client->resource);
uint32_t serial = wl_display_next_serial(wl_client_get_display(client));
if (index >= 0) {
tool->pressed_serials[index] = serial;
}
zwp_tablet_tool_v2_send_button(tool->current_client->resource,
serial, button, state);
queue_tool_frame(tool->current_client);
}
}
void wlr_send_tablet_v2_tablet_tool_wheel(
struct wlr_tablet_v2_tablet_tool *tool, double degrees, int32_t clicks) {
if (tool->current_client) {
zwp_tablet_tool_v2_send_wheel(tool->current_client->resource,
clicks, degrees);
queue_tool_frame(tool->current_client);
}
}
void wlr_send_tablet_v2_tablet_tool_down(struct wlr_tablet_v2_tablet_tool *tool) {
if (tool->is_down) {
return;
}
tool->is_down = true;
if (tool->current_client) {
struct wl_client *client =
wl_resource_get_client(tool->current_client->resource);
uint32_t serial = wl_display_next_serial(wl_client_get_display(client));
zwp_tablet_tool_v2_send_down(tool->current_client->resource,
serial);
queue_tool_frame(tool->current_client);
tool->down_serial = serial;
}
}
void wlr_send_tablet_v2_tablet_tool_up(struct wlr_tablet_v2_tablet_tool *tool) {
if (!tool->is_down) {
return;
}
tool->is_down = false;
tool->down_serial = 0;
if (tool->current_client) {
zwp_tablet_tool_v2_send_up(tool->current_client->resource);
queue_tool_frame(tool->current_client);
}
}
| 1 | 12,172 | This variable name should probably contain `grab` | swaywm-wlroots | c |
@@ -0,0 +1,10 @@
+# frozen_string_literal: true
+
+# These configuration settings are used to communicate with the
+# Open Aire Research Project Registry API. For more information about
+# the API and to verify that your configuration settings are correct,
+Rails.configuration.x.open_aire.api_base_url = "https://api.openaire.eu/"
+# The search_path should contain `%s`. This is where the funder is appended!
+Rails.configuration.x.open_aire.search_path = "projects/dspace/%s/ALL/ALL"
+Rails.configuration.x.open_aire.default_funder = "H2020"
+Rails.configuration.x.open_aire.active = true | 1 | 1 | 19,201 | Which initializer should we be using for open_aire? The one in `external_apis` or this one in `initializers`? | DMPRoadmap-roadmap | rb |
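# A hedged sketch of how these settings might be combined at request time
# (illustration only; the configuration keys above are real, but the `funder`
# variable and the URL assembly below are assumptions, not code from this
# repository).
funder = Rails.configuration.x.open_aire.default_funder # e.g. "H2020"
path = format(Rails.configuration.x.open_aire.search_path, funder)
url = "#{Rails.configuration.x.open_aire.api_base_url}#{path}"
# => "https://api.openaire.eu/projects/dspace/H2020/ALL/ALL"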
|
@@ -8,6 +8,7 @@ import (
inet "gx/ipfs/QmNgLg1NTw37iWbYPKcyK85YJ9Whs1MkPtJwhfqbNYAyKg/go-libp2p-net"
pstore "gx/ipfs/QmPiemjiKBC9VA7vZF82m4x1oygtg2c2YVqag8PX7dN1BD/go-libp2p-peerstore"
+ routing "gx/ipfs/QmTiRqrF5zkdZyrdsL5qndG1UbeWi8k8N2pYxCtXWrahR2/go-libp2p-routing"
peer "gx/ipfs/QmY5Grm8pJdiSSVsYxx4uNRgweY72EmYwuSDbRnbFok3iY/go-libp2p-peer"
host "gx/ipfs/QmaoXrM4Z41PD48JY36YqQGKQpLGjyLA2cKcLsES7YddAq/go-libp2p-host"
logging "gx/ipfs/QmcuXC5cxs79ro2cUuHs4HQ2bkDLJUYokwL8aivcX6HW3C/go-log" | 1 | package filnet
import (
"context"
"math/rand"
"sync"
"time"
inet "gx/ipfs/QmNgLg1NTw37iWbYPKcyK85YJ9Whs1MkPtJwhfqbNYAyKg/go-libp2p-net"
pstore "gx/ipfs/QmPiemjiKBC9VA7vZF82m4x1oygtg2c2YVqag8PX7dN1BD/go-libp2p-peerstore"
peer "gx/ipfs/QmY5Grm8pJdiSSVsYxx4uNRgweY72EmYwuSDbRnbFok3iY/go-libp2p-peer"
host "gx/ipfs/QmaoXrM4Z41PD48JY36YqQGKQpLGjyLA2cKcLsES7YddAq/go-libp2p-host"
logging "gx/ipfs/QmcuXC5cxs79ro2cUuHs4HQ2bkDLJUYokwL8aivcX6HW3C/go-log"
)
var log = logging.Logger("bootstrap")
// Bootstrapper attempts to keep the p2p host connected to the filecoin network
// by keeping a minimum threshold of connections. If the threshold isn't met it
// connects to a random subset of the bootstrap peers. It does not use peer routing
// to discover new peers. To stop a Bootstrapper cancel the context passed in Start()
// or call Stop().
//
// Code loosely modeled on go-ipfs/core/bootstrap.go, take a look there for inspiration
// if you're adding new features.
//
// TODO discover new peers
type Bootstrapper struct {
// Config
// MinPeerThreshold is the number of connections it attempts to maintain.
MinPeerThreshold int
// Peers to connect to if we fall below the threshold.
bootstrapPeers []pstore.PeerInfo
// Period is the interval at which it periodically checks to see
// if the threshold is maintained.
Period time.Duration
// ConnectionTimeout is how long to wait before timing out a connection attempt.
ConnectionTimeout time.Duration
// Dependencies
h host.Host
d inet.Dialer
// Does the work. Usually Bootstrapper.bootstrap. Argument is a slice of
// currently-connected peers (so it won't attempt to reconnect).
Bootstrap func([]peer.ID)
// Bookkeeping
ticker *time.Ticker
ctx context.Context
cancel context.CancelFunc
}
// NewBootstrapper returns a new Bootstrapper that will attempt to keep connected
// to the filecoin network by connecting to the given bootstrap peers.
func NewBootstrapper(bootstrapPeers []pstore.PeerInfo, h host.Host, d inet.Dialer, minPeer int, period time.Duration) *Bootstrapper {
b := &Bootstrapper{
MinPeerThreshold: minPeer,
bootstrapPeers: bootstrapPeers,
Period: period,
ConnectionTimeout: 20 * time.Second,
h: h,
d: d,
}
b.Bootstrap = b.bootstrap
return b
}
// Start starts the Bootstrapper bootstrapping. Cancel `ctx` or call Stop() to stop it.
func (b *Bootstrapper) Start(ctx context.Context) {
b.ctx, b.cancel = context.WithCancel(ctx)
b.ticker = time.NewTicker(b.Period)
go func() {
defer b.ticker.Stop()
for {
select {
case <-b.ctx.Done():
return
case <-b.ticker.C:
b.Bootstrap(b.d.Peers())
}
}
}()
}
// Stop stops the Bootstrapper.
func (b *Bootstrapper) Stop() {
if b.cancel != nil {
b.cancel()
}
}
// bootstrap does the actual work. If the number of connected peers
// has fallen below b.MinPeerThreshold it will attempt to connect to
// a random subset of its bootstrap peers.
func (b *Bootstrapper) bootstrap(currentPeers []peer.ID) {
peersNeeded := b.MinPeerThreshold - len(currentPeers)
if peersNeeded < 1 {
return
}
ctx, cancel := context.WithTimeout(b.ctx, b.ConnectionTimeout)
var wg sync.WaitGroup
defer func() {
wg.Wait()
cancel()
}()
peersAttempted := 0
for _, i := range rand.Perm(len(b.bootstrapPeers)) {
pinfo := b.bootstrapPeers[i]
// Don't try to connect to an already connected peer.
if hasPID(currentPeers, pinfo.ID) {
continue
}
wg.Add(1)
go func() {
if err := b.h.Connect(ctx, pinfo); err != nil {
log.Warningf("got error trying to connect to bootstrap node %+v: %s", pinfo, err.Error())
}
wg.Done()
}()
peersAttempted++
if peersAttempted == peersNeeded {
return
}
}
log.Warningf("not enough bootstrap nodes to maintain %d connections (current connections: %d)", b.MinPeerThreshold, len(currentPeers))
}
func hasPID(pids []peer.ID, pid peer.ID) bool {
for _, p := range pids {
if p == pid {
return true
}
}
return false
}
| 1 | 16,386 | remove todo on line 28? | filecoin-project-venus | go |
@@ -0,0 +1,17 @@
+_base_ = [
+ '../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py'
+]
+# optimizer
+model = dict(
+ pretrained='open-mmlab://resnext101_64x4d',
+ backbone=dict(
+ type='ResNeXt',
+ depth=101,
+ groups=64,
+ base_width=4,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=1,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ style='pytorch'))
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) | 1 | 1 | 24,577 | clean unnecessary arguments. | open-mmlab-mmdetection | py |
|
@@ -30,6 +30,8 @@ public enum Status {
SUCCEEDED(50),
KILLING(55),
KILLED(60),
+ // EXECUTION_STOPPED refers to a terminal flow status due to crashed executor/container
+ EXECUTION_STOPPED(65),
FAILED(70),
FAILED_FINISHING(80),
SKIPPED(90), | 1 | /*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import com.google.common.collect.ImmutableMap;
import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;
public enum Status {
READY(10),
  DISPATCHING(15),
PREPARING(20),
RUNNING(30),
PAUSED(40),
SUCCEEDED(50),
KILLING(55),
KILLED(60),
FAILED(70),
FAILED_FINISHING(80),
SKIPPED(90),
DISABLED(100),
QUEUED(110),
FAILED_SUCCEEDED(120),
CANCELLED(125);
// status is TINYINT in DB and the value ranges from -128 to 127
private static final ImmutableMap<Integer, Status> numValMap = Arrays.stream(Status.values())
.collect(ImmutableMap.toImmutableMap(status -> status.getNumVal(), status -> status));
public static final Set<Status> nonFinishingStatusAfterFlowStartsSet = new TreeSet<>(
Arrays.asList(Status.RUNNING, Status.QUEUED, Status.PAUSED, Status.FAILED_FINISHING));
private final int numVal;
Status(final int numVal) {
this.numVal = numVal;
}
public static Status fromInteger(final int x) {
return numValMap.getOrDefault(x, READY);
}
public static boolean isStatusFinished(final Status status) {
switch (status) {
case FAILED:
case KILLED:
case SUCCEEDED:
case SKIPPED:
case FAILED_SUCCEEDED:
case CANCELLED:
return true;
default:
return false;
}
}
public static boolean isStatusRunning(final Status status) {
switch (status) {
case RUNNING:
case FAILED_FINISHING:
case QUEUED:
return true;
default:
return false;
}
}
public static boolean isStatusFailed(final Status status) {
switch (status) {
case FAILED:
case KILLED:
case CANCELLED:
return true;
default:
return false;
}
}
public static boolean isStatusSucceeded(final Status status) {
switch (status) {
case SUCCEEDED:
case FAILED_SUCCEEDED:
case SKIPPED:
return true;
default:
return false;
}
}
public int getNumVal() {
return this.numVal;
}
}
| 1 | 22,159 | Could the name of the status be confusing to users? It says the execution stopped but is it a temporary stop? Will it continue to run at some point? To me the name does not suggest a final status but a transition one. WDYT @sshardool @jakhani @djaiswal83 @aditya1105 | azkaban-azkaban | java |
@@ -20,7 +20,8 @@
import testtools
-import molecule.provisioners as provisioners
+from molecule.Provisioners import DockerProvisioner
+import molecule.utilities
from molecule.core import Molecule
import yaml
from molecule.ansible_playbook import AnsiblePlaybook | 1 | # Copyright (c) 2015 Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import testtools
import molecule.provisioners as provisioners
from molecule.core import Molecule
import yaml
from molecule.ansible_playbook import AnsiblePlaybook
class TestDockerProvisioner(testtools.TestCase):
def setUp(self):
super(TestDockerProvisioner, self).setUp()
# Setup mock molecule
self._mock_molecule = Molecule(dict())
self.temp = '/tmp/test_config_load_defaults_external_file.yml'
data = {
'molecule': {
'molecule_dir': '.test_molecule',
'inventory_file': 'tests/ansible_inventory'
},
'docker': {
'containers': [
{'name': 'test1',
'image': 'ubuntu',
'image_version': 'latest',
'ansible_groups': ['group1']}, {'name': 'test2',
'image': 'ubuntu',
'image_version': 'latest',
'ansible_groups':
['group2']}
]
},
'ansible': {
'config_file': 'test_config',
'inventory_file': 'test_inventory'
}
}
with open(self.temp, 'w') as f:
f.write(yaml.dump(data, default_flow_style=True))
self._mock_molecule._config.load_defaults_file(defaults_file=self.temp)
self._mock_molecule._state = dict()
def test_name(self):
docker_provisioner = provisioners.DockerProvisioner(
self._mock_molecule)
# false values don't exist in arg dict at all
self.assertEqual(docker_provisioner.name, 'docker')
def test_get_provisioner(self):
self.assertEqual(
provisioners.get_provisioner(self._mock_molecule).name, 'docker')
def test_up(self):
docker_provisioner = provisioners.DockerProvisioner(
self._mock_molecule)
docker_provisioner.up()
docker_provisioner.destroy()
def test_instances(self):
docker_provisioner = provisioners.DockerProvisioner(
self._mock_molecule)
self.assertEqual(docker_provisioner.instances[0]['name'], 'test1')
self.assertEqual(docker_provisioner.instances[1]['name'], 'test2')
def test_status(self):
docker_provisioner = provisioners.DockerProvisioner(
self._mock_molecule)
docker_provisioner.up()
self.assertEquals('test1', docker_provisioner.status()[1].name)
self.assertEquals('test2', docker_provisioner.status()[0].name)
self.assertIn('Up', docker_provisioner.status()[1].state)
self.assertIn('Up', docker_provisioner.status()[0].state)
self.assertEqual('docker', docker_provisioner.status()[0].provider)
self.assertEqual('docker', docker_provisioner.status()[1].provider)
def test_destroy(self):
docker_provisioner = provisioners.DockerProvisioner(
self._mock_molecule)
docker_provisioner.up()
self.assertEquals('test1', docker_provisioner.status()[1].name)
self.assertEquals('test2', docker_provisioner.status()[0].name)
self.assertIn('Up', docker_provisioner.status()[1].state)
self.assertIn('Up', docker_provisioner.status()[0].state)
docker_provisioner.destroy()
self.assertIn('Not Created', docker_provisioner.status()[1].state)
self.assertIn('Not Created', docker_provisioner.status()[0].state)
def test_provision(self):
docker_provisioner = provisioners.DockerProvisioner(
self._mock_molecule)
docker_provisioner.destroy()
docker_provisioner.up()
self.book = docker_provisioner.ansible_connection_params
self.book['playbook'] = 'tests/playbook.yml'
self.book['inventory'] = 'test1,test2,'
self.ansible = AnsiblePlaybook(self.book)
self.assertEqual((None, ''), self.ansible.execute())
docker_provisioner.destroy()
def test_inventory_generation(self):
self._mock_molecule._provisioner = provisioners.get_provisioner(
self._mock_molecule)
self._mock_molecule._provisioner.destroy()
self._mock_molecule._provisioner.up()
self._mock_molecule._create_inventory_file()
self.book = self._mock_molecule._provisioner.ansible_connection_params
self.book['playbook'] = 'tests/playbook.yml'
self.book['inventory'] = 'tests/ansible_inventory'
self.ansible = AnsiblePlaybook(self.book)
self.assertEqual((None, ''), self.ansible.execute())
| 1 | 6,219 | Since we created a Provisioners directory, can we do the same with the tests, and follow the same structure as the code? | ansible-community-molecule | py |
@@ -16,11 +16,12 @@ import (
)
var (
- sentryAddr string // Address of the sentry <host>:<port>
- coreAddr string // Address of the core <host>:<port>
- chaindata string // Path to chaindata
- database string // Type of database (lmdb or mdbx)
- mapSizeStr string // Map size for LMDB
+ sentryAddr string // Address of the sentry <host>:<port>
+ sentryAddrs []string // Address of the sentry <host>:<port>
+ coreAddr string // Address of the core <host>:<port>
+ chaindata string // Path to chaindata
+ database string // Type of database (lmdb or mdbx)
+ mapSizeStr string // Map size for LMDB
freelistReuse int
)
| 1 | package commands
import (
"context"
"fmt"
"os"
"os/signal"
"syscall"
"github.com/c2h5oh/datasize"
"github.com/ledgerwatch/turbo-geth/cmd/utils"
"github.com/ledgerwatch/turbo-geth/ethdb"
"github.com/ledgerwatch/turbo-geth/internal/debug"
"github.com/ledgerwatch/turbo-geth/log"
"github.com/spf13/cobra"
)
var (
sentryAddr string // Address of the sentry <host>:<port>
coreAddr string // Address of the core <host>:<port>
chaindata string // Path to chaindata
database string // Type of database (lmdb or mdbx)
mapSizeStr string // Map size for LMDB
freelistReuse int
)
func init() {
utils.CobraFlags(rootCmd, append(debug.Flags, utils.MetricFlags...))
}
func rootContext() context.Context {
ctx, cancel := context.WithCancel(context.Background())
go func() {
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
defer signal.Stop(ch)
select {
case <-ch:
log.Info("Got interrupt, shutting down...")
case <-ctx.Done():
}
cancel()
}()
return ctx
}
var rootCmd = &cobra.Command{
Use: "headers",
Short: "headers is Proof Of Concept for new header/block downloading algorithms",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
if err := debug.SetupCobra(cmd); err != nil {
panic(err)
}
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
debug.Exit()
},
}
func Execute() {
if err := rootCmd.ExecuteContext(rootContext()); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func must(err error) {
if err != nil {
panic(err)
}
}
func withChaindata(cmd *cobra.Command) {
cmd.Flags().StringVar(&chaindata, "chaindata", "", "path to the db")
must(cmd.MarkFlagDirname("chaindata"))
must(cmd.MarkFlagRequired("chaindata"))
cmd.Flags().StringVar(&database, "database", "", "lmdb|mdbx")
}
func withLmdbFlags(cmd *cobra.Command) {
cmd.Flags().StringVar(&mapSizeStr, "lmdb.mapSize", "", "map size for LMDB")
cmd.Flags().IntVar(&freelistReuse, "maxFreelistReuse", 0, "Find a big enough contiguous page range for large values in freelist is hard just allocate new pages and even don't try to search if value is bigger than this limit. Measured in pages.")
}
func openDatabase(path string) *ethdb.ObjectDatabase {
db := ethdb.NewObjectDatabase(openKV(path, false))
return db
}
func openKV(path string, exclusive bool) ethdb.RwKV {
if database == "mdbx" {
opts := ethdb.NewMDBX().Path(path)
if exclusive {
opts = opts.Exclusive()
}
if mapSizeStr != "" {
var mapSize datasize.ByteSize
must(mapSize.UnmarshalText([]byte(mapSizeStr)))
opts = opts.MapSize(mapSize)
}
if freelistReuse > 0 {
opts = opts.MaxFreelistReuse(uint(freelistReuse))
}
return opts.MustOpen()
}
opts := ethdb.NewLMDB().Path(path)
if exclusive {
opts = opts.Exclusive()
}
if mapSizeStr != "" {
var mapSize datasize.ByteSize
must(mapSize.UnmarshalText([]byte(mapSizeStr)))
opts = opts.MapSize(mapSize)
}
if freelistReuse > 0 {
opts = opts.MaxFreelistReuse(uint(freelistReuse))
}
return opts.MustOpen()
}
| 1 | 22,051 | We don't need that one anymore? | ledgerwatch-erigon | go |
@@ -40,6 +40,7 @@ from graphite.render.attime import parseATTime
from graphite.render.functions import PieFunctions
from graphite.render.hashing import hashRequest, hashData
from graphite.render.glyph import GraphTypes
+from graphite.storage import STORE
from django.http import HttpResponse, HttpResponseServerError, HttpResponseRedirect
from django.template import Context, loader | 1 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import csv
import math
from datetime import datetime
from time import time
from random import shuffle
from httplib import CannotSendRequest
from urllib import urlencode
from urlparse import urlsplit, urlunsplit
from cgi import parse_qs
from cStringIO import StringIO
try:
import cPickle as pickle
except ImportError:
import pickle
try: # See if there is a system installation of pytz first
import pytz
except ImportError: # Otherwise we fall back to Graphite's bundled version
from graphite.thirdparty import pytz
from graphite.util import getProfileByUsername, json, unpickle
from graphite.remote_storage import HTTPConnectionWithTimeout
from graphite.logger import log
from graphite.render.evaluator import evaluateTarget, extractPathExpressions
from graphite.render.datalib import prefetchRemoteData
from graphite.render.attime import parseATTime
from graphite.render.functions import PieFunctions
from graphite.render.hashing import hashRequest, hashData
from graphite.render.glyph import GraphTypes
from django.http import HttpResponse, HttpResponseServerError, HttpResponseRedirect
from django.template import Context, loader
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.utils.timezone import get_current_timezone
from django.utils.cache import add_never_cache_headers, patch_response_headers
def renderView(request):
start = time()
(graphOptions, requestOptions) = parseOptions(request)
useCache = 'noCache' not in requestOptions
cacheTimeout = requestOptions['cacheTimeout']
requestContext = {
'startTime' : requestOptions['startTime'],
'endTime' : requestOptions['endTime'],
'now': requestOptions['now'],
'localOnly' : requestOptions['localOnly'],
'prefetchedRemoteData' : {},
'data' : []
}
data = requestContext['data']
# First we check the request cache
if useCache:
requestKey = hashRequest(request)
cachedResponse = cache.get(requestKey)
if cachedResponse:
log.cache('Request-Cache hit [%s]' % requestKey)
log.rendering('Returned cached response in %.6f' % (time() - start))
return cachedResponse
else:
log.cache('Request-Cache miss [%s]' % requestKey)
# Now we prepare the requested data
if requestOptions['graphType'] == 'pie':
for target in requestOptions['targets']:
if target.find(':') >= 0:
try:
name,value = target.split(':',1)
value = float(value)
except:
raise ValueError, "Invalid target '%s'" % target
data.append( (name,value) )
else:
seriesList = evaluateTarget(requestContext, target)
for series in seriesList:
func = PieFunctions[requestOptions['pieMode']]
data.append( (series.name, func(requestContext, series) or 0 ))
elif requestOptions['graphType'] == 'line':
# Let's see if at least our data is cached
if useCache:
targets = requestOptions['targets']
startTime = requestOptions['startTime']
endTime = requestOptions['endTime']
dataKey = hashData(targets, startTime, endTime)
cachedData = cache.get(dataKey)
if cachedData:
log.cache("Data-Cache hit [%s]" % dataKey)
else:
log.cache("Data-Cache miss [%s]" % dataKey)
else:
cachedData = None
if cachedData is not None:
requestContext['data'] = data = cachedData
else: # Have to actually retrieve the data now
targets = requestOptions['targets']
if settings.REMOTE_PREFETCH_DATA:
t = time()
pathExpressions = extractPathExpressions(targets)
requestContext['prefetchedRemoteData'] = prefetchRemoteData(requestContext, pathExpressions)
log.rendering("Prefetching remote data took %.6f" % (time() - t))
for target in targets:
t = time()
seriesList = evaluateTarget(requestContext, target)
log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
data.extend(seriesList)
if useCache:
cache.add(dataKey, data, cacheTimeout)
format = requestOptions.get('format')
if format == 'csv':
response = HttpResponse(content_type='text/csv')
writer = csv.writer(response, dialect='excel')
for series in data:
for i, value in enumerate(series):
timestamp = datetime.fromtimestamp(series.start + (i * series.step), requestOptions['tzinfo'])
writer.writerow((series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"), value))
return response
if format == 'json':
series_data = []
if 'maxDataPoints' in requestOptions and any(data):
startTime = min([series.start for series in data])
endTime = max([series.end for series in data])
timeRange = endTime - startTime
maxDataPoints = requestOptions['maxDataPoints']
for series in data:
numberOfDataPoints = timeRange/series.step
if maxDataPoints < numberOfDataPoints:
valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))
secondsPerPoint = int(valuesPerPoint * series.step)
# Nudge start over a little bit so that the consolidation bands align with each call
# removing 'jitter' seen when refreshing.
nudge = secondsPerPoint + (series.start % series.step) - (series.start % secondsPerPoint)
series.start = series.start + nudge
valuesToLose = int(nudge/series.step)
for r in range(1, valuesToLose):
del series[0]
series.consolidate(valuesPerPoint)
timestamps = range(int(series.start), int(series.end) + 1, int(secondsPerPoint))
else:
timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
datapoints = zip(series, timestamps)
series_data.append(dict(target=series.name, datapoints=datapoints))
else:
for series in data:
timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
datapoints = zip(series, timestamps)
series_data.append( dict(target=series.name, datapoints=datapoints) )
if 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data)),
content_type='text/javascript')
else:
response = HttpResponse(content=json.dumps(series_data), content_type='application/json')
if useCache:
patch_response_headers(response, cache_timeout=cacheTimeout)
else:
add_never_cache_headers(response)
return response
if format == 'raw':
response = HttpResponse(content_type='text/plain')
for series in data:
response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
response.write( ','.join(map(str,series)) )
response.write('\n')
log.rendering('Total rawData rendering time %.6f' % (time() - start))
return response
if format == 'svg':
graphOptions['outputFormat'] = 'svg'
if format == 'pickle':
response = HttpResponse(content_type='application/pickle')
seriesInfo = [series.getInfo() for series in data]
pickle.dump(seriesInfo, response, protocol=-1)
log.rendering('Total pickle rendering time %.6f' % (time() - start))
return response
# We've got the data, now to render it
graphOptions['data'] = data
if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
image = delegateRendering(requestOptions['graphType'], graphOptions)
else:
image = doImageRender(requestOptions['graphClass'], graphOptions)
useSVG = graphOptions.get('outputFormat') == 'svg'
if useSVG and 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(image)),
content_type='text/javascript')
else:
response = buildResponse(image, useSVG and 'image/svg+xml' or 'image/png')
if useCache:
cache.add(requestKey, response, cacheTimeout)
patch_response_headers(response, cache_timeout=cacheTimeout)
else:
add_never_cache_headers(response)
log.rendering('Total rendering time %.6f seconds' % (time() - start))
return response
def parseOptions(request):
queryParams = request.REQUEST
# Start with some defaults
graphOptions = {'width' : 330, 'height' : 250}
requestOptions = {}
graphType = queryParams.get('graphType','line')
assert graphType in GraphTypes, "Invalid graphType '%s', must be one of %s" % (graphType,GraphTypes.keys())
graphClass = GraphTypes[graphType]
# Fill in the requestOptions
requestOptions['graphType'] = graphType
requestOptions['graphClass'] = graphClass
requestOptions['pieMode'] = queryParams.get('pieMode', 'average')
requestOptions['cacheTimeout'] = int( queryParams.get('cacheTimeout', settings.DEFAULT_CACHE_DURATION) )
requestOptions['targets'] = []
# Extract the targets out of the queryParams
mytargets = []
# Normal format: ?target=path.1&target=path.2
if len(queryParams.getlist('target')) > 0:
mytargets = queryParams.getlist('target')
# Rails/PHP/jQuery common practice format: ?target[]=path.1&target[]=path.2
elif len(queryParams.getlist('target[]')) > 0:
mytargets = queryParams.getlist('target[]')
# Collect the targets
for target in mytargets:
requestOptions['targets'].append(target)
if 'pickle' in queryParams:
requestOptions['format'] = 'pickle'
if 'rawData' in queryParams:
requestOptions['format'] = 'raw'
if 'format' in queryParams:
requestOptions['format'] = queryParams['format']
if 'jsonp' in queryParams:
requestOptions['jsonp'] = queryParams['jsonp']
if 'noCache' in queryParams:
requestOptions['noCache'] = True
if 'maxDataPoints' in queryParams and queryParams['maxDataPoints'].isdigit():
requestOptions['maxDataPoints'] = int(queryParams['maxDataPoints'])
requestOptions['localOnly'] = queryParams.get('local') == '1'
# Fill in the graphOptions
for opt in graphClass.customizable:
if opt in queryParams:
val = queryParams[opt]
if (val.isdigit() or (val.startswith('-') and val[1:].isdigit())) and opt not in ('fgcolor','bgcolor','fontColor'):
val = int(val)
elif '.' in val and (val.replace('.','',1).isdigit() or (val.startswith('-') and val[1:].replace('.','',1).isdigit())):
val = float(val)
elif val.lower() in ('true','false'):
val = val.lower() == 'true'
elif val.lower() == 'default' or val == '':
continue
graphOptions[opt] = val
tzinfo = get_current_timezone()
if 'tz' in queryParams:
try:
tzinfo = pytz.timezone(queryParams['tz'])
except pytz.UnknownTimeZoneError:
pass
requestOptions['tzinfo'] = tzinfo
# Get the time interval for time-oriented graph types
if graphType == 'line' or graphType == 'pie':
if 'now' in queryParams:
now = parseATTime(queryParams['now'])
else:
now = datetime.now(tzinfo)
if 'until' in queryParams:
untilTime = parseATTime(queryParams['until'], tzinfo, now)
else:
untilTime = now
if 'from' in queryParams:
fromTime = parseATTime(queryParams['from'], tzinfo, now)
else:
fromTime = parseATTime('-1d', tzinfo, now)
startTime = min(fromTime, untilTime)
endTime = max(fromTime, untilTime)
assert startTime != endTime, "Invalid empty time range"
requestOptions['startTime'] = startTime
requestOptions['endTime'] = endTime
requestOptions['now'] = now
return (graphOptions, requestOptions)
connectionPools = {}
def delegateRendering(graphType, graphOptions):
start = time()
postData = graphType + '\n' + pickle.dumps(graphOptions)
servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
shuffle(servers)
for server in servers:
start2 = time()
try:
# Get a connection
try:
pool = connectionPools[server]
except KeyError: #happens the first time
pool = connectionPools[server] = set()
try:
connection = pool.pop()
except KeyError: #No available connections, have to make a new one
connection = HTTPConnectionWithTimeout(server)
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
# Send the request
try:
connection.request('POST','/render/local/', postData)
except CannotSendRequest:
connection = HTTPConnectionWithTimeout(server) #retry once
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
connection.request('POST', '/render/local/', postData)
# Read the response
response = connection.getresponse()
assert response.status == 200, "Bad response code %d from %s" % (response.status,server)
contentType = response.getheader('Content-Type')
imageData = response.read()
assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
assert imageData, "Received empty response from %s" % server
# Wrap things up
log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
pool.add(connection)
return imageData
except:
log.exception("Exception while attempting remote rendering request on %s" % server)
log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
continue
def renderLocalView(request):
try:
start = time()
reqParams = StringIO(request.body)
graphType = reqParams.readline().strip()
optionsPickle = reqParams.read()
reqParams.close()
graphClass = GraphTypes[graphType]
options = unpickle.loads(optionsPickle)
image = doImageRender(graphClass, options)
log.rendering("Delegated rendering request took %.6f seconds" % (time() - start))
response = buildResponse(image)
add_never_cache_headers(response)
return response
except:
log.exception("Exception in graphite.render.views.rawrender")
return HttpResponseServerError()
def renderMyGraphView(request,username,graphName):
profile = getProfileByUsername(username)
if not profile:
return errorPage("No such user '%s'" % username)
try:
graph = profile.mygraph_set.get(name=graphName)
except ObjectDoesNotExist:
return errorPage("User %s doesn't have a MyGraph named '%s'" % (username,graphName))
request_params = dict(request.REQUEST.items())
if request_params:
url_parts = urlsplit(graph.url)
query_string = url_parts[3]
if query_string:
url_params = parse_qs(query_string)
# Remove lists so that we can do an update() on the dict
for param, value in url_params.items():
if isinstance(value, list) and param != 'target':
url_params[param] = value[-1]
url_params.update(request_params)
# Handle 'target' being a list - we want duplicate &target params out of it
url_param_pairs = []
for key,val in url_params.items():
if isinstance(val, list):
for v in val:
url_param_pairs.append( (key,v) )
else:
url_param_pairs.append( (key,val) )
query_string = urlencode(url_param_pairs)
url = urlunsplit(url_parts[:3] + (query_string,) + url_parts[4:])
else:
url = graph.url
return HttpResponseRedirect(url)
def doImageRender(graphClass, graphOptions):
pngData = StringIO()
t = time()
img = graphClass(**graphOptions)
img.output(pngData)
log.rendering('Rendered PNG in %.6f seconds' % (time() - t))
imageData = pngData.getvalue()
pngData.close()
return imageData
def buildResponse(imageData, content_type="image/png"):
return HttpResponse(imageData, content_type=content_type)
def errorPage(message):
template = loader.get_template('500.html')
context = Context(dict(message=message))
return HttpResponseServerError( template.render(context) )
| 1 | 9,874 | Since we're determining the local node's host info in the `Store()` init, we need to import the `STORE` object here. Is this OK, or should I be determining the host info earlier. (When parsing settings?) | graphite-project-graphite-web | py |
@@ -15,7 +15,7 @@ public interface ASTAnyTypeBodyDeclaration extends JavaNode {
/**
* Returns the child of this declaration,
* which can be cast to a more specific node
- * type using {@link #getKind()} as a cue.
+ * type using #getKind() as a cue.
*
* <p>Returns null if this is an empty declaration,
* that is, a single semicolon. | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.java.ast;
/**
* Marker interface for type body declarations, such as annotation members, field or method declarations.
*
* @author Clément Fournier
*/
public interface ASTAnyTypeBodyDeclaration extends JavaNode {
/**
* Returns the child of this declaration,
* which can be cast to a more specific node
* type using {@link #getKind()} as a cue.
*
* <p>Returns null if this is an empty declaration,
* that is, a single semicolon.
*/
JavaNode getDeclarationNode();
/**
* Gets the kind of declaration this node contains.
* This is a cue for the node type the child of this
* declaration can be cast to.
*/
DeclarationKind getKind();
/**
* Kind of declaration. This is not deprecated because the node will
* go away entirely in 7.0.0 and one cannot avoid using it on master.
* See TypeKind for the reasons for deprecation.
*/
enum DeclarationKind {
/** See {@link ASTInitializer}. */
INITIALIZER,
/** See {@link ASTConstructorDeclaration}. */
CONSTRUCTOR,
/** See {@link ASTMethodDeclaration}. */
METHOD,
/** See {@link ASTFieldDeclaration}. */
FIELD,
/** See {@link ASTAnnotationMethodDeclaration}. */
ANNOTATION_METHOD,
/** See {@link ASTClassOrInterfaceDeclaration}. */
CLASS,
/** See {@link ASTEnumDeclaration}. */
ENUM,
/** See {@link ASTClassOrInterfaceDeclaration}. */
INTERFACE,
/** See {@link ASTAnnotationTypeDeclaration}. */
ANNOTATION,
/** No child, {@link #getDeclarationNode()} will return null. */
EMPTY
}
}
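// --- Editor's note: illustrative sketch, not part of the original file. ---
// The review note below suggests that, with getKind() gone, callers would test
// the concrete node type with instanceof instead. A caller-side sketch using
// types from this package (an illustration, not code from the PMD sources):
final class DeclarationDispatchSketch {
    static void handle(ASTAnyTypeBodyDeclaration declaration) {
        JavaNode decl = declaration.getDeclarationNode();
        if (decl == null) {
            // empty declaration, i.e. a single semicolon
        } else if (decl instanceof ASTMethodDeclaration) {
            // cast and handle a method declaration
        } else if (decl instanceof ASTFieldDeclaration) {
            // cast and handle a field declaration
        }
    }
}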
| 1 | 16,953 | I think, there is no `#getKind()` method anymore, is it? The approach would now be testing with instanceof. Or could we return a more specific type than JavaNode now? | pmd-pmd | java |
@@ -60,6 +60,15 @@ module.exports = (props) => {
title: props.title
})}
</div>
+ <button class="UppyButton--circular UppyButton--blue Browser-doneBtn"
+ type="button"
+ aria-label="Done picking files"
+ title="Done picking files"
+ onclick=${props.done}>
+ <svg aria-hidden="true" class="UppyIcon" width="13px" height="9px" viewBox="0 0 13 9">
+ <polygon points="5 7.293 1.354 3.647 0.646 4.354 5 8.707 12.354 1.354 11.646 0.647" />
+ </svg>
+ </button>
</div>
`
} | 1 | const html = require('yo-yo')
const Breadcrumbs = require('./Breadcrumbs')
const Table = require('./Table')
module.exports = (props) => {
let filteredFolders = props.folders
let filteredFiles = props.files
if (props.filterInput !== '') {
filteredFolders = props.filterItems(props.folders)
filteredFiles = props.filterItems(props.files)
}
return html`
<div class="Browser Browser-viewType--${props.viewType}">
<header class="Browser-header">
<div class="Browser-search" aria-hidden="${!props.isSearchVisible}">
<input type="text" class="Browser-searchInput" placeholder="Search"
onkeyup=${props.filterQuery} value="${props.filterInput}"/>
<button type="button" class="Browser-searchClose"
onclick=${props.toggleSearch}>
<svg class="UppyIcon" viewBox="0 0 19 19">
<path d="M17.318 17.232L9.94 9.854 9.586 9.5l-.354.354-7.378 7.378h.707l-.62-.62v.706L9.318 9.94l.354-.354-.354-.354L1.94 1.854v.707l.62-.62h-.706l7.378 7.378.354.354.354-.354 7.378-7.378h-.707l.622.62v-.706L9.854 9.232l-.354.354.354.354 7.378 7.378.708-.707-7.38-7.378v.708l7.38-7.38.353-.353-.353-.353-.622-.622-.353-.353-.354.352-7.378 7.38h.708L2.56 1.23 2.208.88l-.353.353-.622.62-.353.355.352.353 7.38 7.38v-.708l-7.38 7.38-.353.353.352.353.622.622.353.353.354-.353 7.38-7.38h-.708l7.38 7.38z"/>
</svg>
</button>
</div>
<div class="Browser-headerBar">
<button type="button" class="Browser-searchToggle"
onclick=${props.toggleSearch}>
<svg class="UppyIcon" viewBox="0 0 100 100">
<path d="M87.533 80.03L62.942 55.439c3.324-4.587 5.312-10.207 5.312-16.295 0-.312-.043-.611-.092-.908.05-.301.093-.605.093-.922 0-15.36-12.497-27.857-27.857-27.857-.273 0-.536.043-.799.08-.265-.037-.526-.08-.799-.08-15.361 0-27.858 12.497-27.858 27.857 0 .312.042.611.092.909a5.466 5.466 0 0 0-.093.921c0 15.36 12.496 27.858 27.857 27.858.273 0 .535-.043.8-.081.263.038.524.081.798.081 5.208 0 10.071-1.464 14.245-3.963L79.582 87.98a5.603 5.603 0 0 0 3.976 1.647 5.621 5.621 0 0 0 3.975-9.597zM39.598 55.838c-.265-.038-.526-.081-.8-.081-9.16 0-16.612-7.452-16.612-16.612 0-.312-.042-.611-.092-.908.051-.301.093-.605.093-.922 0-9.16 7.453-16.612 16.613-16.612.272 0 .534-.042.799-.079.263.037.525.079.799.079 9.16 0 16.612 7.452 16.612 16.612 0 .312.043.611.092.909-.05.301-.094.604-.094.921 0 9.16-7.452 16.612-16.612 16.612-.274 0-.536.043-.798.081z"/>
</svg>
</button>
${Breadcrumbs({
getFolder: props.getFolder,
directories: props.directories,
title: props.title
})}
<button type="button" onclick=${props.logout} class="Browser-userLogout">Log out</button>
</div>
</header>
<div class="Browser-body">
${Table({
columns: [{
name: 'Name',
key: 'title'
}],
folders: filteredFolders,
files: filteredFiles,
activeRow: props.isActiveRow,
sortByTitle: props.sortByTitle,
sortByDate: props.sortByDate,
handleFileClick: props.addFile,
handleFolderClick: props.getNextFolder,
isChecked: props.isChecked,
toggleCheckbox: props.toggleCheckbox,
getItemName: props.getItemName,
getItemIcon: props.getItemIcon,
handleScroll: props.handleScroll,
title: props.title
})}
</div>
</div>
`
}
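// --- Editor's note: illustrative sketch, not part of the original file. ---
// The review note below asks whether the hard-coded "Done picking files" strings
// added in the patch can be localised. Assuming a translator is passed down as
// `props.i18n` (an assumption; generic-provider-views may not receive one), the
// done button could be rendered like this. The 'doneButtonLabel' key is made up
// for the sketch, and `html` is the yo-yo template tag required at the top of
// this file.
const renderDoneButton = (props) => {
  const label = props.i18n ? props.i18n('doneButtonLabel') : 'Done picking files'
  return html`
    <button class="UppyButton--circular UppyButton--blue Browser-doneBtn"
      type="button"
      aria-label="${label}"
      title="${label}"
      onclick=${props.done}></button>
  `
}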
| 1 | 10,198 | can we localise these? not sure if we have easy access to a translator instance from `generic-provider-views` | transloadit-uppy | js |
@@ -94,3 +94,14 @@ const (
NFC1_PIN Pin = 9
NFC2_PIN Pin = 10
)
+
+// USB CDC identifiers
+const (
+ usb_STRING_PRODUCT = "Particle Argon"
+ usb_STRING_MANUFACTURER = "Particle"
+)
+
+var (
+ usb_VID uint16 = 0x239A
+ usb_PID uint16 = 0x8029
+) | 1 | // +build particle_argon
package machine
const HasLowFrequencyCrystal = true
// More info: https://docs.particle.io/datasheets/wi-fi/argon-datasheet/
// Board diagram: https://docs.particle.io/assets/images/argon/argon-block-diagram.png
// LEDs
const (
LED Pin = 44
LED_GREEN Pin = 14
LED_RED Pin = 13
LED_BLUE Pin = 15
)
// GPIOs
const (
A0 Pin = 3
A1 Pin = 4
A2 Pin = 28
A3 Pin = 29
A4 Pin = 30
A5 Pin = 31
D0 Pin = 26 // Also SDA
D1 Pin = 27 // Also SCL
D2 Pin = 33
D3 Pin = 34
D4 Pin = 40
D5 Pin = 42
D6 Pin = 43
D7 Pin = 44 // Also LED
D8 Pin = 35
D9 Pin = 6 // Also TX
D10 Pin = 8 // Also RX
D11 Pin = 46 // Also MISO
D12 Pin = 45 // Also MOSI
D13 Pin = 47 // Also SCK
)
// UART
var (
Serial = USB
UART0 = NRF_UART0
)
const (
UART_TX_PIN Pin = 6
UART_RX_PIN Pin = 8
)
// I2C pins
const (
SDA_PIN Pin = 26
SCL_PIN Pin = 27
)
// SPI pins
const (
SPI0_SCK_PIN Pin = 47
SPI0_MOSI_PIN Pin = 45
SPI0_MISO_PIN Pin = 46
)
// Internal 4MB SPI Flash
const (
SPI1_SCK_PIN Pin = 19
SPI1_MOSI_PIN Pin = 20
SPI1_MISO_PIN Pin = 21
SPI1_CS_PIN Pin = 17
SPI1_WP_PIN Pin = 22
SPI1_HOLD_PIN Pin = 23
)
// ESP32 coprocessor
const (
ESP32_TXD_PIN Pin = 36
ESP32_RXD_PIN Pin = 37
ESP32_CTS_PIN Pin = 39
ESP32_RTS_PIN Pin = 38
ESP32_BOOT_MODE_PIN Pin = 16
ESP32_WIFI_EN_PIN Pin = 24
ESP32_HOST_WK_PIN Pin = 7
)
// Other peripherals
const (
MODE_BUTTON_PIN Pin = 11
CHARGE_STATUS_PIN Pin = 41
LIPO_VOLTAGE_PIN Pin = 5
PCB_ANTENNA_PIN Pin = 2
EXTERNAL_UFL_PIN Pin = 25
NFC1_PIN Pin = 9
NFC2_PIN Pin = 10
)
| 1 | 8,823 | This is the VID from Adafruit. Doesn't Particle have a different VID? (CC @suda). | tinygo-org-tinygo | go |
@@ -97,8 +97,7 @@ static void parseBlock (lexerState *lexer, bool delim, int kind, vString *scope)
/* Resets the scope string to the old length */
static void resetScope (vString *scope, size_t old_len)
{
- scope->length = old_len;
- scope->buffer[old_len] = '\0';
+ vStringTruncate (scope, old_len);
}
/* Adds a name to the end of the scope string */ | 1 | /*
*
* This source code is released for free distribution under the terms of the
* GNU General Public License version 2 or (at your option) any later version.
*
* This module contains functions for generating tags for Rust files.
*/
/*
* INCLUDE FILES
*/
#include "general.h" /* must always come first */
#include "main.h"
#include <string.h>
#include "keyword.h"
#include "parse.h"
#include "entry.h"
#include "options.h"
#include "read.h"
#include "routines.h"
#include "vstring.h"
/*
* MACROS
*/
#define MAX_STRING_LENGTH 256
/*
* DATA DECLARATIONS
*/
typedef enum {
K_MOD,
K_STRUCT,
K_TRAIT,
K_IMPL,
K_FN,
K_ENUM,
K_TYPE,
K_STATIC,
K_MACRO,
K_FIELD,
K_VARIANT,
K_METHOD,
K_NONE
} RustKind;
static kindDefinition rustKinds[] = {
{true, 'n', "module", "module"},
{true, 's', "struct", "structural type"},
{true, 'i', "interface", "trait interface"},
{true, 'c', "implementation", "implementation"},
{true, 'f', "function", "Function"},
{true, 'g', "enum", "Enum"},
{true, 't', "typedef", "Type Alias"},
{true, 'v', "variable", "Global variable"},
{true, 'M', "macro", "Macro Definition"},
{true, 'm', "field", "A struct field"},
{true, 'e', "enumerator", "An enum variant"},
{true, 'P', "method", "A method"},
};
typedef enum {
TOKEN_WHITESPACE,
TOKEN_STRING,
TOKEN_IDENT,
TOKEN_LSHIFT,
TOKEN_RSHIFT,
TOKEN_RARROW,
TOKEN_EOF
} tokenType;
typedef struct {
/* Characters */
int cur_c;
int next_c;
/* Tokens */
int cur_token;
vString* token_str;
unsigned long line;
MIOPos pos;
} lexerState;
/*
* FUNCTION PROTOTYPES
*/
static void parseBlock (lexerState *lexer, bool delim, int kind, vString *scope);
/*
* FUNCTION DEFINITIONS
*/
/* Resets the scope string to the old length */
static void resetScope (vString *scope, size_t old_len)
{
scope->length = old_len;
scope->buffer[old_len] = '\0';
}
/* Adds a name to the end of the scope string */
static void addToScope (vString *scope, vString *name)
{
if (vStringLength(scope) > 0)
vStringCatS(scope, "::");
vStringCat(scope, name);
}
/* Write the lexer's current token to string, taking care of special tokens */
static void writeCurTokenToStr (lexerState *lexer, vString *out_str)
{
switch (lexer->cur_token)
{
case TOKEN_IDENT:
vStringCat(out_str, lexer->token_str);
break;
case TOKEN_STRING:
vStringCat(out_str, lexer->token_str);
break;
case TOKEN_WHITESPACE:
vStringPut(out_str, ' ');
break;
case TOKEN_LSHIFT:
vStringCatS(out_str, "<<");
break;
case TOKEN_RSHIFT:
vStringCatS(out_str, ">>");
break;
case TOKEN_RARROW:
vStringCatS(out_str, "->");
break;
default:
vStringPut(out_str, (char) lexer->cur_token);
}
}
/* Reads a character from the file */
static void advanceChar (lexerState *lexer)
{
lexer->cur_c = lexer->next_c;
lexer->next_c = getcFromInputFile();
}
/* Reads N characters from the file */
static void advanceNChar (lexerState *lexer, int n)
{
while (n--)
advanceChar(lexer);
}
/* Store the current character in lexerState::token_str if there is space
* (set by MAX_STRING_LENGTH), and then read the next character from the file */
static void advanceAndStoreChar (lexerState *lexer)
{
if (vStringLength(lexer->token_str) < MAX_STRING_LENGTH)
vStringPut(lexer->token_str, (char) lexer->cur_c);
advanceChar(lexer);
}
static bool isWhitespace (int c)
{
return c == ' ' || c == '\t' || c == '\r' || c == '\n';
}
static bool isAscii (int c)
{
return (c >= 0) && (c < 0x80);
}
/* This isn't quite right for Unicode identifiers */
static bool isIdentifierStart (int c)
{
return (isAscii(c) && (isalpha(c) || c == '_')) || !isAscii(c);
}
/* This isn't quite right for Unicode identifiers */
static bool isIdentifierContinue (int c)
{
return (isAscii(c) && (isalnum(c) || c == '_')) || !isAscii(c);
}
static void scanWhitespace (lexerState *lexer)
{
while (isWhitespace(lexer->cur_c))
advanceChar(lexer);
}
/* Normal line comments start with two /'s and continue until the next \n
* (potentially after a \r). Additionally, a shebang in the beginning of the
* file also counts as a line comment as long as it is not this sequence: #![ .
* Block comments start with / followed by a * and end with a * followed by a /.
* Unlike in C/C++ they nest. */
static void scanComments (lexerState *lexer)
{
/* // */
if (lexer->next_c == '/')
{
advanceNChar(lexer, 2);
while (lexer->cur_c != EOF && lexer->cur_c != '\n')
advanceChar(lexer);
}
/* #! */
else if (lexer->next_c == '!')
{
advanceNChar(lexer, 2);
/* If it is exactly #![ then it is not a comment, but an attribute */
if (lexer->cur_c == '[')
return;
while (lexer->cur_c != EOF && lexer->cur_c != '\n')
advanceChar(lexer);
}
/* block comment */
else if (lexer->next_c == '*')
{
int level = 1;
advanceNChar(lexer, 2);
while (lexer->cur_c != EOF && level > 0)
{
if (lexer->cur_c == '*' && lexer->next_c == '/')
{
level--;
advanceNChar(lexer, 2);
}
else if (lexer->cur_c == '/' && lexer->next_c == '*')
{
level++;
advanceNChar(lexer, 2);
}
else
{
advanceChar(lexer);
}
}
}
}
static void scanIdentifier (lexerState *lexer)
{
vStringClear(lexer->token_str);
do
{
advanceAndStoreChar(lexer);
} while(lexer->cur_c != EOF && isIdentifierContinue(lexer->cur_c));
}
/* Double-quoted strings, we only care about the \" escape. These
* last past the end of the line, so be careful not too store too much
* of them (see MAX_STRING_LENGTH). The only place we look at their
* contents is in the function definitions, and there the valid strings are
* things like "C" and "Rust" */
static void scanString (lexerState *lexer)
{
vStringClear(lexer->token_str);
advanceAndStoreChar(lexer);
while (lexer->cur_c != EOF && lexer->cur_c != '"')
{
if (lexer->cur_c == '\\' && lexer->next_c == '"')
advanceAndStoreChar(lexer);
advanceAndStoreChar(lexer);
}
advanceAndStoreChar(lexer);
}
/* Raw strings look like this: r"" or r##""## where the number of
* hashes must match */
static void scanRawString (lexerState *lexer)
{
size_t num_initial_hashes = 0;
vStringClear(lexer->token_str);
advanceAndStoreChar(lexer);
/* Count how many leading hashes there are */
while (lexer->cur_c == '#')
{
num_initial_hashes++;
advanceAndStoreChar(lexer);
}
if (lexer->cur_c != '"')
return;
advanceAndStoreChar(lexer);
while (lexer->cur_c != EOF)
{
/* Count how many trailing hashes there are. If the number is equal or more
* than the number of leading hashes, break. */
if (lexer->cur_c == '"')
{
size_t num_trailing_hashes = 0;
advanceAndStoreChar(lexer);
while (lexer->cur_c == '#' && num_trailing_hashes < num_initial_hashes)
{
num_trailing_hashes++;
advanceAndStoreChar(lexer);
}
if (num_trailing_hashes == num_initial_hashes)
break;
}
else
{
advanceAndStoreChar(lexer);
}
}
}
/* This deals with character literals: 'n', '\n', '\uFFFF'; and lifetimes:
* 'lifetime. We'll use this approximate regexp for the literals:
* \' \\ [^']+ \' or \' [^'] \' or \' \\ \' \'. Either way, we'll treat this
* token as a string, so it gets preserved as is for function signatures with
* lifetimes. */
static void scanCharacterOrLifetime (lexerState *lexer)
{
vStringClear(lexer->token_str);
advanceAndStoreChar(lexer);
if (lexer->cur_c == '\\')
{
advanceAndStoreChar(lexer);
/* The \' \\ \' \' (literally '\'') case */
if (lexer->cur_c == '\'' && lexer->next_c == '\'')
{
advanceAndStoreChar(lexer);
advanceAndStoreChar(lexer);
}
/* The \' \\ [^']+ \' case */
else
{
while (lexer->cur_c != EOF && lexer->cur_c != '\'')
advanceAndStoreChar(lexer);
}
}
/* The \' [^'] \' case */
else if (lexer->cur_c != '\'' && lexer->next_c == '\'')
{
advanceAndStoreChar(lexer);
advanceAndStoreChar(lexer);
}
/* Otherwise it is malformed, or a lifetime */
}
/* Advances the parser one token, optionally skipping whitespace
* (otherwise it is concatenated and returned as a single whitespace token).
* Whitespace is needed to properly render function signatures. Unrecognized
* token starts are stored literally, e.g. token may equal to a character '#'. */
static int advanceToken (lexerState *lexer, bool skip_whitspace)
{
bool have_whitespace = false;
lexer->line = getInputLineNumber();
lexer->pos = getInputFilePosition();
while (lexer->cur_c != EOF)
{
if (isWhitespace(lexer->cur_c))
{
scanWhitespace(lexer);
have_whitespace = true;
}
else if (lexer->cur_c == '/' && (lexer->next_c == '/' || lexer->next_c == '*'))
{
scanComments(lexer);
have_whitespace = true;
}
else
{
if (have_whitespace && !skip_whitspace)
return lexer->cur_token = TOKEN_WHITESPACE;
break;
}
}
lexer->line = getInputLineNumber();
lexer->pos = getInputFilePosition();
while (lexer->cur_c != EOF)
{
if (lexer->cur_c == '"')
{
scanString(lexer);
return lexer->cur_token = TOKEN_STRING;
}
else if (lexer->cur_c == 'r' && (lexer->next_c == '#' || lexer->next_c == '"'))
{
scanRawString(lexer);
return lexer->cur_token = TOKEN_STRING;
}
else if (lexer->cur_c == '\'')
{
scanCharacterOrLifetime(lexer);
return lexer->cur_token = TOKEN_STRING;
}
else if (isIdentifierStart(lexer->cur_c))
{
scanIdentifier(lexer);
return lexer->cur_token = TOKEN_IDENT;
}
/* These shift tokens aren't too important for tag-generation per se,
* but they confuse the skipUntil code which tracks the <> pairs. */
else if (lexer->cur_c == '>' && lexer->next_c == '>')
{
advanceNChar(lexer, 2);
return lexer->cur_token = TOKEN_RSHIFT;
}
else if (lexer->cur_c == '<' && lexer->next_c == '<')
{
advanceNChar(lexer, 2);
return lexer->cur_token = TOKEN_LSHIFT;
}
else if (lexer->cur_c == '-' && lexer->next_c == '>')
{
advanceNChar(lexer, 2);
return lexer->cur_token = TOKEN_RARROW;
}
else
{
int c = lexer->cur_c;
advanceChar(lexer);
return lexer->cur_token = c;
}
}
return lexer->cur_token = TOKEN_EOF;
}
static void initLexer (lexerState *lexer)
{
advanceNChar(lexer, 2);
lexer->token_str = vStringNew();
if (lexer->cur_c == '#' && lexer->next_c == '!')
scanComments(lexer);
advanceToken(lexer, true);
}
static void deInitLexer (lexerState *lexer)
{
vStringDelete(lexer->token_str);
lexer->token_str = NULL;
}
static void addTag (vString* ident, const char* arg_list, int kind, unsigned long line, MIOPos pos, vString *scope, int parent_kind)
{
if (kind == K_NONE || ! rustKinds[kind].enabled)
return;
tagEntryInfo tag;
initTagEntry(&tag, ident->buffer, kind);
tag.lineNumber = line;
tag.filePosition = pos;
tag.extensionFields.signature = arg_list;
/*tag.extensionFields.varType = type;*/ /* FIXME: map to typeRef[1]? */
if (parent_kind != K_NONE)
{
tag.extensionFields.scopeKindIndex = parent_kind;
tag.extensionFields.scopeName = scope->buffer;
}
makeTagEntry(&tag);
}
/* Skip tokens until one of the goal tokens is hit. Escapes when level = 0 if there are no goal tokens.
* Keeps track of balanced <>'s, ()'s, []'s, and {}'s and ignores the goal tokens within those pairings */
static void skipUntil (lexerState *lexer, int goal_tokens[], int num_goal_tokens)
{
int angle_level = 0;
int paren_level = 0;
int brace_level = 0;
int bracket_level = 0;
while (lexer->cur_token != TOKEN_EOF)
{
if (angle_level == 0 && paren_level == 0 && brace_level == 0
&& bracket_level == 0)
{
int ii = 0;
for(ii = 0; ii < num_goal_tokens; ii++)
{
if (lexer->cur_token == goal_tokens[ii])
{
break;
}
}
if (ii < num_goal_tokens)
break;
}
switch (lexer->cur_token)
{
case '<':
angle_level++;
break;
case '(':
paren_level++;
break;
case '{':
brace_level++;
break;
case '[':
bracket_level++;
break;
case '>':
angle_level--;
break;
case ')':
paren_level--;
break;
case '}':
brace_level--;
break;
case ']':
bracket_level--;
break;
case TOKEN_RSHIFT:
if (angle_level >= 2)
angle_level -= 2;
break;
/* TOKEN_LSHIFT is never interpreted as two <'s in valid Rust code */
default:
break;
}
/* Has to be after the token switch to catch the case when we start with the initial level token */
if (num_goal_tokens == 0 && angle_level == 0 && paren_level == 0 && brace_level == 0
&& bracket_level == 0)
break;
advanceToken(lexer, true);
}
}
/* Function format:
* "fn" <ident>[<type_bounds>] "(" [<args>] ")" ["->" <ret_type>] "{" [<body>] "}"*/
static void parseFn (lexerState *lexer, vString *scope, int parent_kind)
{
int kind = (parent_kind == K_TRAIT || parent_kind == K_IMPL) ? K_METHOD : K_FN;
vString *name;
vString *arg_list;
unsigned long line;
MIOPos pos;
int paren_level = 0;
bool found_paren = false;
bool valid_signature = true;
advanceToken(lexer, true);
if (lexer->cur_token != TOKEN_IDENT)
return;
name = vStringNewCopy(lexer->token_str);
arg_list = vStringNew();
line = lexer->line;
pos = lexer->pos;
advanceToken(lexer, true);
/* HACK: This is a bit coarse as far as what tag entry means by
* 'arglist'... */
while (lexer->cur_token != '{' && lexer->cur_token != ';')
{
if (lexer->cur_token == '}')
{
valid_signature = false;
break;
}
else if (lexer->cur_token == '(')
{
found_paren = true;
paren_level++;
}
else if (lexer->cur_token == ')')
{
paren_level--;
if (paren_level < 0)
{
valid_signature = false;
break;
}
}
else if (lexer->cur_token == TOKEN_EOF)
{
valid_signature = false;
break;
}
writeCurTokenToStr(lexer, arg_list);
advanceToken(lexer, false);
}
if (!found_paren || paren_level != 0)
valid_signature = false;
if (valid_signature)
{
vStringStripTrailing(arg_list);
addTag(name, arg_list->buffer, kind, line, pos, scope, parent_kind);
addToScope(scope, name);
parseBlock(lexer, true, kind, scope);
}
vStringDelete(name);
vStringDelete(arg_list);
}
/* Mod format:
* "mod" <ident> "{" [<body>] "}"
* "mod" <ident> ";"*/
static void parseMod (lexerState *lexer, vString *scope, int parent_kind)
{
advanceToken(lexer, true);
if (lexer->cur_token != TOKEN_IDENT)
return;
addTag(lexer->token_str, NULL, K_MOD, lexer->line, lexer->pos, scope, parent_kind);
addToScope(scope, lexer->token_str);
advanceToken(lexer, true);
parseBlock(lexer, true, K_MOD, scope);
}
/* Trait format:
* "trait" <ident> [<type_bounds>] "{" [<body>] "}"
*/
static void parseTrait (lexerState *lexer, vString *scope, int parent_kind)
{
int goal_tokens[] = {'{'};
advanceToken(lexer, true);
if (lexer->cur_token != TOKEN_IDENT)
return;
addTag(lexer->token_str, NULL, K_TRAIT, lexer->line, lexer->pos, scope, parent_kind);
addToScope(scope, lexer->token_str);
advanceToken(lexer, true);
skipUntil(lexer, goal_tokens, 1);
parseBlock(lexer, true, K_TRAIT, scope);
}
/* Skips type blocks of the form <T:T<T>, ...> */
static void skipTypeBlock (lexerState *lexer)
{
if (lexer->cur_token == '<')
{
skipUntil(lexer, NULL, 0);
advanceToken(lexer, true);
}
}
/* Essentially grabs the last ident before 'for', '<' and '{', which
* tends to correspond to what we want as the impl tag entry name */
static void parseQualifiedType (lexerState *lexer, vString* name)
{
while (lexer->cur_token != TOKEN_EOF)
{
if (lexer->cur_token == TOKEN_IDENT)
{
if (strcmp(lexer->token_str->buffer, "for") == 0
|| strcmp(lexer->token_str->buffer, "where") == 0)
break;
vStringClear(name);
vStringCat(name, lexer->token_str);
}
else if (lexer->cur_token == '<' || lexer->cur_token == '{')
{
break;
}
advanceToken(lexer, true);
}
skipTypeBlock(lexer);
}
/* Impl format:
* "impl" [<type_bounds>] <qualified_ident>[<type_bounds>] ["for" <qualified_ident>[<type_bounds>]] "{" [<body>] "}"
*/
static void parseImpl (lexerState *lexer, vString *scope, int parent_kind)
{
unsigned long line;
MIOPos pos;
vString *name;
advanceToken(lexer, true);
line = lexer->line;
pos = lexer->pos;
skipTypeBlock(lexer);
name = vStringNew();
parseQualifiedType(lexer, name);
if (lexer->cur_token == TOKEN_IDENT && strcmp(lexer->token_str->buffer, "for") == 0)
{
advanceToken(lexer, true);
parseQualifiedType(lexer, name);
}
addTag(name, NULL, K_IMPL, line, pos, scope, parent_kind);
addToScope(scope, name);
parseBlock(lexer, true, K_IMPL, scope);
vStringDelete(name);
}
/* Static format:
* "static" ["mut"] <ident>
*/
static void parseStatic (lexerState *lexer, vString *scope, int parent_kind)
{
advanceToken(lexer, true);
if (lexer->cur_token != TOKEN_IDENT)
return;
if (strcmp(lexer->token_str->buffer, "mut") == 0)
{
advanceToken(lexer, true);
}
if (lexer->cur_token != TOKEN_IDENT)
return;
addTag(lexer->token_str, NULL, K_STATIC, lexer->line, lexer->pos, scope, parent_kind);
}
/* Type format:
* "type" <ident>
*/
static void parseType (lexerState *lexer, vString *scope, int parent_kind)
{
advanceToken(lexer, true);
if (lexer->cur_token != TOKEN_IDENT)
return;
addTag(lexer->token_str, NULL, K_TYPE, lexer->line, lexer->pos, scope, parent_kind);
}
/* Structs and enums are very similar syntax-wise.
* It is possible to parse variants a bit more cleverly (e.g. make tuple variants functions and
* struct variants structs) but it'd be too clever and the signature wouldn't make too much sense without
* the enum's definition (e.g. for the type bounds)
*
* Struct/Enum format:
* "struct/enum" <ident>[<type_bounds>] "{" [<ident>,]+ "}"
* "struct/enum" <ident>[<type_bounds>] ";"
* */
static void parseStructOrEnum (lexerState *lexer, vString *scope, int parent_kind, bool is_struct)
{
int kind = is_struct ? K_STRUCT : K_ENUM;
int field_kind = is_struct ? K_FIELD : K_VARIANT;
int goal_tokens1[] = {';', '{'};
advanceToken(lexer, true);
if (lexer->cur_token != TOKEN_IDENT)
return;
addTag(lexer->token_str, NULL, kind, lexer->line, lexer->pos, scope, parent_kind);
addToScope(scope, lexer->token_str);
skipUntil(lexer, goal_tokens1, 2);
if (lexer->cur_token == '{')
{
vString *field_name = vStringNew();
while (lexer->cur_token != TOKEN_EOF)
{
int goal_tokens2[] = {'}', ','};
/* Skip attributes. Format:
* #[..] or #![..]
* */
if (lexer->cur_token == '#')
{
advanceToken(lexer, true);
if (lexer->cur_token == '!')
advanceToken(lexer, true);
if (lexer->cur_token == '[')
{
/* It's an attribute, skip it. */
skipUntil(lexer, NULL, 0);
}
else
{
/* Something's up with this field, skip to the next one */
skipUntil(lexer, goal_tokens2, 2);
continue;
}
}
if (lexer->cur_token == TOKEN_IDENT)
{
if (strcmp(lexer->token_str->buffer, "priv") == 0
|| strcmp(lexer->token_str->buffer, "pub") == 0)
{
advanceToken(lexer, true);
if (lexer->cur_token != TOKEN_IDENT)
{
/* Something's up with this field, skip to the next one */
skipUntil(lexer, goal_tokens2, 2);
continue;
}
}
vStringClear(field_name);
vStringCat(field_name, lexer->token_str);
addTag(field_name, NULL, field_kind, lexer->line, lexer->pos, scope, kind);
skipUntil(lexer, goal_tokens2, 2);
}
if (lexer->cur_token == '}')
{
advanceToken(lexer, true);
break;
}
advanceToken(lexer, true);
}
vStringDelete(field_name);
}
}
/* Skip the body of the macro. Can't use skipUntil here as
* the body of the macro may have arbitrary code which confuses it (e.g.
* bitshift operators/function return arrows) */
static void skipMacro (lexerState *lexer)
{
int level = 0;
int plus_token = 0;
int minus_token = 0;
advanceToken(lexer, true);
switch (lexer->cur_token)
{
case '(':
plus_token = '(';
minus_token = ')';
break;
case '{':
plus_token = '{';
minus_token = '}';
break;
case '[':
plus_token = '[';
minus_token = ']';
break;
default:
return;
}
while (lexer->cur_token != TOKEN_EOF)
{
if (lexer->cur_token == plus_token)
level++;
else if (lexer->cur_token == minus_token)
level--;
if (level == 0)
break;
advanceToken(lexer, true);
}
advanceToken(lexer, true);
}
/*
* Macro rules format:
* "macro_rules" "!" <ident> <macro_body>
*/
static void parseMacroRules (lexerState *lexer, vString *scope, int parent_kind)
{
advanceToken(lexer, true);
if (lexer->cur_token != '!')
return;
advanceToken(lexer, true);
if (lexer->cur_token != TOKEN_IDENT)
return;
addTag(lexer->token_str, NULL, K_MACRO, lexer->line, lexer->pos, scope, parent_kind);
skipMacro(lexer);
}
/*
* Rust is very liberal with nesting, so this function is used pretty much for any block
*/
static void parseBlock (lexerState *lexer, bool delim, int kind, vString *scope)
{
int level = 1;
if (delim)
{
if (lexer->cur_token != '{')
return;
advanceToken(lexer, true);
}
while (lexer->cur_token != TOKEN_EOF)
{
if (lexer->cur_token == TOKEN_IDENT)
{
size_t old_scope_len = vStringLength(scope);
if (strcmp(lexer->token_str->buffer, "fn") == 0)
{
parseFn(lexer, scope, kind);
}
else if(strcmp(lexer->token_str->buffer, "mod") == 0)
{
parseMod(lexer, scope, kind);
}
else if(strcmp(lexer->token_str->buffer, "static") == 0)
{
parseStatic(lexer, scope, kind);
}
else if(strcmp(lexer->token_str->buffer, "trait") == 0)
{
parseTrait(lexer, scope, kind);
}
else if(strcmp(lexer->token_str->buffer, "type") == 0)
{
parseType(lexer, scope, kind);
}
else if(strcmp(lexer->token_str->buffer, "impl") == 0)
{
parseImpl(lexer, scope, kind);
}
else if(strcmp(lexer->token_str->buffer, "struct") == 0)
{
parseStructOrEnum(lexer, scope, kind, true);
}
else if(strcmp(lexer->token_str->buffer, "enum") == 0)
{
parseStructOrEnum(lexer, scope, kind, false);
}
else if(strcmp(lexer->token_str->buffer, "macro_rules") == 0)
{
parseMacroRules(lexer, scope, kind);
}
else
{
advanceToken(lexer, true);
if (lexer->cur_token == '!')
{
skipMacro(lexer);
}
}
resetScope(scope, old_scope_len);
}
else if (lexer->cur_token == '{')
{
level++;
advanceToken(lexer, true);
}
else if (lexer->cur_token == '}')
{
level--;
advanceToken(lexer, true);
}
else if (lexer->cur_token == '\'')
{
/* Skip over the 'static lifetime, as it confuses the static parser above */
advanceToken(lexer, true);
if (lexer->cur_token == TOKEN_IDENT && strcmp(lexer->token_str->buffer, "static") == 0)
advanceToken(lexer, true);
}
else
{
advanceToken(lexer, true);
}
if (delim && level <= 0)
break;
}
}
static void findRustTags (void)
{
lexerState lexer;
vString* scope = vStringNew();
initLexer(&lexer);
parseBlock(&lexer, false, K_NONE, scope);
vStringDelete(scope);
deInitLexer(&lexer);
}
extern parserDefinition *RustParser (void)
{
static const char *const extensions[] = { "rs", NULL };
parserDefinition *def = parserNew ("Rust");
def->kindTable = rustKinds;
def->kindCount = ARRAY_SIZE (rustKinds);
def->extensions = extensions;
def->parser = findRustTags;
return def;
}
| 1 | 16,156 | The function is only used once, and this body is now only 1 call with the same args (e.g. basically an alias for `vStringTruncate`), it could probably be simply removed. But if you wanna keep the naming for clarity, it also sounds reasonable. | universal-ctags-ctags | c |
@@ -176,8 +176,8 @@ func (k *Kad) manage() {
k.logger.Debugf("could not remove peer from addressbook: %s", peer.String())
}
}
- k.logger.Debugf("error connecting to peer from kademlia %s: %v", bzzAddr.String(), err)
- k.logger.Warningf("connecting to peer %s: %v", bzzAddr.ShortString(), err)
+ k.logger.Debugf("could not connect to peer from kademlia %s: %v", bzzAddr.String(), err)
+ k.logger.Warningf("could not connect to peer from kademlia %s: %v", bzzAddr.ShortString(), err)
// continue to next
return false, false, nil
} | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kademlia
import (
"context"
"encoding/json"
"errors"
"fmt"
"sync"
"time"
"github.com/ethersphere/bee/pkg/addressbook"
"github.com/ethersphere/bee/pkg/discovery"
"github.com/ethersphere/bee/pkg/kademlia/pslice"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/topology"
ma "github.com/multiformats/go-multiaddr"
)
const (
nnLowWatermark = 2 // the number of peers in consecutive deepest bins that constitute as nearest neighbours
maxConnAttempts = 3 // when there is maxConnAttempts failed connect calls for a given peer it is considered non-connectable
)
var (
errMissingAddressBookEntry = errors.New("addressbook underlay entry not found")
errOverlayMismatch = errors.New("overlay mismatch")
timeToRetry = 60 * time.Second
shortRetry = 30 * time.Second
saturationPeers = 4
)
type binSaturationFunc func(bin uint8, peers, connected *pslice.PSlice) bool
// Options for injecting services to Kademlia.
type Options struct {
SaturationFunc binSaturationFunc
Bootnodes []ma.Multiaddr
Standalone bool
}
// Kad is the Swarm forwarding kademlia implementation.
type Kad struct {
base swarm.Address // this node's overlay address
discovery discovery.Driver // the discovery driver
addressBook addressbook.Interface // address book to get underlays
p2p p2p.Service // p2p service to connect to nodes with
saturationFunc binSaturationFunc // pluggable saturation function
connectedPeers *pslice.PSlice // a slice of peers sorted and indexed by po, indexes kept in `bins`
knownPeers *pslice.PSlice // both are po aware slice of addresses
bootnodes []ma.Multiaddr
depth uint8 // current neighborhood depth
depthMu sync.RWMutex // protect depth changes
manageC chan struct{} // trigger the manage forever loop to connect to new peers
waitNext map[string]retryInfo // sanction connections to a peer, key is overlay string and value is a retry information
waitNextMu sync.Mutex // synchronize map
peerSig []chan struct{}
peerSigMtx sync.Mutex
logger logging.Logger // logger
standalone bool
quit chan struct{} // quit channel
done chan struct{} // signal that `manage` has quit
wg sync.WaitGroup
}
type retryInfo struct {
tryAfter time.Time
failedAttempts int
}
// New returns a new Kademlia.
func New(base swarm.Address, addressbook addressbook.Interface, discovery discovery.Driver, p2p p2p.Service, logger logging.Logger, o Options) *Kad {
if o.SaturationFunc == nil {
o.SaturationFunc = binSaturated
}
k := &Kad{
base: base,
discovery: discovery,
addressBook: addressbook,
p2p: p2p,
saturationFunc: o.SaturationFunc,
connectedPeers: pslice.New(int(swarm.MaxBins)),
knownPeers: pslice.New(int(swarm.MaxBins)),
bootnodes: o.Bootnodes,
manageC: make(chan struct{}, 1),
waitNext: make(map[string]retryInfo),
logger: logger,
standalone: o.Standalone,
quit: make(chan struct{}),
done: make(chan struct{}),
wg: sync.WaitGroup{},
}
return k
}
// manage is a forever loop that manages the connection to new peers
// once they get added or once others leave.
func (k *Kad) manage() {
var (
peerToRemove swarm.Address
start time.Time
)
defer k.wg.Done()
defer close(k.done)
ctx, cancel := context.WithCancel(context.Background())
go func() {
<-k.quit
cancel()
}()
for {
select {
case <-k.quit:
return
case <-time.After(30 * time.Second):
// periodically try to connect to new peers
select {
case k.manageC <- struct{}{}:
default:
}
case <-k.manageC:
start = time.Now()
select {
case <-k.quit:
return
default:
}
if k.standalone {
continue
}
err := k.knownPeers.EachBinRev(func(peer swarm.Address, po uint8) (bool, bool, error) {
if k.connectedPeers.Exists(peer) {
return false, false, nil
}
k.waitNextMu.Lock()
if next, ok := k.waitNext[peer.String()]; ok && time.Now().Before(next.tryAfter) {
k.waitNextMu.Unlock()
return false, false, nil
}
k.waitNextMu.Unlock()
currentDepth := k.NeighborhoodDepth()
if saturated := k.saturationFunc(po, k.knownPeers, k.connectedPeers); saturated {
return false, true, nil // bin is saturated, skip to next bin
}
bzzAddr, err := k.addressBook.Get(peer)
if err != nil {
if err == addressbook.ErrNotFound {
k.logger.Debugf("failed to get address book entry for peer: %s", peer.String())
peerToRemove = peer
return false, false, errMissingAddressBookEntry
}
// either a peer is not known in the address book, in which case it
// should be removed, or that some severe I/O problem is at hand
return false, false, err
}
k.logger.Debugf("kademlia dialing to peer %s", peer.String())
err = k.connect(ctx, peer, bzzAddr.Underlay, po)
if err != nil {
if errors.Is(err, errOverlayMismatch) {
k.knownPeers.Remove(peer, po)
if err := k.addressBook.Remove(peer); err != nil {
k.logger.Debugf("could not remove peer from addressbook: %s", peer.String())
}
}
k.logger.Debugf("error connecting to peer from kademlia %s: %v", bzzAddr.String(), err)
k.logger.Warningf("connecting to peer %s: %v", bzzAddr.ShortString(), err)
// continue to next
return false, false, nil
}
k.waitNextMu.Lock()
k.waitNext[peer.String()] = retryInfo{tryAfter: time.Now().Add(shortRetry)}
k.waitNextMu.Unlock()
k.connectedPeers.Add(peer, po)
k.depthMu.Lock()
k.depth = recalcDepth(k.connectedPeers)
k.depthMu.Unlock()
k.logger.Debugf("connected to peer: %s old depth: %d new depth: %d", peer, currentDepth, k.NeighborhoodDepth())
k.notifyPeerSig()
select {
case <-k.quit:
return true, false, nil
default:
}
// the bin could be saturated or not, so a decision cannot
// be made before checking the next peer, so we iterate to next
return false, false, nil
})
k.logger.Tracef("kademlia iterator took %s to finish", time.Since(start))
if err != nil {
if errors.Is(err, errMissingAddressBookEntry) {
po := swarm.Proximity(k.base.Bytes(), peerToRemove.Bytes())
k.knownPeers.Remove(peerToRemove, po)
} else {
k.logger.Errorf("kademlia manage loop iterator: %v", err)
}
}
if k.connectedPeers.Length() == 0 {
k.connectBootnodes(ctx)
}
}
}
}
func (k *Kad) Start(ctx context.Context) error {
k.wg.Add(1)
go k.manage()
addresses, err := k.addressBook.Overlays()
if err != nil {
return fmt.Errorf("addressbook overlays: %w", err)
}
return k.AddPeers(ctx, addresses...)
}
func (k *Kad) connectBootnodes(ctx context.Context) {
var count int
for _, addr := range k.bootnodes {
if count >= 3 {
return
}
if _, err := p2p.Discover(ctx, addr, func(addr ma.Multiaddr) (stop bool, err error) {
k.logger.Tracef("connecting to bootnode %s", addr)
bzzAddress, err := k.p2p.Connect(ctx, addr)
if err != nil {
if !errors.Is(err, p2p.ErrAlreadyConnected) {
k.logger.Debugf("connect fail %s: %v", addr, err)
k.logger.Warningf("connect to bootnode %s", addr)
return false, err
}
return false, nil
}
if err := k.connected(ctx, bzzAddress.Overlay); err != nil {
return false, err
}
k.logger.Tracef("connected to bootnode %s", addr)
count++
// connect to max 3 bootnodes
return count >= 3, nil
}); err != nil {
k.logger.Debugf("discover fail %s: %v", addr, err)
k.logger.Warningf("discover to bootnode %s", addr)
return
}
}
}
// binSaturated indicates whether a certain bin is saturated or not.
// when a bin is not saturated it means we would like to proactively
// initiate connections to other peers in the bin.
func binSaturated(bin uint8, peers, connected *pslice.PSlice) bool {
potentialDepth := recalcDepth(peers)
// short circuit for bins which are >= depth
if bin >= potentialDepth {
return false
}
// lets assume for now that the minimum number of peers in a bin
// would be 2, under which we would always want to connect to new peers
// obviously this should be replaced with a better optimization
// the iterator is used here since when we check if a bin is saturated,
// the plain number of size of bin might not suffice (for example for squared
// gaps measurement)
size := 0
_ = connected.EachBin(func(_ swarm.Address, po uint8) (bool, bool, error) {
if po == bin {
size++
}
return false, false, nil
})
return size >= saturationPeers
}
// recalcDepth calculates and returns the kademlia depth.
func recalcDepth(peers *pslice.PSlice) uint8 {
// handle edge case separately
if peers.Length() <= nnLowWatermark {
return 0
}
var (
peersCtr = uint(0)
candidate = uint8(0)
shallowestEmpty, noEmptyBins = peers.ShallowestEmpty()
)
_ = peers.EachBin(func(_ swarm.Address, po uint8) (bool, bool, error) {
peersCtr++
if peersCtr >= nnLowWatermark {
candidate = po
return true, false, nil
}
return false, false, nil
})
if noEmptyBins || shallowestEmpty > candidate {
return candidate
}
return shallowestEmpty
}
// connect connects to a peer and gossips its address to our connected peers,
// as well as sends the peers we are connected to to the newly connected peer
func (k *Kad) connect(ctx context.Context, peer swarm.Address, ma ma.Multiaddr, po uint8) error {
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
i, err := k.p2p.Connect(ctx, ma)
if err != nil {
if errors.Is(err, p2p.ErrAlreadyConnected) {
return nil
}
k.logger.Debugf("error connecting to peer %s: %v", peer, err)
retryTime := time.Now().Add(timeToRetry)
var e *p2p.ConnectionBackoffError
k.waitNextMu.Lock()
failedAttempts := 0
if errors.As(err, &e) {
retryTime = e.TryAfter()
} else {
info, ok := k.waitNext[peer.String()]
if ok {
failedAttempts = info.failedAttempts
}
failedAttempts++
}
if failedAttempts > maxConnAttempts {
delete(k.waitNext, peer.String())
if err := k.addressBook.Remove(peer); err != nil {
k.logger.Debugf("could not remove peer from addressbook: %s", peer.String())
}
k.logger.Debugf("kademlia pruned peer from address book %s", peer.String())
} else {
k.waitNext[peer.String()] = retryInfo{tryAfter: retryTime, failedAttempts: failedAttempts}
}
k.waitNextMu.Unlock()
return err
}
if !i.Overlay.Equal(peer) {
_ = k.p2p.Disconnect(peer)
_ = k.p2p.Disconnect(i.Overlay)
return errOverlayMismatch
}
return k.announce(ctx, peer)
}
// announce a newly connected peer to our connected peers, but also
// notify the peer about our already connected peers
func (k *Kad) announce(ctx context.Context, peer swarm.Address) error {
addrs := []swarm.Address{}
_ = k.connectedPeers.EachBinRev(func(connectedPeer swarm.Address, _ uint8) (bool, bool, error) {
if connectedPeer.Equal(peer) {
return false, false, nil
}
addrs = append(addrs, connectedPeer)
// this needs to be in a separate goroutine since a peer we are gossipping to might
// be slow and since this function is called with the same context from kademlia connect
// function, this might result in the unfortunate situation where we end up on
// `err := k.discovery.BroadcastPeers(ctx, peer, addrs...)` with an already expired context
// indicating falsely, that the peer connection has timed out.
k.wg.Add(1)
go func(connectedPeer swarm.Address) {
defer k.wg.Done()
if err := k.discovery.BroadcastPeers(context.Background(), connectedPeer, peer); err != nil {
k.logger.Debugf("error gossiping peer %s to peer %s: %v", peer, connectedPeer, err)
}
}(connectedPeer)
return false, false, nil
})
if len(addrs) == 0 {
return nil
}
err := k.discovery.BroadcastPeers(ctx, peer, addrs...)
if err != nil {
_ = k.p2p.Disconnect(peer)
}
return err
}
// AddPeers adds peers to the knownPeers list.
// This does not guarantee that a connection will immediately
// be made to the peer.
func (k *Kad) AddPeers(ctx context.Context, addrs ...swarm.Address) error {
for _, addr := range addrs {
if k.knownPeers.Exists(addr) {
continue
}
po := swarm.Proximity(k.base.Bytes(), addr.Bytes())
k.knownPeers.Add(addr, po)
}
select {
case k.manageC <- struct{}{}:
default:
}
return nil
}
// Connected is called when a peer has dialed in.
func (k *Kad) Connected(ctx context.Context, peer p2p.Peer) error {
if err := k.connected(ctx, peer.Address); err != nil {
return err
}
select {
case k.manageC <- struct{}{}:
default:
}
return nil
}
func (k *Kad) connected(ctx context.Context, addr swarm.Address) error {
if err := k.announce(ctx, addr); err != nil {
return err
}
po := swarm.Proximity(k.base.Bytes(), addr.Bytes())
k.knownPeers.Add(addr, po)
k.connectedPeers.Add(addr, po)
k.waitNextMu.Lock()
delete(k.waitNext, addr.String())
k.waitNextMu.Unlock()
k.depthMu.Lock()
k.depth = recalcDepth(k.connectedPeers)
k.depthMu.Unlock()
k.notifyPeerSig()
return nil
}
// Disconnected is called when peer disconnects.
func (k *Kad) Disconnected(peer p2p.Peer) {
po := swarm.Proximity(k.base.Bytes(), peer.Address.Bytes())
k.connectedPeers.Remove(peer.Address, po)
k.waitNextMu.Lock()
k.waitNext[peer.Address.String()] = retryInfo{tryAfter: time.Now().Add(timeToRetry), failedAttempts: 0}
k.waitNextMu.Unlock()
k.depthMu.Lock()
k.depth = recalcDepth(k.connectedPeers)
k.depthMu.Unlock()
select {
case k.manageC <- struct{}{}:
default:
}
k.notifyPeerSig()
}
func (k *Kad) notifyPeerSig() {
k.peerSigMtx.Lock()
defer k.peerSigMtx.Unlock()
for _, c := range k.peerSig {
// Every peerSig channel has a buffer capacity of 1,
// so every receiver will get the signal even if the
// select statement has the default case to avoid blocking.
select {
case c <- struct{}{}:
default:
}
}
}
func isIn(a swarm.Address, addresses []p2p.Peer) bool {
for _, v := range addresses {
if v.Address.Equal(a) {
return true
}
}
return false
}
// ClosestPeer returns the closest peer to a given address.
func (k *Kad) ClosestPeer(addr swarm.Address, skipPeers ...swarm.Address) (swarm.Address, error) {
if k.connectedPeers.Length() == 0 {
return swarm.Address{}, topology.ErrNotFound
}
peers := k.p2p.Peers()
var peersToDisconnect []swarm.Address
closest := k.base
err := k.connectedPeers.EachBinRev(func(peer swarm.Address, po uint8) (bool, bool, error) {
for _, a := range skipPeers {
if a.Equal(peer) {
return false, false, nil
}
}
// kludge: hotfix for topology peer inconsistencies bug
if !isIn(peer, peers) {
a := swarm.NewAddress(peer.Bytes())
peersToDisconnect = append(peersToDisconnect, a)
return false, false, nil
}
dcmp, err := swarm.DistanceCmp(addr.Bytes(), closest.Bytes(), peer.Bytes())
if err != nil {
return false, false, err
}
switch dcmp {
case 0:
// do nothing
case -1:
// current peer is closer
closest = peer
case 1:
// closest is already closer to chunk
// do nothing
}
return false, false, nil
})
if err != nil {
return swarm.Address{}, err
}
for _, v := range peersToDisconnect {
k.Disconnected(p2p.Peer{Address: v})
}
// check if self
if closest.Equal(k.base) {
return swarm.Address{}, topology.ErrWantSelf
}
return closest, nil
}
// EachPeer iterates from closest bin to farthest
func (k *Kad) EachPeer(f topology.EachPeerFunc) error {
return k.connectedPeers.EachBin(f)
}
// EachPeerRev iterates from farthest bin to closest
func (k *Kad) EachPeerRev(f topology.EachPeerFunc) error {
return k.connectedPeers.EachBinRev(f)
}
// SubscribePeersChange returns the channel that signals when the connected peers
// set changes. Returned function is safe to be called multiple times.
func (k *Kad) SubscribePeersChange() (c <-chan struct{}, unsubscribe func()) {
channel := make(chan struct{}, 1)
var closeOnce sync.Once
k.peerSigMtx.Lock()
defer k.peerSigMtx.Unlock()
k.peerSig = append(k.peerSig, channel)
unsubscribe = func() {
k.peerSigMtx.Lock()
defer k.peerSigMtx.Unlock()
for i, c := range k.peerSig {
if c == channel {
k.peerSig = append(k.peerSig[:i], k.peerSig[i+1:]...)
break
}
}
closeOnce.Do(func() { close(channel) })
}
return channel, unsubscribe
}
// NeighborhoodDepth returns the current Kademlia depth.
func (k *Kad) NeighborhoodDepth() uint8 {
k.depthMu.RLock()
defer k.depthMu.RUnlock()
return k.neighborhoodDepth()
}
func (k *Kad) neighborhoodDepth() uint8 {
return k.depth
}
// MarshalJSON returns a JSON representation of Kademlia.
func (k *Kad) MarshalJSON() ([]byte, error) {
return k.marshal(false)
}
func (k *Kad) marshal(indent bool) ([]byte, error) {
type binInfo struct {
BinPopulation uint `json:"population"`
BinConnected uint `json:"connected"`
DisconnectedPeers []string `json:"disconnectedPeers"`
ConnectedPeers []string `json:"connectedPeers"`
}
type kadBins struct {
Bin0 binInfo `json:"bin_0"`
Bin1 binInfo `json:"bin_1"`
Bin2 binInfo `json:"bin_2"`
Bin3 binInfo `json:"bin_3"`
Bin4 binInfo `json:"bin_4"`
Bin5 binInfo `json:"bin_5"`
Bin6 binInfo `json:"bin_6"`
Bin7 binInfo `json:"bin_7"`
Bin8 binInfo `json:"bin_8"`
Bin9 binInfo `json:"bin_9"`
Bin10 binInfo `json:"bin_10"`
Bin11 binInfo `json:"bin_11"`
Bin12 binInfo `json:"bin_12"`
Bin13 binInfo `json:"bin_13"`
Bin14 binInfo `json:"bin_14"`
Bin15 binInfo `json:"bin_15"`
}
type kadParams struct {
Base string `json:"baseAddr"` // base address string
Population int `json:"population"` // known
Connected int `json:"connected"` // connected count
Timestamp time.Time `json:"timestamp"` // now
NNLowWatermark int `json:"nnLowWatermark"` // low watermark for depth calculation
Depth uint8 `json:"depth"` // current depth
Bins kadBins `json:"bins"` // individual bin info
}
var infos []binInfo
for i := int(swarm.MaxPO); i >= 0; i-- {
infos = append(infos, binInfo{})
}
_ = k.connectedPeers.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
infos[po].BinConnected++
infos[po].ConnectedPeers = append(infos[po].ConnectedPeers, addr.String())
return false, false, nil
})
// output (k.knownPeers ¬ k.connectedPeers) here to not repeat the peers we already have in the connected peers list
_ = k.knownPeers.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
infos[po].BinPopulation++
for _, v := range infos[po].ConnectedPeers {
// peer already connected, don't show in the known peers list
if v == addr.String() {
return false, false, nil
}
}
infos[po].DisconnectedPeers = append(infos[po].DisconnectedPeers, addr.String())
return false, false, nil
})
j := &kadParams{
Base: k.base.String(),
Population: k.knownPeers.Length(),
Connected: k.connectedPeers.Length(),
Timestamp: time.Now(),
NNLowWatermark: nnLowWatermark,
Depth: k.NeighborhoodDepth(),
Bins: kadBins{
Bin0: infos[0],
Bin1: infos[1],
Bin2: infos[2],
Bin3: infos[3],
Bin4: infos[4],
Bin5: infos[5],
Bin6: infos[6],
Bin7: infos[7],
Bin8: infos[8],
Bin9: infos[9],
Bin10: infos[10],
Bin11: infos[11],
Bin12: infos[12],
Bin13: infos[13],
Bin14: infos[14],
Bin15: infos[15],
},
}
if indent {
return json.MarshalIndent(j, "", " ")
}
return json.Marshal(j)
}
// String returns a string represenstation of Kademlia.
func (k *Kad) String() string {
b, err := k.marshal(true)
if err != nil {
k.logger.Errorf("error marshaling kademlia into json: %v", err)
return ""
}
return string(b)
}
// Close shuts down kademlia.
func (k *Kad) Close() error {
k.logger.Info("kademlia shutting down")
close(k.quit)
cc := make(chan struct{})
go func() {
defer close(cc)
k.wg.Wait()
}()
select {
case <-cc:
case <-time.After(10 * time.Second):
k.logger.Warning("kademlia shutting down with announce goroutines")
}
select {
case <-k.done:
case <-time.After(5 * time.Second):
k.logger.Warning("kademlia manage loop did not shut down properly")
}
return nil
}
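// --- Editor's note: illustrative sketch, not part of the original file. ---
// The review below asks for plainer wording in the warning logged when a dial
// fails (dropping "from kademlia"). A sketch of the two log calls with wording
// along those lines; the address parameter is an anonymous interface so the
// snippet does not assume a concrete address type from the codebase.
func logConnectFailure(logger logging.Logger, addr interface {
	String() string
	ShortString() string
}, err error) {
	logger.Debugf("could not connect to peer %s: %v", addr.String(), err)
	logger.Warningf("could not connect to peer %s: %v", addr.ShortString(), err)
}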
| 1 | 13,157 | please change to `could not connect to peer: %s`. it is not relevant to say "from kademlia" in a warning | ethersphere-bee | go |
@@ -103,8 +103,13 @@ StatusOr<DataSet> SubmitJobExecutor::buildResult(meta::cpp2::AdminJobOp jobOp,
}
Value SubmitJobExecutor::convertJobTimestampToDateTime(int64_t timestamp) {
- return timestamp > 0 ? Value(time::TimeConversion::unixSecondsToDateTime(timestamp))
- : Value::kEmpty;
+ if (timestamp <= 0) {
+ return Value::kEmpty;
+ }
+
+ DateTime date = time::TimeConversion::unixSecondsToDateTime(timestamp);
+
+ return time::TimeUtils::rmDateTimeStrMs(date.toString());
}
nebula::DataSet SubmitJobExecutor::buildShowResultData( | 1 | /* Copyright (c) 2020 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include "graph/executor/admin/SubmitJobExecutor.h"
#include <thrift/lib/cpp/util/EnumUtils.h>
#include "common/time/ScopedTimer.h"
#include "common/time/TimeUtils.h"
#include "graph/context/QueryContext.h"
#include "graph/planner/plan/Admin.h"
namespace nebula {
namespace graph {
folly::Future<Status> SubmitJobExecutor::execute() {
SCOPED_TIMER(&execTime_);
auto *sjNode = asNode<SubmitJob>(node());
auto jobOp = sjNode->jobOp();
auto cmd = sjNode->cmd();
auto params = sjNode->params();
return qctx()
->getMetaClient()
->submitJob(jobOp, cmd, params)
.via(runner())
.thenValue([jobOp, this](StatusOr<meta::cpp2::AdminJobResult> &&resp) {
SCOPED_TIMER(&execTime_);
if (!resp.ok()) {
LOG(ERROR) << resp.status().toString();
return std::move(resp).status();
}
auto status = buildResult(jobOp, std::move(resp).value());
NG_RETURN_IF_ERROR(status);
return finish(std::move(status).value());
});
}
StatusOr<DataSet> SubmitJobExecutor::buildResult(meta::cpp2::AdminJobOp jobOp,
meta::cpp2::AdminJobResult &&resp) {
switch (jobOp) {
case meta::cpp2::AdminJobOp::ADD: {
nebula::DataSet v({"New Job Id"});
DCHECK(resp.job_id_ref().has_value());
if (!resp.job_id_ref().has_value()) {
return Status::Error("Response unexpected.");
}
v.emplace_back(nebula::Row({*resp.job_id_ref()}));
return v;
}
case meta::cpp2::AdminJobOp::RECOVER: {
nebula::DataSet v({"Recovered job num"});
DCHECK(resp.recovered_job_num_ref().has_value());
if (!resp.recovered_job_num_ref().has_value()) {
return Status::Error("Response unexpected.");
}
v.emplace_back(nebula::Row({*resp.recovered_job_num_ref()}));
return v;
}
case meta::cpp2::AdminJobOp::SHOW: {
DCHECK(resp.job_desc_ref().has_value());
if (!resp.job_desc_ref().has_value()) {
return Status::Error("Response unexpected.");
}
DCHECK(resp.task_desc_ref().has_value());
if (!resp.task_desc_ref().has_value()) {
return Status::Error("Response unexpected");
}
auto &jobDesc = *resp.job_desc_ref();
return buildShowResultData(jobDesc.front(), *resp.get_task_desc());
}
case meta::cpp2::AdminJobOp::SHOW_All: {
nebula::DataSet v({"Job Id", "Command", "Status", "Start Time", "Stop Time"});
DCHECK(resp.job_desc_ref().has_value());
if (!resp.job_desc_ref().has_value()) {
return Status::Error("Response unexpected");
}
const auto &jobsDesc = *resp.job_desc_ref();
for (const auto &jobDesc : jobsDesc) {
v.emplace_back(nebula::Row({
jobDesc.get_id(),
apache::thrift::util::enumNameSafe(jobDesc.get_cmd()),
apache::thrift::util::enumNameSafe(jobDesc.get_status()),
convertJobTimestampToDateTime(jobDesc.get_start_time()),
convertJobTimestampToDateTime(jobDesc.get_stop_time()),
}));
}
return v;
}
case meta::cpp2::AdminJobOp::STOP: {
nebula::DataSet v({"Result"});
v.emplace_back(nebula::Row({"Job stopped"}));
return v;
}
// no default so the compiler will warning when lack
}
DLOG(FATAL) << "Unknown job operation " << static_cast<int>(jobOp);
return Status::Error("Unknown job job operation %d.", static_cast<int>(jobOp));
}
Value SubmitJobExecutor::convertJobTimestampToDateTime(int64_t timestamp) {
return timestamp > 0 ? Value(time::TimeConversion::unixSecondsToDateTime(timestamp))
: Value::kEmpty;
}
nebula::DataSet SubmitJobExecutor::buildShowResultData(
const nebula::meta::cpp2::JobDesc &jd, const std::vector<nebula::meta::cpp2::TaskDesc> &td) {
if (jd.get_cmd() == meta::cpp2::AdminCmd::DATA_BALANCE ||
jd.get_cmd() == meta::cpp2::AdminCmd::ZONE_BALANCE) {
nebula::DataSet v(
{"Job Id(spaceId:partId)", "Command(src->dst)", "Status", "Start Time", "Stop Time"});
const auto ¶s = jd.get_paras();
size_t index = std::stoul(paras.back());
uint32_t total = paras.size() - index - 1, succeeded = 0, failed = 0, inProgress = 0,
invalid = 0;
v.emplace_back(Row({jd.get_id(),
apache::thrift::util::enumNameSafe(jd.get_cmd()),
apache::thrift::util::enumNameSafe(jd.get_status()),
convertJobTimestampToDateTime(jd.get_start_time()).toString(),
convertJobTimestampToDateTime(jd.get_stop_time()).toString()}));
for (size_t i = index; i < paras.size() - 1; i++) {
meta::cpp2::BalanceTask tsk;
apache::thrift::CompactSerializer::deserialize(paras[i], tsk);
switch (tsk.get_result()) {
case meta::cpp2::TaskResult::FAILED:
++failed;
break;
case meta::cpp2::TaskResult::IN_PROGRESS:
++inProgress;
break;
case meta::cpp2::TaskResult::INVALID:
++invalid;
break;
case meta::cpp2::TaskResult::SUCCEEDED:
++succeeded;
break;
}
v.emplace_back(Row({std::move(tsk).get_id(),
std::move(tsk).get_command(),
apache::thrift::util::enumNameSafe(tsk.get_result()),
convertJobTimestampToDateTime(std::move(tsk).get_start_time()),
convertJobTimestampToDateTime(std::move(tsk).get_stop_time())}));
}
v.emplace_back(Row({folly::sformat("Total:{}", total),
folly::sformat("Succeeded:{}", succeeded),
folly::sformat("Failed:{}", failed),
folly::sformat("In Progress:{}", inProgress),
folly::sformat("Invalid:{}", invalid)}));
return v;
} else {
nebula::DataSet v({"Job Id(TaskId)", "Command(Dest)", "Status", "Start Time", "Stop Time"});
v.emplace_back(nebula::Row({
jd.get_id(),
apache::thrift::util::enumNameSafe(jd.get_cmd()),
apache::thrift::util::enumNameSafe(jd.get_status()),
convertJobTimestampToDateTime(jd.get_start_time()),
convertJobTimestampToDateTime(jd.get_stop_time()),
}));
// tasks desc
for (const auto &taskDesc : td) {
v.emplace_back(nebula::Row({
taskDesc.get_task_id(),
taskDesc.get_host().host,
apache::thrift::util::enumNameSafe(taskDesc.get_status()),
convertJobTimestampToDateTime(taskDesc.get_start_time()),
convertJobTimestampToDateTime(taskDesc.get_stop_time()),
}));
}
return v;
}
}
} // namespace graph
} // namespace nebula
| 1 | 33,316 | Why need to remove this? Datetime has microseconds in fact. | vesoft-inc-nebula | cpp |
@@ -105,7 +105,12 @@ func PrivateKeyBytes(key crypto.PrivateKey) []byte {
case *rsa.PrivateKey:
keyBytes = x509.MarshalPKCS1PrivateKey(key)
case *ecdsa.PrivateKey:
- keyBytes, _ = x509.MarshalECPrivateKey(key)
+ var err error
+ var t *testing.T
+ keyBytes, err = x509.MarshalECPrivateKey(key)
+ if err != nil {
+ t.Error(err)
+ }
}
return keyBytes
} | 1 | package https
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"os"
"runtime"
"testing"
)
func TestSaveAndLoadRSAPrivateKey(t *testing.T) {
keyFile := "test.key"
defer os.Remove(keyFile)
privateKey, err := rsa.GenerateKey(rand.Reader, 128) // make tests faster; small key size OK for testing
if err != nil {
t.Fatal(err)
}
// test save
err = savePrivateKey(privateKey, keyFile)
if err != nil {
t.Fatal("error saving private key:", err)
}
// it doesn't make sense to test file permission on windows
if runtime.GOOS != "windows" {
// get info of the key file
info, err := os.Stat(keyFile)
if err != nil {
t.Fatal("error stating private key:", err)
}
// verify permission of key file is correct
if info.Mode().Perm() != 0600 {
t.Error("Expected key file to have permission 0600, but it wasn't")
}
}
// test load
loadedKey, err := loadPrivateKey(keyFile)
if err != nil {
t.Error("error loading private key:", err)
}
// verify loaded key is correct
if !PrivateKeysSame(privateKey, loadedKey) {
t.Error("Expected key bytes to be the same, but they weren't")
}
}
func TestSaveAndLoadECCPrivateKey(t *testing.T) {
keyFile := "test.key"
defer os.Remove(keyFile)
privateKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
if err != nil {
t.Fatal(err)
}
// test save
err = savePrivateKey(privateKey, keyFile)
if err != nil {
t.Fatal("error saving private key:", err)
}
// it doesn't make sense to test file permission on windows
if runtime.GOOS != "windows" {
// get info of the key file
info, err := os.Stat(keyFile)
if err != nil {
t.Fatal("error stating private key:", err)
}
// verify permission of key file is correct
if info.Mode().Perm() != 0600 {
t.Error("Expected key file to have permission 0600, but it wasn't")
}
}
// test load
loadedKey, err := loadPrivateKey(keyFile)
if err != nil {
t.Error("error loading private key:", err)
}
// verify loaded key is correct
if !PrivateKeysSame(privateKey, loadedKey) {
t.Error("Expected key bytes to be the same, but they weren't")
}
}
// PrivateKeysSame compares the bytes of a and b and returns true if they are the same.
func PrivateKeysSame(a, b crypto.PrivateKey) bool {
return bytes.Equal(PrivateKeyBytes(a), PrivateKeyBytes(b))
}
// PrivateKeyBytes returns the bytes of DER-encoded key.
func PrivateKeyBytes(key crypto.PrivateKey) []byte {
var keyBytes []byte
switch key := key.(type) {
case *rsa.PrivateKey:
keyBytes = x509.MarshalPKCS1PrivateKey(key)
case *ecdsa.PrivateKey:
keyBytes, _ = x509.MarshalECPrivateKey(key)
}
return keyBytes
}
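// --- Editor's note: illustrative sketch, not part of the original file. ---
// The review below questions the `var t *testing.T` introduced by the patch,
// which is nil at the point t.Error(err) is called. One way to address it
// (a sketch, not the project's actual fix) is to pass the caller's *testing.T
// into the helper instead of declaring one inside the switch:
func privateKeyBytesT(t *testing.T, key crypto.PrivateKey) []byte {
	var keyBytes []byte
	switch key := key.(type) {
	case *rsa.PrivateKey:
		keyBytes = x509.MarshalPKCS1PrivateKey(key)
	case *ecdsa.PrivateKey:
		var err error
		keyBytes, err = x509.MarshalECPrivateKey(key)
		if err != nil {
			t.Error(err)
		}
	}
	return keyBytes
}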
| 1 | 8,123 | What's up with this, isn't it nil? Why not use the real T value from the test function? @elcore @wmark | caddyserver-caddy | go |
@@ -34,7 +34,9 @@ class TCallableObject extends TObject
return false;
}
- public function getAssertionString(bool $exact = false): string
+ public function getAssertionString()
+
+ /** @psalm-mutation-free */: string
{
return 'object';
} | 1 | <?php
namespace Psalm\Type\Atomic;
/**
* Denotes an object that is also `callable` (i.e. it has `__invoke` defined).
*/
class TCallableObject extends TObject
{
public function __toString(): string
{
return 'callable-object';
}
public function getKey(bool $include_extra = true): string
{
return 'callable-object';
}
/**
* @param array<lowercase-string, string> $aliased_classes
*/
public function toPhpString(
?string $namespace,
array $aliased_classes,
?string $this_class,
int $analysis_php_version_id
): ?string {
return $analysis_php_version_id >= 7_02_00 ? 'object' : null;
}
public function canBeFullyExpressedInPhp(int $analysis_php_version_id): bool
{
return false;
}
public function getAssertionString(bool $exact = false): string
{
return 'object';
}
}
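// --- Editor's note: illustrative sketch, not part of the original file. ---
// The review below ("this feels weird") points at the hunk above, where the
// `/** @psalm-mutation-free */` annotation lands in the middle of the method
// signature. The intended shape was presumably a docblock above the method,
// roughly like this (a guess, not the merged change):
/** @psalm-mutation-free */
public function getAssertionString(): string
{
    return 'object';
}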
| 1 | 12,741 | this feels weird | vimeo-psalm | php |