message (string, 6 to 474 chars) | diff (string, 8 to 5.22k chars)
---|---|
Test aborting external entity value parser | @@ -3771,6 +3771,61 @@ START_TEST(test_external_entity_values)
}
END_TEST
+static int XMLCALL
+external_entity_value_aborter(XML_Parser parser,
+ const XML_Char *context,
+ const XML_Char *UNUSED_P(base),
+ const XML_Char *systemId,
+ const XML_Char *UNUSED_P(publicId))
+{
+ const char *text1 =
+ "<!ELEMENT doc EMPTY>\n"
+ "<!ENTITY % e1 SYSTEM '004-2.ent'>\n"
+ "<!ENTITY % e2 '%e1;'>\n"
+ "%e1;\n";
+ const char *text2 =
+ "<?xml version='1.0' encoding='utf-8'?>";
+ XML_Parser ext_parser;
+
+ if (systemId == NULL)
+ return XML_STATUS_OK;
+ ext_parser = XML_ExternalEntityParserCreate(parser, context, NULL);
+ if (ext_parser == NULL)
+ fail("Could not create external entity parser");
+ if (!strcmp(systemId, "004-1.ent")) {
+ if (_XML_Parse_SINGLE_BYTES(ext_parser, text1, strlen(text1),
+ XML_TRUE) == XML_STATUS_ERROR)
+ xml_failure(ext_parser);
+ }
+ if (!strcmp(systemId, "004-2.ent")) {
+ XML_SetXmlDeclHandler(ext_parser, entity_suspending_xdecl_handler);
+ XML_SetUserData(ext_parser, ext_parser);
+ if (_XML_Parse_SINGLE_BYTES(ext_parser, text2, strlen(text2),
+ XML_TRUE) != XML_STATUS_ERROR)
+ fail("Aborted parse not faulted");
+ if (XML_GetErrorCode(ext_parser) != XML_ERROR_ABORTED)
+ xml_failure(ext_parser);
+ }
+ return XML_STATUS_OK;
+}
+
+START_TEST(test_ext_entity_value_abort)
+{
+ const char *text =
+ "<!DOCTYPE doc SYSTEM '004-1.ent'>\n"
+ "<doc></doc>\n";
+
+ XML_SetParamEntityParsing(parser, XML_PARAM_ENTITY_PARSING_ALWAYS);
+ XML_SetExternalEntityRefHandler(parser,
+ external_entity_value_aborter);
+ resumable = XML_FALSE;
+ if (_XML_Parse_SINGLE_BYTES(parser, text, strlen(text),
+ XML_TRUE) == XML_STATUS_ERROR)
+ xml_failure(parser);
+}
+END_TEST
+
+
/*
* Namespaces tests.
*/
@@ -6098,6 +6153,7 @@ make_suite(void)
tcase_add_test(tc_basic, test_ignore_section);
tcase_add_test(tc_basic, test_bad_ignore_section);
tcase_add_test(tc_basic, test_external_entity_values);
+ tcase_add_test(tc_basic, test_ext_entity_value_abort);
suite_add_tcase(s, tc_namespace);
tcase_add_checked_fixture(tc_namespace,
|
cloud: avoid calling the "reconnect" function directly from the cloud manager
Cloud reconnect closes the connection, which triggers the response handlers of pending requests.
A response handler removes delayed callbacks, and it can remove
callbacks that are currently in use. | #define PING_DELAY_ON_TIMEOUT (PING_DELAY / 5)
static void cloud_start_process(oc_cloud_context_t *ctx);
+static oc_event_callback_retval_t reconnect(void *data);
static oc_event_callback_retval_t cloud_register(void *data);
static oc_event_callback_retval_t cloud_login(void *data);
static oc_event_callback_retval_t refresh_token(void *data);
@@ -73,6 +74,7 @@ void
cloud_manager_stop(oc_cloud_context_t *ctx)
{
OC_DBG("[CM] cloud_manager_stop\n");
+ oc_remove_delayed_callback(ctx, reconnect);
oc_remove_delayed_callback(ctx, cloud_register);
oc_remove_delayed_callback(ctx, cloud_login);
oc_remove_delayed_callback(ctx, send_ping);
@@ -88,11 +90,13 @@ reset_delayed_callback(void *cb_data, oc_trigger_t callback, uint16_t seconds)
oc_set_delayed_callback(cb_data, callback, seconds);
}
-static void
-reconnect(oc_cloud_context_t *ctx)
+static oc_event_callback_retval_t
+reconnect(void *data)
{
+ oc_cloud_context_t *ctx = (oc_cloud_context_t *)data;
reset_delayed_callback(ctx, callback_handler, 0);
cloud_reconnect(ctx);
+ return OC_EVENT_DONE;
}
static bool
@@ -432,7 +436,7 @@ cloud_login(void *data)
message_timeout[ctx->retry_count]);
ctx->retry_count++;
} else {
- reconnect(ctx);
+ reset_delayed_callback(ctx, reconnect, 0);
}
}
@@ -574,7 +578,7 @@ refresh_token(void *data)
ctx->retry_refresh_token_count++;
} else {
- reconnect(ctx);
+ reset_delayed_callback(ctx, reconnect, 0);
}
return OC_EVENT_DONE;
@@ -620,7 +624,7 @@ send_ping(void *data)
}
ctx->retry_count++;
} else {
- reconnect(ctx);
+ reset_delayed_callback(ctx, reconnect, 0);
}
return OC_EVENT_DONE;
|
`http2client.c`: a 0-byte data frame is ok in message_body_forbidden cases.
This makes the check introduced in h2o/h2o#2971 slightly less strict. | @@ -470,7 +470,7 @@ static int handle_data_frame(struct st_h2o_http2client_conn_t *conn, h2o_http2_f
* All other responses do include a message body, although the body might
* be of zero length.
*/
- if (stream->input.message_body_forbidden) {
+ if (stream->input.message_body_forbidden && payload.length != 0) {
stream_send_error(conn, frame->stream_id, H2O_HTTP2_ERROR_PROTOCOL);
call_callback_with_error(stream, h2o_httpclient_error_protocol_violation);
close_stream(stream);
|
Remove OpenLab CI from README as it's EOL.
Thanks to OpenLab for providing the ARM regression test service for a very
long time. | **Concourse Pipeline** [](https://prod.ci.gpdb.pivotal.io/teams/main/pipelines/gpdb_master) |
**Travis Build** [](https://travis-ci.org/greenplum-db/gpdb) |
-**Zuul Regression Test On Arm** [](https://status.openlabtesting.org/builds/builds?project=greenplum-db%2Fgpdb&job_name=gpdb-installcheck-world-tests-on-arm64)
----------------------------------------------------------------------
|
refine config; start helperd | @@ -2,7 +2,7 @@ To add optional support for mounting \beegfs{} file systems, an
additional \pkgmgr{} repository must be configured. In this recipe, it is
assumed that the \beegfs{} file system is hosted by servers that are pre-existing
and are not part of the install process. Installing the client package triggers
-a build of a kernel module, so the kernel module development packages must be
+a build of a kernel module, hence the kernel module development packages must be
installed first. Once the client is configured to point
to the \beegfs{} system management server, starting the client should trigger file
system mounts.
@@ -15,9 +15,10 @@ system mounts.
[sms](*\#*) cd /etc/yum.repos.d
[sms](*\#*) wget ${beegfs_repo}
[sms](*\#*) (*\install*) kernel-devel
-[sms](*\#*) (*\install*) beegfs-client
+[sms](*\#*) (*\install*) beegfs-client beegfs-helperd beegfs-utils
-[sms](*\#*) perl -pi -e "s/^sysMgmtdHost.*$/sysMgmtdHost = ${sysmgmtd_host}/" /etc/beegfs/beegfs-client.conf
+[sms](*\#*) /opt/beegfs/sbin/beegfs-setup-client -m ${sysmgmtd_host}
+[sms](*\#*) /etc/init.d/beegfs-helperd start
[sms](*\#*) /etc/init.d/beegfs-client start
\end{lstlisting}
% ohpc_indent 0
|
test-suite: disable dynamic lib check, broken by | @@ -105,6 +105,7 @@ setup() {
}
@test "[$testname] Verify dynamic library available in ${PKG}_LIB ($LMOD_FAMILY_COMPILER/$LMOD_FAMILY_MPI)" {
+ skip "no longer checking for specific dynamic lib"
LIB=${PKG}_LIB
if [ -z ${!LIB} ];then
|
Add RV64 support to CMakeLists.txt
based on loongarch64 | @@ -12,6 +12,7 @@ option(TEGRAX1 "Set to ON if targeting an Tegra X1 based device" ${TEGRAX1})
option(PHYTIUM "Set to ON if targeting an Phytium (D2000 or FT2000/4) based device" ${PHYTIUM})
option(SD845 "Set to ON if targeting a Snapragon 845 based device" ${SD845})
option(LARCH64 "Set to ON if targeting an Loongarch64 based device" ${LARCH64})
+option(RV64 "Set to ON if targeting an RISCV RV64G based device" ${RV64})
option(PPC64LE "Set to ON if targeting an PowerPC 64 LE based device" ${PPC64LE})
option(LX2160A "Set to ON if targeting an LX2160A based device" ${LX2160A})
option(USE_CCACHE "Set to ON to use ccache if present in the system" ${USE_CCACHE})
@@ -23,6 +24,11 @@ if(LARCH64)
set(NOALIGN OFF CACHE BOOL "")
set(ARM_DYNAREC OFF CACHE BOOL "")
endif()
+if(RV64)
+ set(LD80BITS OFF CACHE BOOL "")
+ set(NOALIGN OFF CACHE BOOL "")
+ set(ARM_DYNAREC OFF CACHE BOOL "")
+endif()
if(PPC64LE)
set(LD80BITS OFF CACHE BOOL "")
set(NOALIGN OFF CACHE BOOL "")
@@ -93,6 +99,10 @@ elseif(LARCH64)
add_definitions(-DLARCH64)
add_definitions(-pipe -march=loongarch64)
set(CMAKE_ASM_FLAGS "-pipe -march=loongarch64")
+elseif(RV64)
+ add_definitions(-DRV64)
+ add_definitions(-pipe -march=rv64g)
+ set(CMAKE_ASM_FLAGS "-pipe -march=rv64g")
elseif(PPC64LE)
add_definitions(-DPPC64LE)
elseif(LX2160A)
|
OVR() fix for wren | @@ -1558,6 +1558,17 @@ static void callWrenTick(tic_mem* tic)
wrenEnsureSlots(vm, 1);
wrenSetSlotHandle(vm, 0, game_class);
wrenCall(vm, update_handle);
+
+ // call OVR() callback for backward compatibility
+ if(overline_handle)
+ {
+ OVR(tic)
+ {
+ wrenEnsureSlots(vm, 1);
+ wrenSetSlotHandle(vm, 0, game_class);
+ wrenCall(vm, overline_handle);
+ }
+ }
}
}
@@ -1586,17 +1597,6 @@ static void callWrenBorder(tic_mem* tic, s32 row, void* data)
wrenSetSlotHandle(vm, 0, game_class);
wrenSetSlotDouble(vm, 1, row);
wrenCall(vm, border_handle);
-
- // call OVR() callback for backward compatibility
- if(overline_handle)
- {
- OVR(tic)
- {
- wrenEnsureSlots(vm, 1);
- wrenSetSlotHandle(vm, 0, game_class);
- wrenCall(vm, overline_handle);
- }
- }
}
}
|
touchpad_elan: Ensure we at least have 1k of shared memory
Debugging commands may request buffers up to that size.
BRANCH=poppy
TEST=make buildall -j | @@ -574,6 +574,9 @@ allowed_command_hashes[TOUCHPAD_ELAN_DEBUG_NUM_CMD][SHA256_DIGEST_SIZE] = {
},
};
+/* Debugging commands need to allocate a <=1k buffer. */
+SHARED_MEM_CHECK_SIZE(1024);
+
int touchpad_debug(const uint8_t *param, unsigned int param_size,
uint8_t **data, unsigned int *data_size)
{
|
test(client-encryption): drop collection before each test run | @@ -33,7 +33,12 @@ describe('ClientEncryption', function() {
beforeEach(() => {
client = new MongoClient('mongodb://localhost:27017/test', { useNewUrlParser: true });
- return client.connect();
+ return client.connect().then(() =>
+ client
+ .db('client')
+ .collection('encryption')
+ .drop()
+ );
});
afterEach(() => {
|
apps/wm_test: Add string null check logic in get_auth_type
result[2] can be null when the network is not enterprise WiFi. Therefore, we add a null check for result[2]
so that it is not used as an input to strcmp, which causes a crash on specific boards. | @@ -313,12 +313,13 @@ wifi_manager_ap_auth_type_e get_auth_type(const char *method)
int list_size = sizeof(wifi_test_auth_method)/sizeof(wifi_test_auth_method[0]);
for (; i < list_size; i++) {
if ((strcmp(method, wifi_test_auth_method[i]) == 0) || (strcmp(result[0], wifi_test_auth_method[i]) == 0)) {
+ if (result[2] != NULL) {
if (strcmp(result[2], "ent") == 0) {
- return auth_type_table[i + 2];
- } else {
- return auth_type_table[i];
+ return auth_type_table[i + 3];
}
}
+ return auth_type_table[i];
+ }
}
return WIFI_MANAGER_AUTH_UNKNOWN;
}
|
imxrt: strategically disable/enable interrupts in syscall | @@ -271,6 +271,7 @@ _start_1:
.type _syscallend, %function
_syscall_dispatch:
+ cpsid if
stmdb sp!, {r4-r8}
mrs r0, psp
@@ -278,8 +279,6 @@ _syscall_dispatch:
orr r7, r7, #1
stmdb sp!, {r1-r8}
- add r6, r0, #(18 * 4)
- push {r6}
sub sp, sp, #(18 * 4)
mov r5, #(17 * 4)
@@ -309,7 +308,9 @@ _syscall_dispatch0:
bx lr
_syscallend:
- pop {lr}
+ cpsid if
+ mrs lr, psp
+ add lr, #((18 + 8) * 4)
ldmia sp!, {r1-r8}
mov r1, r0 /* Put result to user's r0 */
@@ -323,6 +324,7 @@ _syscallend:
msr control, r0
ldmia sp!, {r0-r3, r12, lr}
+ cpsie if
ldr pc, [sp], #8
.size _syscall_dispatch, .-_syscall_dispatch
.ltorg
|
[CUDA] Launch with correct block and grid size | @@ -258,6 +258,15 @@ pocl_cuda_run(void *dptr, _cl_command_node* cmd)
}
// Launch kernel
- result = cuLaunchKernel(function, 1, 1, 1, 1, 1, 1, 0, NULL, params, NULL);
+ struct pocl_context pc = cmd->command.run.pc;
+ result = cuLaunchKernel(
+ function,
+ pc.num_groups[0],
+ pc.num_groups[1],
+ pc.num_groups[2],
+ cmd->command.run.local_x,
+ cmd->command.run.local_y,
+ cmd->command.run.local_z,
+ 0, NULL, params, NULL);
CUDA_CHECK(result, "cuLaunchKernel");
}
|
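A note on the launch-geometry fix above: cuLaunchKernel takes the grid size in blocks (one block per OpenCL work-group) and the block size in threads (work-items per group), which is why the patch passes pc.num_groups and local_x/y/z instead of all ones. A minimal standalone sketch of that mapping, using hypothetical NDRange sizes:

```c
#include <stdio.h>

int main(void)
{
    /* Hypothetical OpenCL NDRange: 1024x512 work-items in 16x16 work-groups. */
    unsigned global[3] = {1024, 512, 1};
    unsigned local[3]  = {16, 16, 1};
    unsigned grid[3];

    for (int i = 0; i < 3; ++i)
        grid[i] = global[i] / local[i];   /* number of work-groups per dimension */

    /* grid[]  corresponds to cuLaunchKernel's gridDimX/Y/Z  (64 x 32 x 1)  */
    /* local[] corresponds to cuLaunchKernel's blockDimX/Y/Z (16 x 16 x 1)  */
    printf("grid  = %u x %u x %u\n", grid[0], grid[1], grid[2]);
    printf("block = %u x %u x %u\n", local[0], local[1], local[2]);
    return 0;
}
```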
Update netdb-white.txt
52.83.192.224:13655> 151.80.36.21:13655 | 52.80.150.210:13655
52.80.193.246:13654
67.229.161.178:13655
-52.83.192.224:13655
54.39.28.77:13654
59.110.170.149:13655
83.219.150.219:16775
144.202.120.160:13655
148.251.139.197:16800
148.251.189.108:13655
+151.80.36.21:13655
172.94.63.18:9797
172.94.63.25:25000
172.105.216.53:3356
|
h2olog: make -t accept a glob pattern like "quicly:*" | @@ -28,6 +28,7 @@ extern "C" {
#include <unistd.h>
#include <stdarg.h>
#include <sys/time.h>
+#include <fnmatch.h>
#include "h2o/memory.h"
#include "h2o/version.h"
}
@@ -44,9 +45,10 @@ Usage: h2olog -p PID
Optional arguments:
-d Print debugging information (-dd shows more)
-h Print this help and exit
- -l Print the list of available tracepoints and exit
+ -l Print the list of selected tracepoints and exit
-s RESPONSE_HEADER_NAME A response header name to show, e.g. "content-type"
- -t TRACEPOINT A tracepoint, or fully-qualified probe name, to show, e.g. "quicly:accept"
+ -t TRACEPOINT A tracepoint, or fully-qualified probe name, to show,
+ accepting a glob pattern, e.g. "quicly:accept", "h2o:*"
-r Run without dropping root privilege
-w Path to write the output (default: stdout)
@@ -224,6 +226,19 @@ static void lost_cb(void *context, uint64_t lost)
tracer->handle_lost(lost);
}
+static size_t add_matched_usdts(std::vector<h2o_tracer::usdt> &selected_usdts, const std::vector<h2o_tracer::usdt> &available_usdts,
+ const char *pattern)
+{
+ size_t added = 0;
+ for (auto usdt : available_usdts) {
+ if (fnmatch(pattern, usdt.fully_qualified_name().c_str(), 0) == 0) {
+ selected_usdts.push_back(usdt);
+ added++;
+ }
+ }
+ return added;
+}
+
int main(int argc, char **argv)
{
std::unique_ptr<h2o_tracer> tracer;
@@ -250,13 +265,11 @@ int main(int argc, char **argv)
h2o_pid = atoi(optarg);
break;
case 't': {
- auto found = std::find_if(available_usdts.cbegin(), available_usdts.cend(),
- [](const h2o_tracer::usdt &usdt) { return optarg == usdt.fully_qualified_name(); });
- if (found == available_usdts.cend()) {
+ size_t added = add_matched_usdts(selected_usdts, available_usdts, optarg);
+ if (added == 0) {
fprintf(stderr, "No such tracepoint: %s\n", optarg);
exit(EXIT_FAILURE);
}
- selected_usdts.push_back(*found);
break;
}
case 's':
@@ -272,7 +285,7 @@ int main(int argc, char **argv)
debug++;
break;
case 'l':
- for (const auto &usdt : available_usdts) {
+ for (const auto &usdt : selected_usdts.empty() ? available_usdts : selected_usdts) {
printf("%s\n", usdt.fully_qualified_name().c_str());
}
exit(EXIT_SUCCESS);
|
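A note on the glob matching above: fnmatch() from <fnmatch.h> returns 0 when the pattern matches the string, which is how add_matched_usdts() selects tracepoints. A minimal standalone sketch with hypothetical tracepoint names:

```c
#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
    const char *names[] = {"quicly:accept", "quicly:free", "h2o:send_response"};
    const char *pattern = "quicly:*";

    for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
        /* fnmatch() returns 0 on a match, just like the selection loop above */
        if (fnmatch(pattern, names[i], 0) == 0)
            printf("selected: %s\n", names[i]);
    }
    return 0;
}
```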
Move encrypted files as raw in integration tests.
The encryption key should not be changed when moving a file so no need to decrypt/encrypt. | @@ -222,7 +222,7 @@ sub forceStorageMove
}
else
{
- $oStorage->put($strDestinationPath, ${$oStorage->get($strSourcePath)});
+ $oStorage->put($strDestinationPath, ${$oStorage->get($strSourcePath, {bRaw => true})}, {bRaw => true});
$oStorage->remove($strSourcePath);
}
}
|
don't try locking disabled ramp elements because of the differences
in Maya and Houdini ramp element indexing | @@ -1214,6 +1214,12 @@ GetAttrOperation::leaf(const HAPI_ParmInfo &parmInfo)
if(exists)
{
+ // if it's not a ramp, go ahead and lock based on disable
+ // leave the ramps alone cause there are other UI issues that complicate things
+ if(!(parmInfo.isChildOfMultiParm
+ && parentParmInfo
+ && parentParmInfo->rampType != HAPI_RAMPTYPE_INVALID)) {
+
if(parmInfo.disabled) {
plug.setLocked(true);
} else {
@@ -1221,6 +1227,8 @@ GetAttrOperation::leaf(const HAPI_ParmInfo &parmInfo)
plug.setLocked(false);
}
}
+ }
+
dataHandle = parentDataHandle.child(attrObj);
// The HAPI_ParmInfo::choiceCount could change between cooks because
|
Fix gpexpand help usage
In an earlier commit the --novacuum option was removed; however, the help
page of gpexpand keeps the -V option, which is a short option for
novacuum. | @@ -93,7 +93,7 @@ Remaining TODO items:
_usage = """[-f hosts_file]
-gpexpand -i input_file [-B batch_size] [-V] [-t segment_tar_dir] [-S]
+gpexpand -i input_file [-B batch_size] [-t segment_tar_dir] [-S]
gpexpand [-d duration[hh][:mm[:ss]] | [-e 'YYYY-MM-DD hh:mm:ss']]
[-a] [-n parallel_processes]
|
apps/blestress: Fix log in tx_stress_14 | @@ -1389,7 +1389,7 @@ tx_stress_14_gap_event(struct ble_gap_event *event, void *arg)
if (++tx_stress_ctx->rcv_num >= MYNEWT_VAL(BLE_STRESS_REPEAT)) {
rc = ble_gap_terminate(event->notify_rx.conn_handle,
BLE_ERR_REM_USER_CONN_TERM);
- MODLOG_DFLT(INFO, "rc=%d\n");
+ MODLOG_DFLT(INFO, "rc=%d\n", rc);
assert(rc == 0);
return 0;
}
|
Fix ext loopback lna_w switch toggle, pga gain difference check | @@ -209,15 +209,15 @@ int CheckSaturationTxRx(bool extLoopback)
PUSH_GMEASUREMENT_VALUES(index, ChipRSSI_2_dBFS(rssi));
{
uint16_t rssi_prev = rssi;
- while(g_pga < 18 && g_rfe == 15 && rssi < saturationLevel)
+ while(g_pga < 25 && g_rfe == 15 && rssi < saturationLevel)
{
- if(g_pga < 18)
+ if(g_pga < 25)
++g_pga;
else
break;
Modify_SPI_Reg_bits(G_PGA_RBB, g_pga);
rssi = GetRSSI();
- if((float)rssi/rssi_prev < 1.11) // pga should give ~1dB change
+ if((float)rssi/rssi_prev < 1.05) // pga should give ~1dB change
break;
rssi_prev = rssi;
PUSH_GMEASUREMENT_VALUES(++index, ChipRSSI_2_dBFS(rssi));
@@ -793,7 +793,10 @@ uint8_t CalibrateTxSetup(bool extLoopback)
if(sel_band1_2_trf == 1)
Modify_SPI_Reg_bits(SEL_PATH_RFE, 1); //LNA_H
else if(sel_band1_2_trf == 2)
+ {
Modify_SPI_Reg_bits(SEL_PATH_RFE, 3); //LNA_W
+ Modify_SPI_Reg_bits(EN_INSHSW_W_RFE, 0); //LNA_W
+ }
else
{
#if VERBOSE
|
Undo earlier change, since it is incorrect. | @@ -13011,8 +13011,6 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb,
/* now anything on those queues? */
TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
- sp->ss_next.tqe_next = NULL;
- sp->ss_next.tqe_prev = NULL;
TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
}
|
landscape: fix overeager graphapp routing
It was showing a blank page instead of a 404. | @@ -69,7 +69,7 @@ export const Content = (props) => {
<Notifications {...props} />
)}
/>
- <GraphApp {...props} />
+ <GraphApp path="/~graph" {...props} />
<Route
render={p => (
<ErrorComponent
|
unit-test: fix sig formatting | @@ -764,12 +764,9 @@ static void _sign(const _modification_t* mod)
case BTCScriptConfig_SimpleType_P2WPKH: {
const uint8_t expected_signature[64] =
"\x95\x09\x43\x09\xa2\xd2\x77\xd3\xa6\x8d\xde\xd3\x3d\x50\xa7\x47\xf2\xee\xfb\x3f"
- "\x54"
- "\x8a\x92\x45\x15\xdb\x62\xbe\x06\xa1\xae\xa4\x56\x92\x91\xe5\x2e\x6f\xea\x95\xf8"
- "\xb6"
- "\x75\x23\xb1\x9b\x35\x9a\x84\x85\xd8\xaa\x3c\xa0\x2d\xb3\x74\x70\x01\x0b\x19\x9b"
- "\x0c"
- "\xe3";
+ "\x54\x8a\x92\x45\x15\xdb\x62\xbe\x06\xa1\xae\xa4\x56\x92\x91\xe5\x2e\x6f\xea\x95"
+ "\xf8\xb6\x75\x23\xb1\x9b\x35\x9a\x84\x85\xd8\xaa\x3c\xa0\x2d\xb3\x74\x70\x01\x0b"
+ "\x19\x9b\x0c\xe3";
assert_memory_equal(next.signature, expected_signature, sizeof(next.signature));
break;
}
|
Include libluv.pc.in in release .tar.gz | @@ -19,7 +19,7 @@ sed -e "${script}" -i rockspecs/luv-${version}.rockspec
# .tar.gz
rm -rf luv-${version}
mkdir -p luv-${version}/deps
-cp -r src cmake CMakeLists.txt LICENSE.txt README.md docs.md luv-${version}/
+cp -r src cmake CMakeLists.txt LICENSE.txt README.md docs.md libluv.pc.in luv-${version}/
cp -r deps/libuv deps/lua-compat-5.3 deps/*.cmake deps/lua_one.c luv-${version}/deps/
COPYFILE_DISABLE=true tar -czvf luv-${version}.tar.gz luv-${version}
rm -rf luv-${version}
|
npm/http-api: changed relative dependency | {
"name": "@urbit/http-api",
- "version": "1.2.0",
+ "version": "1.2.1",
"license": "MIT",
"description": "Library to interact with an Urbit ship over HTTP",
"repository": {
"dependencies": {
"@babel/runtime": "^7.12.5",
"@microsoft/fetch-event-source": "^2.0.0",
- "@urbit/api": "file:../api",
+ "@urbit/api": "^1.1.0",
"browser-or-node": "^1.3.0",
"browserify-zlib": "^0.2.0",
"buffer": "^6.0.3",
"node-fetch": "^2.6.1",
"stream-browserify": "^3.0.0",
"stream-http": "^3.1.1"
+ },
+ "bundledDependencies": {
+ "@urbit/api"
}
}
|
releases/README: add a section on the hash displayed by the bb02 | @@ -48,6 +48,25 @@ contains the unsigned binary with:
./describe_signed_firmware.py firmware.vX.Y.Z.signed.bin
```
+## Verify the hash as shown by the BitBox02 at startup
+
+When installing new firmware on the BitBox02, an option can be turned on to display the firmware
+hash on the device screen when the BitBox02 is plugged in. The hash shown is a hash of the firmware
+and the firmware version: `sha256d(<version><padded firmware>)`.
+
+It can be verified with the same tool as above. For example:
+
+```sh
+$ ./describe_signed_firmware.py firmware.v9.0.0.signed.bin
+The following information assumes the provided binary was signed correctly; the signatures are not being verified.
+This is a Multi-edition firmware.
+The hash of the unsigned firmware binary is (compare with reproducible build):
+19f692a769b22abe889849d7c2987688c57489a3437e70f649baef825a6788c6
+The monotonic firmware version is: 11
+The hash of the firmware as verified/shown by the bootloader is:
+aae5fd961e552cb40ee9ef0f0060b96fb8a4f13f89168560c156010337f34b6a
+```
+
## Contribute your signature
We kindly ask you to independently build the firmware binaries we released, and verify that you get
|
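The `sha256d` notation in the section above means SHA-256 applied twice. A minimal standalone sketch, assuming OpenSSL's one-shot SHA256() helper and a placeholder buffer instead of the real `<version><padded firmware>` bytes:

```c
#include <openssl/sha.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Placeholder input; in the real computation this is <version><padded firmware>. */
    const unsigned char msg[] = "version bytes followed by padded firmware";
    unsigned char first[SHA256_DIGEST_LENGTH];
    unsigned char second[SHA256_DIGEST_LENGTH];

    SHA256(msg, strlen((const char *)msg), first);   /* inner SHA-256 */
    SHA256(first, sizeof(first), second);            /* outer SHA-256: "sha256d" */

    for (int i = 0; i < SHA256_DIGEST_LENGTH; ++i)
        printf("%02x", second[i]);
    printf("\n");
    return 0;
}
```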
set OPENSSL_ROOT_DIR=/usr/local for the fallback path in openssl detection | @@ -59,11 +59,11 @@ IF ((CMAKE_SYSTEM_NAME STREQUAL "Darwin") AND NOT (DEFINED OPENSSL_ROOT_DIR OR D
)
IF (NOT EXISTS ${OPENSSL_ROOT_DIR})
MESSAGE(STATUS "*************************************************************************\n"
- " * Setting OPENSSL_ROOT_DIR to /usr/local/opt/openssl. On macOS, OpenSSL *\n"
+ " * Setting OPENSSL_ROOT_DIR to /usr/local. On macOS, OpenSSL *\n"
" * should be installed using homebrew or OPENSSL_ROOT_DIR must be set to *\n"
" * the path that has OpenSSL installed. *\n"
" *************************************************************************")
- SET(OPENSSL_ROOT_DIR, "/usr/local/opt/openssl")
+ SET(OPENSSL_ROOT_DIR "/usr/local")
ENDIF ()
ENDIF ()
FIND_PACKAGE(OpenSSL REQUIRED)
|
terminate immediately if driver crashes | @@ -2655,6 +2655,11 @@ while(1)
memset(&ll, 0, sizeof(ll));
fromlen = sizeof(ll);
packet_len = recvfrom(fd_socket, &epb[EPB_SIZE], PCAPNG_MAXSNAPLEN, 0 ,(struct sockaddr*) &ll, &fromlen);
+ if(packet_len == 0)
+ {
+ fprintf(stderr, "\ninterface went down\n");
+ globalclose();
+ }
if(packet_len < 0)
{
perror("\nfailed to read packet");
@@ -2937,6 +2942,11 @@ while(1)
memset(&ll, 0, sizeof(ll));
fromlen = sizeof(ll);
packet_len = recvfrom(fd_socket, &epb[EPB_SIZE], PCAPNG_MAXSNAPLEN, 0 ,(struct sockaddr*) &ll, &fromlen);
+ if(packet_len == 0)
+ {
+ fprintf(stderr, "\ninterface went down\n");
+ globalclose();
+ }
if(packet_len < 0)
{
perror("\nfailed to read packet");
|
fix(barcode): type conversion warning | @@ -241,6 +241,8 @@ static signed char code128_switch_code(char from_mode, char to_mode)
return 101;
}
break;
+ default:
+ break;
}
CODE128_ASSERT(0); // Invalid mode switch
@@ -250,32 +252,32 @@ static signed char code128_switch_code(char from_mode, char to_mode)
static signed char code128a_ascii_to_code(signed char value)
{
if(value >= ' ' && value <= '_')
- return value - ' ';
+ return (signed char)(value - ' ');
else if(value >= 0 && value < ' ')
- return value + 64;
- else if(value == CODE128_FNC1)
+ return (signed char)(value + 64);
+ else if(value == (signed char)CODE128_FNC1)
return 102;
- else if(value == CODE128_FNC2)
+ else if(value == (signed char)CODE128_FNC2)
return 97;
- else if(value == CODE128_FNC3)
+ else if(value == (signed char)CODE128_FNC3)
return 96;
- else if(value == CODE128_FNC4)
+ else if(value == (signed char)CODE128_FNC4)
return 101;
else
return -1;
}
-static signed char code128b_ascii_to_code(char value)
+static signed char code128b_ascii_to_code(signed char value)
{
- if(value >= 32) // value <= 127 is implied
- return value - 32;
- else if(value == CODE128_FNC1)
+ if(value >= ' ') // value <= 127 is implied
+ return (signed char)(value - ' ');
+ else if(value == (signed char)CODE128_FNC1)
return 102;
- else if(value == CODE128_FNC2)
+ else if(value == (signed char)CODE128_FNC2)
return 97;
- else if(value == CODE128_FNC3)
+ else if(value == (signed char)CODE128_FNC3)
return 96;
- else if(value == CODE128_FNC4)
+ else if(value == (signed char)CODE128_FNC4)
return 100;
else
return -1;
|
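Background on why the casts above are needed: arithmetic on operands narrower than int is performed after integer promotion, so an expression such as value - ' ' has type int, and returning it from a function declared to return signed char draws a conversion warning unless the narrowing is written out. A small self-contained illustration (values chosen only for the example):

```c
#include <stdio.h>

static signed char ascii_to_code(signed char value)
{
    if (value >= ' ' && value <= '_')
        return (signed char)(value - ' ');  /* value - ' ' has type int; the cast narrows it back */
    return -1;
}

int main(void)
{
    printf("%d\n", ascii_to_code('A'));  /* prints 33 */
    printf("%d\n", ascii_to_code('a'));  /* prints -1, 'a' is above '_' */
    return 0;
}
```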
Do not use glide | @@ -8,9 +8,7 @@ go:
- 1.12.x
before_install:
- - go get -t github.com/Masterminds/glide
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go get -t github.com/codeclimate/test-reporter; fi
- - cmake .
install:
- make all polaris colaris
|
global keyset: fix memproblems | @@ -267,6 +267,7 @@ KDB * kdbOpen (Key * errorKey)
handle->modules = ksNew (0, KS_END);
if (elektraModulesInit (handle->modules, errorKey) == -1)
{
+ ksDel (handle->global);
ksDel (handle->modules);
elektraFree (handle);
ELEKTRA_SET_ERROR (94, errorKey, "elektraModulesInit returned with -1");
@@ -283,6 +284,7 @@ KDB * kdbOpen (Key * errorKey)
switch (elektraOpenBootstrap (handle, keys, errorKey))
{
case -1:
+ ksDel (handle->global);
ksDel (handle->modules);
elektraFree (handle);
ELEKTRA_SET_ERROR (40, errorKey, "could not open default backend");
@@ -784,7 +786,6 @@ int kdbGet (KDB * handle, KeySet * ks, Key * parentKey)
int errnosave = errno;
Key * initialParent = keyDup (parentKey);
- ksClear (handle->global);
ELEKTRA_LOG ("now in new kdbGet (%s)", keyName (parentKey));
@@ -797,6 +798,8 @@ int kdbGet (KDB * handle, KeySet * ks, Key * parentKey)
goto error;
}
+ ksClear (handle->global);
+
elektraGlobalGet (handle, ks, parentKey, PREGETSTORAGE, INIT);
elektraGlobalGet (handle, ks, parentKey, PREGETSTORAGE, MAXONCE);
elektraGlobalGet (handle, ks, parentKey, PREGETSTORAGE, DEINIT);
@@ -916,9 +919,12 @@ error:
elektraGlobalError (handle, ks, parentKey, POSTGETSTORAGE, MAXONCE);
elektraGlobalError (handle, ks, parentKey, POSTGETSTORAGE, DEINIT);
- ksClear (handle->global);
keySetName (parentKey, keyName (initialParent));
- if (handle) splitUpdateFileName (split, handle, parentKey);
+ if (handle)
+ {
+ splitUpdateFileName (split, handle, parentKey);
+ if (handle->global) ksClear (handle->global);
+ }
keyDel (initialParent);
keyDel (oldError);
splitDel (split);
@@ -1321,7 +1327,7 @@ error:
elektraGlobalError (handle, ks, parentKey, POSTROLLBACK, MAXONCE);
elektraGlobalError (handle, ks, parentKey, POSTROLLBACK, DEINIT);
- ksClear (handle->global);
+ if (handle->global) ksClear (handle->global);
keySetName (parentKey, keyName (initialParent));
keyDel (initialParent);
splitDel (split);
|
agc/autotest: testing final edge cases for scale | @@ -65,6 +65,7 @@ void autotest_agc_crcf_scale()
agc_crcf q = agc_crcf_create();
agc_crcf_set_bandwidth(q, 0.1f);
agc_crcf_set_scale (q, scale);
+ CONTEND_EQUALITY(agc_crcf_get_scale(q), scale);
unsigned int i;
float complex x = 0.1f; // input sample
@@ -306,6 +307,10 @@ void autotest_agc_crcf_invalid_config()
CONTEND_INEQUALITY(LIQUID_OK, agc_crcf_set_signal_level(q, 0))
CONTEND_INEQUALITY(LIQUID_OK, agc_crcf_set_signal_level(q, -1))
+ // invalid scale values
+ CONTEND_INEQUALITY(LIQUID_OK, agc_crcf_set_scale(q, 0))
+ CONTEND_INEQUALITY(LIQUID_OK, agc_crcf_set_scale(q, -1))
+
// initialize gain on input array, but array has length 0
CONTEND_INEQUALITY(LIQUID_OK, agc_crcf_init(q, NULL, 0))
|
doc: minor updates to board inspector
Update success message per PR
Capitalize Board Inspector | @@ -8,7 +8,7 @@ This guide describes all features and uses of the tool.
About the Board Inspector Tool
******************************
-The board inspector tool ``board_inspector.py`` enables you to generate a board
+The Board Inspector tool ``board_inspector.py`` enables you to generate a board
configuration file on the target system. The board configuration file stores
hardware-specific information extracted from the target platform and is used to
customize your :ref:`ACRN configuration <acrn_configuration_tool>`.
@@ -22,19 +22,19 @@ Generate a Board Configuration File
additional memory, or PCI devices, you must generate a new board
configuration file.
-The following steps describe all options in the board inspector for generating
+The following steps describe all options in the Board Inspector for generating
a board configuration file.
-#. Make sure the target system is set up and ready to run the board inspector,
+#. Make sure the target system is set up and ready to run the Board Inspector,
according to :ref:`gsg-board-setup` in the Getting Started Guide.
-#. Load the ``msr`` driver, used by the board inspector:
+#. Load the ``msr`` driver, used by the Board Inspector:
.. code-block:: bash
sudo modprobe msr
-#. Run the board inspector tool (``board_inspector.py``) to generate the board
+#. Run the Board Inspector tool (``board_inspector.py``) to generate the board
configuration file. This example assumes the tool is in the
``~/acrn-work/`` directory and ``my_board`` is the desired file
name. Feel free to modify the commands as needed.
@@ -44,11 +44,11 @@ a board configuration file.
cd ~/acrn-work/board_inspector/
sudo python3 board_inspector.py my_board
- Upon success, the tool displays the following message:
+ Upon success, the tool displays a message similar to this example:
.. code-block:: console
- PTCT table has been saved to PTCT successfully!
+ my_board.xml saved successfully!
#. Confirm that the board configuration file ``my_board.xml`` was generated in
the current directory.
@@ -58,8 +58,8 @@ a board configuration file.
Command-Line Options
********************
-You can configure the board inspector via command-line options. Running the
-board inspector with the ``-h`` option yields the following usage message:
+You can configure the Board Inspector via command-line options. Running the
+Board Inspector with the ``-h`` option yields the following usage message:
.. code-block::
@@ -94,11 +94,11 @@ Details about certain arguments:
* - ``--out``
- Optional. Specify a file path where the board configuration file will be
saved (example: ``~/acrn_work``). If only a filename is provided in this
- option, the board inspector will generate the file in the current
+ option, the Board Inspector will generate the file in the current
directory.
* - ``--basic``
- - Optional. By default, the board inspector parses the ACPI namespace when
+ - Optional. By default, the Board Inspector parses the ACPI namespace when
generating board configuration files. This option provides a way to
disable ACPI namespace parsing in case the parsing blocks the generation
of board configuration files.
@@ -110,6 +110,6 @@ Details about certain arguments:
* - ``--check-device-status``
- Optional. On some boards, the device status (reported by the _STA
object) returns 0 while the device object is still useful for
- pass-through devices. By default, the board inspector includes the
+ pass-through devices. By default, the Board Inspector includes the
devices in the board configuration file. This option filters out the
devices, so that they cannot be used.
|
Fix for resource leak in test_suite_ssl
Fix for coverity bugs 349041, 349052
Allocated pointers could potentially be leaked in the case of errors. | @@ -1179,6 +1179,7 @@ static int build_transforms( mbedtls_ssl_transform *t_in,
size_t keylen, maclen, ivlen;
unsigned char *key0 = NULL, *key1 = NULL;
+ unsigned char *md0 = NULL, *md1 = NULL;
unsigned char iv_enc[16], iv_dec[16];
#if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID)
@@ -1245,7 +1246,6 @@ static int build_transforms( mbedtls_ssl_transform *t_in,
cipher_info->mode == MBEDTLS_MODE_STREAM )
{
mbedtls_md_info_t const *md_info;
- unsigned char *md0, *md1;
/* Pick hash */
md_info = mbedtls_md_info_from_type( hash_id );
@@ -1283,9 +1283,6 @@ static int build_transforms( mbedtls_ssl_transform *t_in,
memcpy( &t_out->mac_dec, md0, maclen );
}
#endif
-
- mbedtls_free( md0 );
- mbedtls_free( md1 );
}
#else
((void) hash_id);
@@ -1417,6 +1414,9 @@ cleanup:
mbedtls_free( key0 );
mbedtls_free( key1 );
+ mbedtls_free( md0 );
+ mbedtls_free( md1 );
+
return( ret );
}
|
unit_tests.md: refine tutorial
BRANCH=none
TEST=rendered in gitiles
Tested-by: Tom Hughes | @@ -71,8 +71,8 @@ void run_test(int argc, char **argv)
}
```
-Create a `tasklist` file for you test that lists the tasks that should run as
-part of the test:
+In the [`test`] subdirectory, create a `tasklist` file for your test that lists
+the tasks that should run as part of the test:
`test/my_test.tasklist`:
@@ -91,6 +91,12 @@ Add the test to the `Makefile`:
test-list-host += my_test
```
+and
+
+```Makefile
+my_test-y=my_test.o
+```
+
Make sure you test shows up in the "host" tests:
```bash
|
trace: Generated streq-helper to ignore null terminator | @@ -217,11 +217,11 @@ class Probe(object):
fname = "streq_%d" % Probe.streq_index
Probe.streq_index += 1
self.streq_functions += """
-static inline bool %s(char const *ignored, unsigned long str) {
+static inline bool %s(char const *ignored, uintptr_t str) {
char needle[] = %s;
char haystack[sizeof(needle)];
bpf_probe_read(&haystack, sizeof(haystack), (void *)str);
- for (int i = 0; i < sizeof(needle); ++i) {
+ for (int i = 0; i < sizeof(needle)-1; ++i) {
if (needle[i] != haystack[i]) {
return false;
}
|
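The detail behind the loop-bound change above: sizeof on a string-literal array counts the trailing '\0', so iterating to sizeof(needle) - 1 compares only the visible characters and no longer requires the probed string to be terminated at exactly that position. A small standalone illustration with a hypothetical needle:

```c
#include <stdbool.h>
#include <stdio.h>

static bool matches_needle(const char *haystack)
{
    char needle[] = "open";  /* sizeof(needle) == 5, including the '\0' */
    for (size_t i = 0; i < sizeof(needle) - 1; ++i) {
        if (needle[i] != haystack[i])
            return false;
    }
    return true;
}

int main(void)
{
    printf("%d\n", matches_needle("openat"));  /* 1: the old sizeof(needle) bound would compare '\0' vs 'a' and fail */
    printf("%d\n", matches_needle("close"));   /* 0 */
    return 0;
}
```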
iotjs: Disable power management for artik053
Now aligned to the other machines' defconfig (artik053s, artik055s)
The observed issue was on the artik053 board: when wifi was used, the board froze
just before printing the message:
input_irq_handler: input_irq_handler: Bad sync in header: header=... | @@ -956,29 +956,7 @@ CONFIG_MM_REGIONS=1
#
# Power Management
#
-CONFIG_PM=y
-# CONFIG_DEBUG_PM is not set
-# CONFIG_PM_TEST is not set
-CONFIG_PM_DEVNAME_LEN=32
-# CONFIG_PM_METRICS is not set
-CONFIG_PM_SLICEMS=100
-CONFIG_PM_NDOMAINS=1
-CONFIG_PM_MEMORY=2
-CONFIG_PM_COEFN=1
-CONFIG_PM_COEF1=1
-CONFIG_PM_COEF2=1
-CONFIG_PM_COEF3=1
-CONFIG_PM_COEF4=1
-CONFIG_PM_COEF5=1
-CONFIG_PM_IDLEENTER_THRESH=1
-CONFIG_PM_IDLEEXIT_THRESH=2
-CONFIG_PM_IDLEENTER_COUNT=30
-CONFIG_PM_STANDBYENTER_THRESH=1
-CONFIG_PM_STANDBYEXIT_THRESH=2
-CONFIG_PM_STANDBYENTER_COUNT=50
-CONFIG_PM_SLEEPENTER_THRESH=1
-CONFIG_PM_SLEEPEXIT_THRESH=2
-CONFIG_PM_SLEEPENTER_COUNT=70
+# CONFIG_PM is not set
#
# Debug Options
|
ipsec: Typo in flag name
Type: fix | @@ -92,7 +92,7 @@ typedef struct ipsec_key_t_
_ (8, IS_TUNNEL_V6, "tunnel-v6") \
_ (16, UDP_ENCAP, "udp-encap") \
_ (32, IS_PROTECT, "Protect") \
- _ (64, IS_INBOUND, "inboud") \
+ _ (64, IS_INBOUND, "inbound") \
_ (128, IS_AEAD, "aead") \
typedef enum ipsec_sad_flags_t_
|
Shape of the earth not taken into account for Lambert Conformal Conic (part 1) | @@ -112,6 +112,35 @@ static int next(grib_iterator* i, double* lat, double* lon, double* val)
#define RAD2DEG 57.29577951308232087684 /* 180 over pi */
#define DEG2RAD 0.01745329251994329576 /* pi over 180 */
+double msfnz(double eccent, double sinphi, double cosphi)
+{
+ double con = eccent * sinphi;
+ return((cosphi / (sqrt (1.0 - con * con))));
+}
+
+/* Function to compute the constant small t for use in the forward
+ computations in the Lambert Conformal Conic and the Polar
+ Stereographic projections.
+--------------------------------------------------------------*/
+double tsfnz(
+ double eccent, /* Eccentricity of the spheroid */
+ double phi, /* Latitude phi */
+ double sinphi) /* Sine of the latitude */
+{
+ double con;
+ double com;
+
+ con = eccent * sinphi;
+ com = .5 * eccent;
+ con = pow(((1.0 - con) / (1.0 + con)),com);
+ return (tan(.5 * (M_PI_2 - phi))/con);
+}
+
+static double calculate_eccentricity(double minor, double major)
+{
+ double temp = minor / major;
+ return sqrt(1.0 - temp*temp);
+}
static int init(grib_iterator* iter, grib_handle* h, grib_arguments* args)
{
int i, j, err = 0;
@@ -148,8 +177,12 @@ static int init(grib_iterator* iter, grib_handle* h, grib_arguments* args)
return err;
if (grib_is_earth_oblate(h)) {
- grib_context_log(h->context, GRIB_LOG_ERROR, "Lambert Conformal only supported for spherical earth.");
- return GRIB_GEOCALCULUS_PROBLEM;
+ double earthMajorAxisInMetres, earthMinorAxisInMetres, e;
+ //grib_context_log(h->context, GRIB_LOG_ERROR, "Lambert Conformal only supported for spherical earth.");
+ //return GRIB_GEOCALCULUS_PROBLEM;
+ if ((err = grib_get_double_internal(h, "earthMajorAxisInMetres", &earthMajorAxisInMetres)) != GRIB_SUCCESS) return err;
+ if ((err = grib_get_double_internal(h, "earthMinorAxisInMetres", &earthMinorAxisInMetres)) != GRIB_SUCCESS) return err;
+ e = calculate_eccentricity(earthMinorAxisInMetres,earthMajorAxisInMetres);
}
if (iter->nv != nx * ny) {
|
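For reference, the calculate_eccentricity() helper added above implements the first eccentricity of the spheroid, e = sqrt(1 - (b/a)^2). A standalone sketch evaluating it with WGS84-like semi-axes; these numbers are illustrative and are not hard-coded by the patch, which reads the axes from the GRIB handle:

```c
#include <math.h>
#include <stdio.h>

static double calculate_eccentricity(double minor, double major)
{
    double temp = minor / major;
    return sqrt(1.0 - temp * temp);
}

int main(void)
{
    double major = 6378137.0;     /* semi-major axis in metres (WGS84-like) */
    double minor = 6356752.3142;  /* semi-minor axis in metres (WGS84-like) */
    printf("e = %.10f\n", calculate_eccentricity(minor, major));  /* roughly 0.0818 */
    return 0;
}
```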
sysdeps/managarm: convert mount request to bragi | #include <mlibc/allocator.hpp>
#include <mlibc/posix-pipe.hpp>
#include <posix.frigg_bragi.hpp>
+#include <bragi/helpers-frigg.hpp>
namespace mlibc {
int sys_mount(const char *source, const char *target,
const char *fstype, unsigned long flags, const void *data) {
SignalGuard sguard;
- HelAction actions[3];
- globalQueue.trim();
-
- managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
- req.set_request_type(managarm::posix::CntReqType::MOUNT);
+ managarm::posix::MountRequest<MemoryAllocator> req(getSysdepsAllocator());
req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), source));
req.set_target_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), target));
req.set_fs_type(frg::string<MemoryAllocator>(getSysdepsAllocator(), fstype));
- frg::string<MemoryAllocator> ser(getSysdepsAllocator());
- req.SerializeToString(&ser);
- actions[0].type = kHelActionOffer;
- actions[0].flags = kHelItemAncillary;
- actions[1].type = kHelActionSendFromBuffer;
- actions[1].flags = kHelItemChain;
- actions[1].buffer = ser.data();
- actions[1].length = ser.size();
- actions[2].type = kHelActionRecvInline;
- actions[2].flags = 0;
- HEL_CHECK(helSubmitAsync(getPosixLane(), actions, 3,
- globalQueue.getQueue(), 0, 0));
-
- auto element = globalQueue.dequeueSingle();
- auto offer = parseSimple(element);
- auto send_req = parseSimple(element);
- auto recv_resp = parseInline(element);
-
- HEL_CHECK(offer->error);
- HEL_CHECK(send_req->error);
- HEL_CHECK(recv_resp->error);
-
- managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
- resp.ParseFromArray(recv_resp->data, recv_resp->length);
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ auto resp = *bragi::parse_head_only<managarm::posix::SvrResponse>(recv_resp, getSysdepsAllocator());
__ensure(resp.error() == managarm::posix::Errors::SUCCESS);
return 0;
}
|
patch smart audio detection to fpv_switch init | @@ -213,18 +213,21 @@ void vtx_init() {
}
void vtx_update() {
+ static volatile uint32_t delay_loops = 5000;
#if defined(FPV_ON) && defined(FPV_PORT) && defined(FPV_PIN)
if (rx_aux_on(AUX_FPV_ON)) {
// fpv switch on
- if (!fpv_init && flags.rx_mode == RXMODE_NORMAL) {
+ if (!fpv_init && flags.rx_mode == RXMODE_NORMAL && flags.on_ground == 1) {
fpv_init = gpio_init_fpv(flags.rx_mode);
+ delay_loops = 1000;
+ vtx_connect_tries = 0;
}
if (fpv_init) {
GPIO_WriteBit(FPV_PORT, FPV_PIN, Bit_SET);
}
} else {
// fpv switch off
- if (fpv_init) {
+ if (fpv_init && flags.on_ground == 1) {
if (flags.failsafe) {
GPIO_WriteBit(FPV_PORT, FPV_PIN, Bit_SET);
} else {
@@ -234,7 +237,6 @@ void vtx_update() {
}
#endif
- static volatile uint32_t delay_loops = 5000;
if (delay_loops > 0) {
delay_loops--;
return;
|
modules/tools: use __repr__ for showing data
Also make column headers kwargs | # SPDX-License-Identifier: MIT
# Copyright (c) 2018-2020 Laurens Valk
-from tools import wait, StopWatch
-
# Import print for compatibility with 1.0 release
from builtins import print
+from tools import wait, StopWatch
class DataLog():
- def __init__(self, path, header=None):
- self.path = path
- self.file = open(self.path, 'w')
- if header:
- self.file.write('{0}\n'.format(header))
+ def __init__(self, path, *headers):
+ self.file = open(path, 'w+')
+ if len(headers) > 0:
+ print(*headers, sep=', ', file=self.file)
def log(self, *args):
print(*args, sep=', ', file=self.file)
- def show(self):
- self.file.close()
- print('Contents of {0}:'.format(self.path))
- with open(self.path, 'r') as of:
- print(of.read())
- self.file = open(self.path, 'a')
+ def __repr__(self):
+ self.file.seek(0, 0)
+ return self.file.read()
|
C++ SOVERSION bump to version 4.0.1 | @@ -9,7 +9,7 @@ set(CMAKE_CXX_FLAGS_DEBUG "-g -O0")
# with backward compatible change and micro version is connected with any internal change of the library.
set(SYSREPO_CPP_MAJOR_SOVERSION 4)
set(SYSREPO_CPP_MINOR_SOVERSION 0)
-set(SYSREPO_CPP_MICRO_SOVERSION 0)
+set(SYSREPO_CPP_MICRO_SOVERSION 1)
set(SYSREPO_CPP_SOVERSION_FULL ${SYSREPO_MAJOR_SOVERSION}.${SYSREPO_MINOR_SOVERSION}.${SYSREPO_MICRO_SOVERSION})
set(SYSREPO_CPP_SOVERSION ${SYSREPO_MAJOR_SOVERSION})
|
Don't listen to localhost in ssl_passphrase_callback test
Commit contained an unnecessary setting that listened to
localhost. Since the test doesn't actually try to make an SSL connection
to the database this isn't required. Moreover, it's a security hole.
Per gripe from Tom Lane. | @@ -28,7 +28,6 @@ $node->append_conf('postgresql.conf',
"ssl_passphrase.passphrase = '$rot13pass'");
$node->append_conf('postgresql.conf',
"shared_preload_libraries = 'ssl_passphrase_func'");
-$node->append_conf('postgresql.conf', "listen_addresses = 'localhost'");
$node->append_conf('postgresql.conf', "ssl = 'on'");
my $ddir = $node->data_dir;
|
Update drm rst in docs
Add note on drm rst page to remind users NOT to use shell regex with
the match and exclude options. | @@ -15,6 +15,17 @@ contents.
drm is a tool for removing files recursively in parallel.
drm behaves like `rm -rf`, but it is faster.
+.. note::
+
+ DO NOT USE SHELL REGEX!!!
+ The --match and --exclude options use POSIX regex syntax. Because of
+ this make sure that the shell does not try to interpret your regex before
+ it gets passed to the program. You can generally use quotes around your
+ regex to prevent the shell from expanding. An example of this using the
+ --match option with --dryrun would be:
+
+ mpirun -np 128 drm --dryrun -v --name --match 'file_.*' /path/to/dir/*
+
OPTIONS
-------
|
Kernel#open -> URI#open | @@ -108,7 +108,7 @@ require 'nokogiri'
require 'open-uri'
# Fetch and parse HTML document
-doc = Nokogiri::HTML(open('https://nokogiri.org/tutorials/installing_nokogiri.html'))
+doc = Nokogiri::HTML(URI.open('https://nokogiri.org/tutorials/installing_nokogiri.html'))
puts "### Search for nodes by css"
doc.css('nav ul.menu li a', 'article h2').each do |link|
|
metadata-fe: timeout preview fetch | @@ -35,7 +35,7 @@ const useMetadataState = createState<MetadataState>(
return state.previews[group];
}
try {
- const preview = await airlock.subscribeOnce('metadata-pull-hook', `/preview${group}`);
+ const preview = await airlock.subscribeOnce('metadata-pull-hook', `/preview${group}`, 20 * 1000);
if('metadata-hook-update' in preview) {
const newState = get();
newState.set((s) => {
|
Update SAL annotation | @@ -51,8 +51,8 @@ static ULONGLONG ReservedMemory;
BOOLEAN PhSipMemorySectionCallback(
_In_ PPH_SYSINFO_SECTION Section,
_In_ PH_SYSINFO_SECTION_MESSAGE Message,
- _In_opt_ PVOID Parameter1,
- _In_opt_ PVOID Parameter2
+ _In_ PVOID Parameter1,
+ _In_ PVOID Parameter2
)
{
switch (Message)
@@ -106,9 +106,6 @@ BOOLEAN PhSipMemorySectionCallback(
{
PPH_SYSINFO_CREATE_DIALOG createDialog = Parameter1;
- if (!createDialog)
- break;
-
createDialog->Instance = PhInstanceHandle;
createDialog->Template = MAKEINTRESOURCE(IDD_SYSINFO_MEM);
createDialog->DialogProc = PhSipMemoryDialogProc;
@@ -120,9 +117,6 @@ BOOLEAN PhSipMemorySectionCallback(
ULONG i;
LONG dpiValue;
- if (!drawInfo)
- break;
-
dpiValue = PhGetWindowDpi(Section->GraphHandle);
if (PhGetIntegerSetting(L"ShowCommitInSummary"))
@@ -197,9 +191,6 @@ BOOLEAN PhSipMemorySectionCallback(
ULONG usedPages;
PH_FORMAT format[3];
- if (!getTooltipText)
- break;
-
if (PhGetIntegerSetting(L"ShowCommitInSummary"))
{
usedPages = PhGetItemCircularBuffer_ULONG(&PhCommitHistory, getTooltipText->Index);
@@ -233,9 +224,6 @@ BOOLEAN PhSipMemorySectionCallback(
ULONG usedPages;
PH_FORMAT format[5];
- if (!drawPanel)
- break;
-
if (PhGetIntegerSetting(L"ShowCommitInSummary"))
{
totalPages = PhPerfInformation.CommitLimit;
|
[yt] yolint: fix migrations. | @@ -521,16 +521,3 @@ migrations:
- a.yandex-team.ru/yabs/vh/cms-pgaas/transcoded
- a.yandex-team.ru/yabs/telephony/platform/db-cleaner/internal
- a.yandex-team.ru/yaphone/gotifier/cmd
- - a.yandex-team.ru/yt/go/mapreduce/spec
- - a.yandex-team.ru/yt/idm-integration/cmd/yt-idm-integration-v2
- - a.yandex-team.ru/yt/idm-integration/internal/app
- - a.yandex-team.ru/yt/idm-integration/internal/app_test
- - a.yandex-team.ru/yt/jaeger
- - a.yandex-team.ru/yt/solomon-bridge/bridgex
- - a.yandex-team.ru/yt/solomon-bridge/bridgex_test
- - a.yandex-team.ru/yt/solomon-bridge/sensor
- - a.yandex-team.ru/yt/solomon-bridge/sensor_test
- - a.yandex-team.ru/yt/solomon-bridge/spack
- - a.yandex-team.ru/yt/solomon-bridge/spack_test
- - a.yandex-team.ru/yt/solomon-bridge/writer
- - a.yandex-team.ru/yt/solomon-bridge/writer_test
|
Improve stopping of timer | @@ -2977,20 +2977,23 @@ sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
* that were incremented in sctp_timer_start().
*/
if (tmr->ep != NULL) {
- SCTP_INP_DECR_REF(inp);
tmr->ep = NULL;
+ SCTP_INP_DECR_REF(inp);
}
if (tmr->tcb != NULL) {
- atomic_subtract_int(&stcb->asoc.refcnt, 1);
tmr->tcb = NULL;
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
}
if (tmr->net != NULL) {
+ struct sctp_nets *tmr_net;
+
/*
* Can't use net, since it doesn't work for
* SCTP_TIMER_TYPE_ASCONF.
*/
- sctp_free_remote_addr((struct sctp_nets *)tmr->net);
+ tmr_net = tmr->net;
tmr->net = NULL;
+ sctp_free_remote_addr(tmr_net);
}
} else {
SCTPDBG(SCTP_DEBUG_TIMER2,
|
crypto: fix compiler warnings in gpg
Handle unused return values of calls to dup() and write().
Related to . | @@ -784,25 +784,37 @@ int CRYPTO_PLUGIN_FUNCTION (gpgCall) (KeySet * conf, Key * errorKey, Key * msgKe
if (msgKey)
{
close (STDIN_FILENO);
- dup (pipe_stdin[0]);
+ if (dup (pipe_stdin[0]) < 0)
+ {
+ ELEKTRA_SET_ERROR (ELEKTRA_ERROR_CRYPTO_GPG, errorKey, "failed to redirect stdin.");
+ return -2;
+ }
}
close (pipe_stdin[0]);
// redirect stdout to pipe
close (STDOUT_FILENO);
- dup (pipe_stdout[1]);
+ if (dup (pipe_stdout[1]) < 0)
+ {
+ ELEKTRA_SET_ERROR (ELEKTRA_ERROR_CRYPTO_GPG, errorKey, "failed to redirect the stdout.");
+ return -2;
+ }
close (pipe_stdout[1]);
// redirect stderr to pipe
close (STDERR_FILENO);
- dup (pipe_stderr[1]);
+ if (dup (pipe_stderr[1]) < 0)
+ {
+ ELEKTRA_SET_ERROR (ELEKTRA_ERROR_CRYPTO_GPG, errorKey, "failed to redirect stderr.");
+ return -2;
+ }
close (pipe_stderr[1]);
// finally call the gpg executable
if (execv (argv[0], argv) < 0)
{
ELEKTRA_SET_ERRORF (ELEKTRA_ERROR_CRYPTO_GPG, errorKey, "failed to start the gpg binary: %s", argv[0]);
- return -1;
+ return -2;
}
// end of the child process
}
@@ -813,9 +825,19 @@ int CRYPTO_PLUGIN_FUNCTION (gpgCall) (KeySet * conf, Key * errorKey, Key * msgKe
close (pipe_stderr[1]);
// pass the message to the gpg process
- if (msgKey && keyGetValueSize (msgKey) > 0)
+ const ssize_t sendMessageSize = keyGetValueSize (msgKey);
+ if (msgKey && sendMessageSize > 0)
{
- write (pipe_stdin[1], keyValue (msgKey), keyGetValueSize (msgKey));
+ if (write (pipe_stdin[1], keyValue (msgKey), sendMessageSize) != sendMessageSize)
+ {
+ ELEKTRA_SET_ERROR (ELEKTRA_ERROR_CRYPTO_GPG, errorKey, "The communication with the GPG process failed.");
+ closePipe (pipe_stdin);
+ closePipe (pipe_stdout);
+ closePipe (pipe_stderr);
+ elektraFree (buffer);
+ elektraFree (argv[0]);
+ return -1;
+ }
}
close (pipe_stdin[1]);
@@ -841,6 +863,10 @@ int CRYPTO_PLUGIN_FUNCTION (gpgCall) (KeySet * conf, Key * errorKey, Key * msgKe
ELEKTRA_SET_ERROR (ELEKTRA_ERROR_CRYPTO_GPG, errorKey, "GPG reported a bad signature");
break;
+ case -2:
+ // error has been set to errorKey by the child process
+ break;
+
default:
// other errors
outputLen = read (pipe_stderr[0], errorBuffer, sizeof (errorBuffer));
|
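A compact, standalone sketch of the checking pattern the patch applies: treat a negative return from dup() as a fatal error, and require write() to report the full length before considering the write successful. The descriptors and message below are illustrative only; the real code redirects the gpg child's stdin/stdout/stderr through pipes:

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    const char msg[] = "hello\n";
    ssize_t expected = (ssize_t)strlen(msg);

    int copy = dup(STDOUT_FILENO);   /* duplicate a descriptor, as the child does before exec */
    if (copy < 0) {
        perror("dup");
        return 1;
    }
    if (write(copy, msg, (size_t)expected) != expected) {  /* a partial write counts as failure */
        perror("write");
        close(copy);
        return 1;
    }
    close(copy);
    return 0;
}
```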
Rust: Integrate cargo test into cmake | @@ -29,6 +29,10 @@ if (CARGO_EXECUTABLE)
ALL
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/elektra-sys/target/release/libelektra_sys.rlib")
+ add_test (NAME test_cargo_elektra_sys
+ COMMAND ${CARGO_EXECUTABLE} test
+ WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/elektra-sys")
+
else ()
exclude_binding (rust,
"cargo not found")
|
doc: Update CPU sharing tutorial
Clarify blocked state | @@ -135,10 +135,11 @@ VMs. The example extends the information provided in the :ref:`gsg`.
you assigned pCPUs 1 and 2 to VMs 1 and 2 (via the ACRN Configurator). vCPU 1
of VM 0 and vCPU 0 of VM 1 and VM 2 are running on the same physical CPU;
they are sharing the physical CPU execution time. The thread state column
- shows the current states of the vCPUs. BLOCKED state means the vCPU is
- waiting for an I/O operation to be completed. Once it is done, the state will
- change to RUNNABLE. When this vCPU gets its pCPU execution time, its state
- will change to RUNNING, then the vCPU is actually running on the pCPU.
+ shows the current states of the vCPUs. The BLOCKED state can occur for
+ different reasons, most likely the vCPU is waiting for an I/O operation to be
+ completed. Once it is done, the state will change to RUNNABLE. When this vCPU
+ gets its pCPU execution time, its state will change to RUNNING, then the vCPU
+ is actually running on the pCPU.
Learn More
**********
|
ci: relax
Addresses issues when running
due to GH rebases done in the GH UX rewriting the commit with the primary email
instead of whatever email was used originally. | @@ -32,7 +32,16 @@ for sha in $commits; do
lines="$(git show -s --format=%B ${sha})"
found_author=false
+ # Don't enforce committer email on forks; this primarily avoids issues
+ # running workflows on the zephyr fork, because rebases done in the GH UX
+ # use the primary email of the committer, which might not match the one
+ # used in git CLI.
+ if [[ $GITHUB_REPOSITORY == mcu-tools/* ]]; then
found_committer=false
+ else
+ found_committer=true
+ fi
+
IFS=$'\n'
for line in ${lines}; do
stripped=$(echo $line | sed -e 's/^\s*//' | sed -e 's/\s*$//')
|
[catboost] Fix test_whl.py | @@ -14,7 +14,7 @@ def test_wheel():
cpu_so_path = yatest.common.binary_path(os.path.join(PYTHON_PACKAGE_DIR, "catboost", "no_cuda", cpu_so_name))
wheel_name = 'catboost.whl'
- make_wheel(wheel_name, 'catboost', yatest.common.source_path('.'), cpu_so_path)
+ make_wheel(wheel_name, 'catboost', '0.0.0', yatest.common.source_path('.'), cpu_so_path)
with zipfile.ZipFile(wheel_name, 'r') as f:
f.extractall('catboost')
|
Docs: Fixed a link jump error
The problem is described in the issue | @@ -261,7 +261,7 @@ Compared with Ethereum, the differences are listed below:
### Protocol Layer
#### Overview
The protocol layer is located in the second layer of the BoAT SDK, which mainly implements the protocol part of each blockchain. For Ethereum series blockchains, their protocols are very similar, such as Ethereum and PlatONE.
-The protocol layer is supported by the RPC layer. Please refer to [Protocol layer](#Protocol-layer).
+The protocol layer is supported by the RPC layer. Please refer to [RPC Layer](#RPC-Layer).
#### Ethereum's Protocol Layer Implementation
##### Raw Transaction Interface
|
Update Plan 9 bootstrap. | @@ -72,10 +72,10 @@ echo $pwd/6/6.out -I ../sys -I . blat.myr && $pwd/6/6.out -I ../sys -I . blat
echo $pwd/6/6.out -I ../sys -I . env+plan9.myr && $pwd/6/6.out -I ../sys -I . env+plan9.myr &&\
echo $pwd/6/6.out -I ../sys -I . execvp.myr && $pwd/6/6.out -I ../sys -I . execvp.myr &&\
echo $pwd/6/6.out -I ../sys -I . slput.myr && $pwd/6/6.out -I ../sys -I . slput.myr &&\
+echo $pwd/6/6.out -I ../sys -I . wait+plan9.myr && $pwd/6/6.out -I ../sys -I . wait+plan9.myr &&\
echo $pwd/6/6.out -I ../sys -I . spork.myr && $pwd/6/6.out -I ../sys -I . spork.myr &&\
echo $pwd/6/6.out -I ../sys -I . diriter.myr && $pwd/6/6.out -I ../sys -I . diriter.myr &&\
echo $pwd/6/6.out -I ../sys -I . clear.myr && $pwd/6/6.out -I ../sys -I . clear.myr &&\
-echo $pwd/6/6.out -I ../sys -I . wait+plan9.myr && $pwd/6/6.out -I ../sys -I . wait+plan9.myr &&\
echo $pwd/6/6.out -I ../sys -I . strjoin.myr && $pwd/6/6.out -I ../sys -I . strjoin.myr &&\
echo $pwd/6/6.out -I ../sys -I . pathjoin.myr && $pwd/6/6.out -I ../sys -I . pathjoin.myr &&\
echo $pwd/6/6.out -I ../sys -I . mktemp.myr && $pwd/6/6.out -I ../sys -I . mktemp.myr &&\
|
Fix synchro Solr schema | </analyzer>
</fieldType>
<!-- Entity extraction -->
-
+ <!--
<fieldType name="key_phrases" class="solr.TextField" sortMissingLast="true" omitNorms="true">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
-
+ -->
</types>
|
Participants: fix display of nonmembers
Fixes urbit/landscape | @@ -33,6 +33,7 @@ import { StatelessAsyncAction } from '~/views/components/StatelessAsyncAction';
import useLocalState from '~/logic/state/local';
import useContactState from '~/logic/state/contact';
import useSettingsState, { selectCalmState } from '~/logic/state/settings';
+import {deSig} from '@urbit/api';
const TruncText = styled(Text)`
white-space: nowrap;
@@ -55,11 +56,16 @@ const searchParticipant = (search: string) => (p: Participant) => {
};
function getParticipants(cs: Contacts, group: Group) {
- const contacts: Participant[] = _.map(cs, (c, patp) => ({
+ const contacts: Participant[] = _.flow(
+ f.omitBy<Contacts>((_c, patp) => !group.members.has(patp.slice(1))),
+ f.toPairs,
+ f.map(([patp, c]: [string, Contact]) => ({
...c,
patp,
pending: false
- }));
+ }))
+ )(cs);
+ console.log(contacts);
const members: Participant[] = _.map(
Array.from(group.members)
.filter(e => group?.policy?.invite?.pending ? !group.policy.invite.pending.has(e) : true), m =>
@@ -84,7 +90,7 @@ const emptyContact = (patp: string, pending: boolean): Participant => ({
nickname: '',
bio: '',
status: '',
- color: '',
+ color: '0x0',
avatar: null,
cover: null,
groups: [],
|
Updated workflow. | @@ -45,10 +45,6 @@ jobs:
image: ubuntu:18.04
cc: gcc
cxx: g++
- - label: "Ubuntu 16.04 (Xenial Xerus) with GCC"
- image: ubuntu:16.04
- cc: gcc
- cxx: g++
# ====== Debian Linux =============================================
- label: "Debian 12 (Bookworm) with GCC"
|
linops: optimize linop_plus if one of the arguments is linop_null | @@ -531,6 +531,19 @@ static void plus_free(const linop_data_t* _data)
struct linop_s* linop_plus(const struct linop_s* a, const struct linop_s* b)
{
+
+#if 1
+ // detect null operations and just clone
+
+ if (operator_zero_or_null_p(a->forward)) {
+
+ return (struct linop_s*) linop_clone(b);
+ } else if (operator_zero_or_null_p(b->forward)) {
+
+ return (struct linop_s*) linop_clone(a);
+ }
+#endif
+
auto bdo = linop_domain(b);
assert(CFL_SIZE == bdo->size);
iovec_check(linop_domain(a), bdo->N, bdo->dims, bdo->strs);
@@ -542,8 +555,6 @@ struct linop_s* linop_plus(const struct linop_s* a, const struct linop_s* b)
PTR_ALLOC(struct plus_data_s, data);
SET_TYPEID(plus_data_s, data);
- // maybe detect null operations and just clone
-
data->a = linop_clone(a);
data->b = linop_clone(b);
|
UserNotes: Check current directory for portable database file | @@ -273,7 +273,9 @@ VOID NTAPI LoadCallback(
_In_opt_ PVOID Context
)
{
+ static PH_STRINGREF databaseFile = PH_STRINGREF_INIT(L"usernotesdb.xml");
PPH_PLUGIN toolStatusPlugin;
+ PPH_STRING directory;
PPH_STRING path;
if (toolStatusPlugin = PhFindPlugin(TOOLSTATUS_PLUGIN_NAME))
@@ -284,21 +286,29 @@ VOID NTAPI LoadCallback(
ToolStatusInterface = NULL;
}
+ directory = PH_AUTO(PhGetApplicationDirectory());
+ path = PH_AUTO(PhConcatStringRef2(&directory->sr, &databaseFile));
+
+ if (RtlDoesFileExists_U(path->Buffer))
+ {
+ SetDbPath(path);
+ }
+ else
+ {
path = PhaGetStringSetting(SETTING_NAME_DATABASE_PATH);
path = PH_AUTO(PhExpandEnvironmentStrings(&path->sr));
- LoadCustomColors();
-
if (RtlDetermineDosPathNameType_U(path->Buffer) == RtlPathTypeRelative)
{
- PPH_STRING directory;
-
directory = PH_AUTO(PhGetApplicationDirectory());
path = PH_AUTO(PhConcatStringRef2(&directory->sr, &path->sr));
}
SetDbPath(path);
+ }
+
LoadDb();
+ LoadCustomColors();
}
VOID NTAPI UnloadCallback(
|
Add ./ to path of tools. | @@ -17,7 +17,7 @@ status=0
# Run ippserver...
echo "Running ippserver..."
cd ..
-CUPS_DEBUG_LOG=test-cups.log CUPS_DEBUG_LEVEL=4 CUPS_DEBUG_FILTER='^(http|_http|ipp|_ipp|cupsDo|cupsGet|cupsSend)' server/ippserver -vvv -C test 2>test-ippserver.log &
+CUPS_DEBUG_LOG=test-cups.log CUPS_DEBUG_LEVEL=4 CUPS_DEBUG_FILTER='^(http|_http|ipp|_ipp|cupsDo|cupsGet|cupsSend)' ./server/ippserver -vvv -C test 2>test-ippserver.log &
ippserver=$!
echo "ippserver has PID $ippserver, waiting for server to come up..."
@@ -26,11 +26,11 @@ echo ""
# Test the instance...
echo "Running ippfind + ipptool..."
-libcups/tools/ippfind-static -T 5 --literal-name "ipp-everywhere-pdf" --exec libcups/tools/ipptool-static -V 2.0 -tIf libcups/examples/document-letter.pdf '{}' libcups/examples/ipp-2.0.test \; || status=1
+./libcups/tools/ippfind-static -T 5 --literal-name "ipp-everywhere-pdf" --exec ./libcups/tools/ipptool-static -V 2.0 -tIf libcups/examples/document-letter.pdf '{}' libcups/examples/ipp-2.0.test \; || status=1
echo ""
echo "Running IPP System Service tests..."
-libcups/tools/ippfind-static -T 5 --literal-name "ipp-everywhere-pdf" --exec libcups/tools/ipptool-static -V 2.0 -tI 'ipp://{service_hostname}:{service_port}/ipp/system' examples/pwg5100.22.test \; || status=1
+./libcups/tools/ippfind-static -T 5 --literal-name "ipp-everywhere-pdf" --exec ./libcups/tools/ipptool-static -V 2.0 -tI 'ipp://{service_hostname}:{service_port}/ipp/system' examples/pwg5100.22.test \; || status=1
# Clean up
kill $ippserver
|
Visual C: reduce the dependency paths to be relative | @@ -12,7 +12,7 @@ use warnings;
use lib '.';
use configdata;
-use File::Spec::Functions qw(canonpath rel2abs);
+use File::Spec::Functions qw(:DEFAULT rel2abs);
use File::Compare qw(compare_text);
# When using stat() on Windows, we can get it to perform better by avoid some
@@ -42,11 +42,26 @@ exit 0 unless $rebuild;
# Ok, primary checks are done, time to do some real work
+my $producer = shift @ARGV;
+die "Producer not given\n" unless $producer;
+
my $abs_srcdir = rel2abs($config{sourcedir});
my $abs_blddir = rel2abs($config{builddir});
-my $producer = shift @ARGV;
-die "Producer not given\n" unless $producer;
+# Convenient cache of absolute to relative map. We start with filling it
+# with mappings for the known generated header files. They are relative to
+# the current working directory, so that's an easy task.
+# NOTE: there's more than C header files that are generated. They will also
+# generate entries in this map. We could of course deal with C header files
+# only, but in case we decide to handle more than just C files in the future,
+# we already have the mechanism in place here.
+# NOTE2: we lower case the index to make it searchable without regard for
+# character case. That could seem dangerous, but as long as we don't have
+# files we depend on in the same directory that only differ by character case,
+# we're fine.
+my %depconv_cache =
+ map { lc catfile($abs_blddir, $_) => $_ }
+ keys %{$unified_info{generate}};
my %procedures = (
'gcc' => undef, # gcc style dependency files needs no mods
@@ -138,12 +153,22 @@ my %procedures = (
# VC gives us absolute paths for all include files, so to
# remove system header dependencies, we need to check that
- # they don't match $abs_srcdir or $abs_blddir
- $tail = canonpath($tail);
- if ($tail =~ m|^\Q$abs_srcdir\E|i
- || $tail =~ m|^\Q$abs_blddir\E|i) {
- return ($objfile, "\"$tail\"");
+ # they don't match $abs_srcdir or $abs_blddir.
+ $tail = lc canonpath($tail);
+
+ unless (defined $depconv_cache{$tail}) {
+ my $dep = $tail;
+ # Since we have already pre-populated the cache with
+ # mappings for generated headers, we only need to deal
+ # with the source tree.
+ if ($dep =~ s|^\Q$abs_srcdir\E\\|\$(SRCDIR)\\|i) {
+ $depconv_cache{$tail} = $dep;
+ }
}
+ return ($objfile, '"'.$depconv_cache{$tail}.'"')
+ if defined $depconv_cache{$tail};
+ print STDERR "DEBUG[VC]: ignoring $objfile <- $tail\n"
+ if $debug;
}
return undef;
|
Print error if can't create directory for system resources | @@ -185,7 +185,9 @@ int lime::downloadImageResource(const std::string &name)
#else
const std::string mkdirCmd("md.exe \""+destDir+"\"");
#endif
- std::system(mkdirCmd.c_str());
+ int result = std::system(mkdirCmd.c_str());
+ if (result != 0)
+ return lime::ReportError(result, "Failed to create directory: %s", destDir.c_str());
}
//check for write access
|
[config] Load config changes to local copy of overlay_params so it does not block rendering thread while parsing | @@ -13,6 +13,7 @@ static void *fileChanged(void *params_void) {
notify_thread *nt = reinterpret_cast<notify_thread *>(params_void);
int length, i = 0;
char buffer[EVENT_BUF_LEN];
+ overlay_params local_params = *nt->params;
while (!nt->quit) {
length = read( nt->fd, buffer, EVENT_BUF_LEN );
@@ -21,14 +22,14 @@ static void *fileChanged(void *params_void) {
(struct inotify_event *) &buffer[i];
i += EVENT_SIZE + event->len;
if (event->mask & IN_MODIFY) {
+ parse_overlay_config(&local_params, getenv("MANGOHUD_CONFIG"));
std::lock_guard<std::mutex> lk(nt->mutex);
- parse_overlay_config(nt->params, getenv("MANGOHUD_CONFIG"));
+ *nt->params = local_params;
}
}
i = 0;
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
- printf("%s quit\n", __func__);
return nullptr;
}
|
dnstap io, fix for review comment. | @@ -1681,7 +1681,6 @@ static void dtio_open_output(struct dt_io_thread* dtio)
dtio_reconnect_enable(dtio);
return;
}
- }
if(dtio->upstream_is_tls) {
if(!dtio_setup_ssl(dtio)) {
dtio_close_fd(dtio);
@@ -1689,6 +1688,7 @@ static void dtio_open_output(struct dt_io_thread* dtio)
return;
}
}
+ }
dtio->check_nb_connect = 1;
/* the EV_READ is to catch channel close, write to write packets */
|
libnet: some bugfixes in the DHCP functionality | ///< the DHCP timeout in milli seconds
#define DHCP_TIMEOUT_MSECS (120UL * 1000)
+#define DHCP_RECORD_FIELDS "{ ip: %d, gw: %d, netmask: %d }"
-#define DHCP_RECORD_FORMAT "net.ipconfig {ip: %" PRIu32 ", "\
- "gw:" PRIu32 ", " \
- "netmask: %" PRIu32 "}"
+
+#define DHCP_RECORD_FORMAT "net.ipconfig " DHCP_RECORD_FIELDS
#define DHCP_RECORD_REGEX "net.ipconfig {ip: _, gw: _, netmask: _}"
@@ -85,6 +85,12 @@ errval_t dhcpd_start(net_flags_t flags)
struct net_state *st = get_default_net_state();
+ // initialize octopus if not already done
+ err = oct_init();
+ if (err_is_fail(err)) {
+ return err;
+ }
+
NETDEBUG("starting DHCP...\n");
err_t lwip_err = dhcp_start(&st->netif);
if(lwip_err != ERR_OK) {
@@ -92,6 +98,7 @@ errval_t dhcpd_start(net_flags_t flags)
return -1;
}
+ st->waitset = get_default_waitset();
st->dhcp_ticks = 1;
st->dhcp_running = 1;
@@ -149,22 +156,34 @@ static void dhcpd_change_event(octopus_mode_t mode, const char* record, void* ar
struct net_state *st = arg;
- if (mode & OCT_ON_SET) {
+ NETDEBUG("DHCP change event: %s\n", record);
- ip_addr_t ip, nm, gw;
+ if (mode & OCT_ON_SET) {
- err = oct_read(record, DHCP_RECORD_FORMAT, &ip.addr, &gw.addr, &nm.addr);
+ uint64_t ip, nm, gw;
+ err = oct_read(record, "_" DHCP_RECORD_FIELDS, &ip, &gw, &nm);
if (err_is_fail(err)) {
DEBUG_ERR(err, "cannot read DHCPD record '%s\n", record);
+ return;
}
- netif_set_addr(&st->netif, &ip, &nm, &gw);
+ ip_addr_t ipaddr, netmask, gateway;
+ ipaddr.addr = (uint32_t)ip;
+ netmask.addr = (uint32_t)nm;
+ gateway.addr = (uint32_t)gw;
+
+ NETDEBUG("DHCP got ip set: %s \n", ip4addr_ntoa(&ipaddr));
+ NETDEBUG("DHCP got gw set: %s\n", ip4addr_ntoa(&gateway));
+ NETDEBUG("DHCP got nm set: %s\n", ip4addr_ntoa(&netmask));
+
+ netif_set_addr(&st->netif, &ipaddr, &netmask, &gateway);
netif_set_up(&st->netif);
st->dhcp_done = true;
}
if (mode & OCT_ON_DEL) {
+
/* DHCP has been removed */
netif_set_down(&st->netif);
}
@@ -179,7 +198,14 @@ errval_t dhcpd_query(net_flags_t flags)
{
errval_t err;
+ // initialize octopus if not already done
+ err = oct_init();
+ if (err_is_fail(err)) {
+ return err;
+ }
+
struct net_state *st = get_default_net_state();
+ assert(st);
st->dhcp_ticks = 1;
st->dhcp_running = 1;
|
growth_factor_and_growth_rate changes | @@ -80,7 +80,9 @@ static double df_integrand(double a,void * spline_void)
INPUT: scale factor, cosmology
TASK: compute the growth (D(z)) and the growth rate, logarithmic derivative (f?)
*/
-static int growth_factor_and_growth_rate(double a,double *gf,double *fg,ccl_cosmology *cosmo)
+
+// RH had issuse here
+static int growth_factor_and_growth_rate(double a,double *gf,double *fg,ccl_cosmology *cosmo, int * status)
{
if(a<EPS_SCALEFAC_GROWTH) {
*gf=a;
@@ -98,10 +100,10 @@ static int growth_factor_and_growth_rate(double a,double *gf,double *fg,ccl_cos
y[1]=EPS_SCALEFAC_GROWTH*EPS_SCALEFAC_GROWTH*EPS_SCALEFAC_GROWTH*
h_over_h0(EPS_SCALEFAC_GROWTH,&(cosmo->params));
- int status=gsl_odeiv2_driver_apply(d,&ainit,a,y);
+ * status=gsl_odeiv2_driver_apply(d,&ainit,a,y);
gsl_odeiv2_driver_free(d);
- if(status!=GSL_SUCCESS)
+ if(*status!=GSL_SUCCESS)
return 1;
*gf=y[0];
|
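The entry above switches growth_factor_and_growth_rate from a local status variable to an int *status output parameter, so the caller can see the GSL return code instead of just a generic failure. A rough stand-alone sketch of that out-parameter pattern only; solve_step and compute_growth below are made-up stand-ins, not CCL or GSL code:

#include <stdio.h>

/* Stand-in for a library call that returns 0 on success (like GSL_SUCCESS). */
static int solve_step(double *y)
{
    *y *= 2.0;              /* pretend the integration advanced the solution */
    return 0;
}

/* The result goes to *gf; the library return code goes to *status,
 * so the caller can decide how to report a solver failure. */
static int compute_growth(double a, double *gf, int *status)
{
    double y = a;

    *status = solve_step(&y);   /* propagate the solver code instead of hiding it */
    if (*status != 0)
        return 1;               /* signal failure; *status says why */

    *gf = y;
    return 0;
}

int main(void)
{
    double gf;
    int status = 0;

    if (compute_growth(0.5, &gf, &status))
        fprintf(stderr, "solver failed, status=%d\n", status);
    else
        printf("growth factor: %f\n", gf);
    return 0;
}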
OPENSSL_NO_GOST has nothing to do with low-level algos | @@ -150,7 +150,6 @@ EOF
);
foreach my $cmd (
"md2", "md4", "md5",
- "gost",
"sha1", "sha224", "sha256", "sha384",
"sha512", "sha512-224", "sha512-256",
"sha3-224", "sha3-256", "sha3-384", "sha3-512",
|
pbio/hbridge: direction enum even without hbridge
This is also used by other modules (tacho, gyro), and should probably be moved elsewhere. | #include <stdint.h>
#include <pbio/port.h>
+typedef enum {
+ PBIO_DIRECTION_CLOCKWISE, /**< Positive means clockwise */
+ PBIO_DIRECTION_COUNTERCLOCKWISE, /**< Positive means counterclockwise */
+} pbio_direction_t;
+
#if PBDRV_CONFIG_HBRIDGE
#define PBIO_DUTY_STEPS (PBDRV_MAX_DUTY)
#define PBIO_DUTY_USER_STEPS (100)
#define PBIO_DUTY_STEPS_PER_USER_STEP (PBIO_DUTY_STEPS/PBIO_DUTY_USER_STEPS)
-typedef enum {
- PBIO_DIRECTION_CLOCKWISE, /**< Positive means clockwise */
- PBIO_DIRECTION_COUNTERCLOCKWISE, /**< Positive means counterclockwise */
-} pbio_direction_t;
typedef enum {
PBIO_HBRIDGE_COAST, /**< hbridge set to coast */
|
fix(draw_img):radius Mask doesn't work in Specific condition | @@ -358,8 +358,7 @@ LV_ATTRIBUTE_FAST_MEM static void lv_draw_map(const lv_area_t * map_area, const
draw_area.x2 -= disp_area->x1;
draw_area.y2 -= disp_area->y1;
- bool mask_any = lv_draw_mask_is_any(map_area);
-
+ bool mask_any = lv_draw_mask_is_any(&draw_area);
/*The simplest case just copy the pixels into the draw_buf*/
if(!mask_any && draw_dsc->angle == 0 && draw_dsc->zoom == LV_IMG_ZOOM_NONE &&
chroma_key == false && alpha_byte == false && draw_dsc->recolor_opa == LV_OPA_TRANSP) {
|
Make script project CMake script more extensible. | @@ -32,18 +32,18 @@ get_filename_component(SCRIPT_PROJECT_CONFIG_PATH ${CMAKE_CURRENT_LIST_FILE} PAT
function(script_project name language configuration)
- # Create project file
+ # Define upper and lower versions of the language
string(TOLOWER ${language} language_lower)
- # Create project file
+ # Define project target name
set(custom_target "${language_lower}-${name}")
+ # Define target for the configuration
+ set(PACKAGE_TARGET "${custom_target}")
+
# Create project file
configure_file(${configuration} ${custom_target}-config.cmake @ONLY)
- # Include generated project file
- include(${CMAKE_CURRENT_BINARY_DIR}/${custom_target}-config.cmake)
-
# Set custom target
add_custom_target(${custom_target} ALL)
@@ -77,4 +77,7 @@ function(script_project name language configuration)
${CMAKE_CURRENT_SOURCE_DIR}/source ${LOADER_SCRIPT_PATH}
)
+ # Include generated project file
+ include(${CMAKE_CURRENT_BINARY_DIR}/${custom_target}-config.cmake)
+
endfunction()
|
filter_kubernetes: fix metadata lookup when source is Systemd | @@ -195,7 +195,7 @@ static int pack_map_content(msgpack_packer *pck, msgpack_sbuffer *sbuf,
msgpack_object k;
msgpack_object v;
msgpack_object root;
- struct flb_time log_time;
+ struct flb_time log_time = {0};
/* Original map size */
map_size = source_map.via.map.size;
@@ -389,6 +389,7 @@ static int cb_kube_filter(void *data, size_t bytes,
struct flb_config *config)
{
int ret;
+ size_t pre = 0;
size_t off = 0;
char *cache_buf = NULL;
size_t cache_size = 0;
@@ -402,10 +403,10 @@ static int cb_kube_filter(void *data, size_t bytes,
struct flb_kube *ctx = filter_context;
struct flb_kube_meta meta = {0};
struct flb_kube_props props = {0};
-
(void) f_ins;
(void) config;
+ if (ctx->use_journal == FLB_FALSE) {
/* Check if we have some cached metadata for the incoming events */
ret = flb_kube_meta_get(ctx,
tag, tag_len,
@@ -419,6 +420,7 @@ static int cb_kube_filter(void *data, size_t bytes,
if (props.parser != NULL) {
parser = flb_parser_get(props.parser, config);
}
+ }
/* Create temporal msgpack buffer */
msgpack_sbuffer_init(&tmp_sbuf);
@@ -432,6 +434,33 @@ static int cb_kube_filter(void *data, size_t bytes,
continue;
}
+ /*
+ * Journal entries can be origined by different Pods, so we are forced
+ * to parse and check it metadata.
+ *
+ * note: when the source is in_tail the situation is different since all
+ * records passed to the filter have a unique source log file.
+ */
+ if (ctx->use_journal == FLB_TRUE) {
+ parser = NULL;
+ cache_buf = NULL;
+ memset(&props, '\0', sizeof(struct flb_kube_props));
+
+ ret = flb_kube_meta_get(ctx,
+ tag, tag_len,
+ data + pre, off - pre,
+ &cache_buf, &cache_size, &meta, &props);
+ if (ret == -1) {
+ flb_kube_prop_destroy(&props);
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ if (props.parser != NULL) {
+ parser = flb_parser_get(props.parser, config);
+ }
+ pre = off;
+ }
+
/* get time and map */
time = root.via.array.ptr[0];
map = root.via.array.ptr[1];
@@ -451,14 +480,21 @@ static int cb_kube_filter(void *data, size_t bytes,
flb_free(cache_buf);
}
+ flb_kube_meta_release(&meta);
flb_kube_prop_destroy(&props);
return FLB_FILTER_NOTOUCH;
}
+
+ if (ctx->use_journal == FLB_TRUE) {
+ flb_kube_meta_release(&meta);
+ }
}
msgpack_unpacked_destroy(&result);
/* Release meta fields */
+ if (ctx->use_journal == FLB_FALSE) {
flb_kube_meta_release(&meta);
+ }
/* link new buffers */
*out_buf = tmp_sbuf.data;
|
Input script ctx generation more DRY | @@ -1684,12 +1684,12 @@ class ScriptBuilder {
this._addComment(`Input Script Attach`);
const scriptRef = this._compileSubScript("input", script);
const inputValue = inputDec(input);
- let ctx = inputValue.toString(2).padStart(8, "0").indexOf("1");
- if (ctx === -1) {
- ctx = 0;
+ let ctx = inputValue.toString(2).padStart(8, "0").indexOf("1") + 1;
+ if (ctx <= 0) {
+ ctx = 1;
}
- this._inputContextPrepare(scriptRef, ctx + 1);
- this._inputContextAttach(inputValue, ctx + 1);
+ this._inputContextPrepare(scriptRef, ctx);
+ this._inputContextAttach(inputValue, ctx);
this._addNL();
};
|
Update CMakeLists.txt
add "-Wno-format-truncation" to supress format truncation warning in cmake | @@ -100,6 +100,13 @@ include (CheckFunctionKeywords)
include (CheckIncludeFiles)
include (CheckTypeSize)
+# supress format-truncation warning
+include (CheckCCompilerFlag)
+check_c_compiler_flag(-Wno-format-truncation HAS_NO_FORMAT_TRUNCATION)
+if (HAS_NO_FORMAT_TRUNCATION)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-format-truncation")
+endif()
+
if (MSVC)
# Turn off Microsofts "security" warnings.
add_definitions( "/W3 /D_CRT_SECURE_NO_WARNINGS /wd4005 /wd4996 /nologo" )
|
VERSION bump to version 1.3.49 | @@ -31,7 +31,7 @@ endif()
# micro version is changed with a set of small changes or bugfixes anywhere in the project.
set(SYSREPO_MAJOR_VERSION 1)
set(SYSREPO_MINOR_VERSION 3)
-set(SYSREPO_MICRO_VERSION 48)
+set(SYSREPO_MICRO_VERSION 49)
set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION})
# Version of the library
|
snap: bump to v1.5.0 | name: fluent-bit
base: core18
-version: '1.4.0'
+version: '1.5.0'
summary: High performance logs and stream processor
description: |
Fluent Bit is a high performance log processor and stream processor for Linux.
|
Fix character positioning for colourlcd240x240 | @@ -394,7 +394,7 @@ mp_obj_t BreakoutColourLCD240x240_character(size_t n_args, const mp_obj_t *pos_a
breakout_colourlcd240x240_BreakoutColourLCD240x240_obj_t *self = MP_OBJ_TO_PTR2(args[ARG_self].u_obj, breakout_colourlcd240x240_BreakoutColourLCD240x240_obj_t);
int c = mp_obj_get_int(args[ARG_char].u_obj);
- int x = args[ARG_y].u_int;
+ int x = args[ARG_x].u_int;
int y = args[ARG_y].u_int;
Point p(x, y);
|
adding libtool-ltdl-devel to buildrequires | @@ -71,6 +71,7 @@ BuildRequires: rpm-build
BuildRequires: autoconf
BuildRequires: automake
BuildRequires: libtool
+BuildRequires: libtool-ltdl-devel
BuildRequires: hwloc-devel
BuildRequires: libX11-devel
BuildRequires: libXt-devel
|
snap: out_pgsql: Added support of pgsql plugin to snap package | @@ -14,6 +14,7 @@ confinement: 'strict'
apps:
fluent-bit:
command: bin/fluent-bit
+ daemon: simple
parts:
fluent-bit:
@@ -23,6 +24,7 @@ parts:
stage-packages:
- libsasl2-2
- libssl1.1
+ - libpq5
build-packages:
- g++
- make
@@ -32,9 +34,12 @@ parts:
- bison
- valgrind
- libssl-dev
+ - libpq5
+ - postgresql-server-dev-all
configflags:
- -DFLB_DEBUG=On
- -DFLB_OUT_KAFKA=On
- -DFLB_JEMALLOC=On
- -DFLB_EXAMPLES=OFF
- -DFLB_SHARED_LIB=Off
+ - -DFLB_OUT_PGSQL=On
|
[platform][rosco-m68k] change the tick rate to 100Hz
1000 Hz is probably a bit much for this board. | // driver for a 68c681 acting as a dual uart and a system timer and a few gpios
// ticks in units of ms
+#define TICK_HZ (100U)
+#define TICK_MS (1000U / TICK_HZ)
+STATIC_ASSERT(TICK_HZ * TICK_MS == 1000U);
static volatile uint32_t ticks;
// periodic timer callback stuff
@@ -97,11 +100,11 @@ void duart_early_init(void) {
// TODO: set up UARTA again
// for now assume it's already configured
- // set up a periodic counter at 1khz
+ // set up a periodic counter at TICK_HZ
read_reg(DUART_REG_STC_R); // stop the counter
// compute the counter
- uint16_t count = 3686400 / 2 / 1000;
+ uint16_t count = 3686400 / 2 / TICK_HZ;
write_reg(DUART_REG_CTL_RW, count & 0xff);
write_reg(DUART_REG_CTU_RW, (count >> 8) & 0xff);
@@ -123,7 +126,6 @@ void duart_init(void) {
// enable uart RX irq
cached_imr |= (1<<1); // RXRDY/FFULLA
write_reg(DUART_REG_IMR_W, cached_imr);
-
}
status_t platform_set_periodic_timer(platform_timer_callback callback, void *arg, lk_time_t interval) {
@@ -142,7 +144,7 @@ enum handler_return duart_irq(void) {
uint8_t isr = read_reg(DUART_REG_ISR_R);
if (likely(isr & (1<<3))) { // counter #1 ready
- ticks++;
+ ticks += TICK_MS;
// ack the timer hardware
read_reg(DUART_REG_STC_R);
@@ -227,6 +229,8 @@ void target_set_debug_led(unsigned int led, bool on) {
case 1:
bit = 3; // red LED
break;
+ default:
+ return;
}
if (on) {
|
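For the entry above, the reload value follows directly from the code in the diff: the driver divides the 3.6864 MHz input by 2 and then by TICK_HZ, and each timer interrupt now advances the millisecond counter by TICK_MS. A small stand-alone sketch of that arithmetic, with the constants copied from the diff; this is illustrative plain C, not rosco-m68k driver code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DUART_TIMER_HZ (3686400U / 2U)    /* same divisor the driver uses */
#define TICK_HZ        (100U)             /* new tick rate */
#define TICK_MS        (1000U / TICK_HZ)  /* each interrupt adds 10 ms */

int main(void)
{
    uint16_t count = DUART_TIMER_HZ / TICK_HZ;   /* 18432, fits the 16-bit counter */

    assert(TICK_HZ * TICK_MS == 1000U);          /* same sanity check as the driver */

    printf("reload at 100 Hz:  %u\n", (unsigned)count);
    printf("reload at 1000 Hz: %u\n", (unsigned)(DUART_TIMER_HZ / 1000U)); /* old rate: 1843 */
    return 0;
}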
Fixed bug in WriteBodyDsemiDtEqtide. | @@ -1644,7 +1644,7 @@ void WriteBodyDsemiDtEqtide(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *s
iPert=1;
else
iPert=0;
- /* XXX Broken */
+ /* XXX Broken
if (control->Evolve.iEqtideModel == CPL)
*dTmp = fdCPLDsemiDtBody(body[iBody],body[iPert].dMass,body[1].dSemi,body[1].dEccSq);
@@ -1658,6 +1658,7 @@ void WriteBodyDsemiDtEqtide(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *s
*dTmp *= fdUnitsTime(units->iTime)/fdUnitsLength(units->iLength);
fsUnitsVel(units,cUnit);
}
+ */
}
void WriteBodyDeccDtEqtide(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
|
Add const for relevant method parameters in redis-benchmark | @@ -971,7 +971,7 @@ static void startBenchmarkThreads() {
pthread_join(config.threads[i]->thread, NULL);
}
-static void benchmark(char *title, char *cmd, int len) {
+static void benchmark(const char *title, char *cmd, int len) {
client c;
config.title = title;
@@ -1680,7 +1680,7 @@ int showThroughput(struct aeEventLoop *eventLoop, long long id, void *clientData
/* Return true if the named test was selected using the -t command line
* switch, or if all the tests are selected (no -t passed by user). */
-int test_is_selected(char *name) {
+int test_is_selected(const char *name) {
char buf[256];
int l = strlen(name);
|
luax_readmesh supports Model again; | #include <stdlib.h>
#include <stdarg.h>
#include <string.h>
+#ifndef LOVR_DISABLE_GRAPHICS
+#include "data/modelData.h"
+#include "graphics/graphics.h"
+#endif
typedef void voidFn(void);
typedef void destructorFn(void*);
@@ -522,5 +526,16 @@ int luax_readmesh(lua_State* L, int index, float** vertices, uint32_t* vertexCou
return index + 2;
}
+#ifndef LOVR_DISABLE_GRAPHICS
+ Model* model = luax_totype(L, index, Model);
+
+ if (model) {
+ ModelData* modelData = lovrModelGetInfo(model)->data;
+ lovrModelDataGetTriangles(modelData, vertices, indices, vertexCount, indexCount);
+ *shouldFree = false;
+ return index + 1;
+ }
+#endif
+
return luaL_argerror(L, index, "table or Model");
}
|
BugID:17132007: fix the problem of UNINIT white scan in LoRaMac.c | @@ -3084,7 +3084,7 @@ LoRaMacStatus_t LoRaMacMcpsRequest(McpsReq_t *mcpsRequest)
uint8_t fPort = 0;
void *fBuffer;
uint16_t fBufferSize;
- int8_t datarate;
+ int8_t datarate = 0;
bool readyToSend = false;
if (mcpsRequest == NULL) {
|
Update README: GNUPLOT's "sixel" driver is renamed as "sixeltek" | @@ -1345,7 +1345,7 @@ We are greatly inspired by the quality of ImageMagick and added some resampling
- [GNUPLOT](http://www.gnuplot.info/)
- Recent version of GNUPLOT supports new terminal driver "sixel" / "sixelgd".
+ Recent version of GNUPLOT supports new terminal driver "sixeltek(sixel)" / "sixelgd".

|
[bsp] minor optimization of drv_xpt2046.c
wrong device name xpt2049 on the pi4, so changed xpt2049 -> xpt2046. | #define DBG_LVL DBG_INFO
#include <rtdbg.h>
-//XPT2049
+//XPT2046
#define READ_X (0xD0)
#define READ_Y (0x90)
@@ -218,7 +218,7 @@ static struct rt_touch_ops touch_ops =
.touch_control = xpt2046_control,
};
-static int hw_xpt2049_touch_init(void)
+static int hw_xpt2046_touch_init(void)
{
//touch sem
rt_sem_init(&touch_ack, "touch_ack", 0, RT_IPC_FLAG_FIFO);
@@ -249,4 +249,4 @@ static int hw_xpt2049_touch_init(void)
return 0;
}
-INIT_DEVICE_EXPORT(hw_xpt2049_touch_init);
+INIT_DEVICE_EXPORT(hw_xpt2046_touch_init);
|
board/servo_v4/usb_pd_config.h: Format with clang-format
BRANCH=none
TEST=none | @@ -193,13 +193,23 @@ static inline void pd_select_polarity(int port, int polarity)
if (port == 0) {
/* CHG use the right comparator inverted input for COMP2 */
STM32_COMP_CSR = (val & ~STM32_COMP_CMP2INSEL_MASK) |
- (polarity ? STM32_COMP_CMP2INSEL_INM4 /* PA4: C0_CC2 */
- : STM32_COMP_CMP2INSEL_INM6);/* PA2: C0_CC1 */
+ (polarity ? STM32_COMP_CMP2INSEL_INM4 /* PA4:
+ C0_CC2
+ */
+ :
+ STM32_COMP_CMP2INSEL_INM6); /* PA2:
+ C0_CC1
+ */
} else {
/* DUT use the right comparator inverted input for COMP1 */
STM32_COMP_CSR = (val & ~STM32_COMP_CMP1INSEL_MASK) |
- (polarity ? STM32_COMP_CMP1INSEL_INM5 /* PA5: C1_CC2 */
- : STM32_COMP_CMP1INSEL_INM6);/* PA0: C1_CC1 */
+ (polarity ? STM32_COMP_CMP1INSEL_INM5 /* PA5:
+ C1_CC2
+ */
+ :
+ STM32_COMP_CMP1INSEL_INM6); /* PA0:
+ C1_CC1
+ */
}
}
@@ -279,7 +289,6 @@ static inline void pd_config_init(int port, uint8_t power_role)
/* Initialize TX pins and put them in Hi-Z */
pd_tx_init();
-
}
int pd_adc_read(int port, int cc);
|
conffile: allocate ciphers and suites on router slab
string values are allocated on a parser slab which is freed after the
parser finishes, so copy the cipher and suite strings to the router slab so they
remain available with the router. | @@ -983,13 +983,15 @@ transport_ssl_protover: crSSL3 { $$ = _rp_SSL3; }
| crTLS1_2 { $$ = _rp_TLS1_2; }
| crTLS1_3 { $$ = _rp_TLS1_3; }
;
-transport_opt_ssl_ciphers: { $$ = NULL; }
- | crCIPHERS crSTRING[ciphers] { $$ = $ciphers; }
+transport_opt_ssl_ciphers:
+ { $$ = NULL; }
+ | crCIPHERS crSTRING[ciphers]
+ { $$ = ra_strdup(ralloc, $ciphers); }
;
transport_opt_ssl_ciphersuites:
{ $$ = NULL; }
| crCIPHERSUITES crSTRING[suites]
- { $$ = $suites; }
+ { $$ = ra_strdup(ralloc, $suites); }
;
transport_mode_trans: crTRANSPORT crPLAIN
|
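The note above describes a lifetime bug: strings built on the parser's slab disappear when that slab is freed, so the diff copies the cipher and suite values onto the router's allocator with ra_strdup(ralloc, ...). A minimal stand-alone sketch of the hazard and the fix, using malloc/free and POSIX strdup in place of the project's slab allocators; slab, slab_strdup and the cipher string are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy "slab": one buffer that is thrown away when the parser finishes. */
struct slab { char buf[256]; size_t used; };

static char *slab_strdup(struct slab *s, const char *str)
{
    size_t len = strlen(str) + 1;
    char *out;

    if (s->used + len > sizeof(s->buf))
        return NULL;
    out = memcpy(s->buf + s->used, str, len);
    s->used += len;
    return out;
}

int main(void)
{
    struct slab *parser_slab = calloc(1, sizeof(*parser_slab));
    const char *parsed;
    char *kept;

    if (parser_slab == NULL)
        return 1;
    parsed = slab_strdup(parser_slab, "ECDHE-RSA-AES256-GCM-SHA384");
    if (parsed == NULL)
        return 1;

    /* Wrong: keeping `parsed` around would dangle after the free below.    */
    /* Right: copy the value onto storage that lives as long as the router. */
    kept = strdup(parsed);                /* stands in for ra_strdup(ralloc, ...) */

    free(parser_slab);                    /* parser finishes, its slab is gone */
    if (kept != NULL)
        printf("ciphers kept by the router: %s\n", kept);

    free(kept);
    return 0;
}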
meta: test for autovivify + quiet flag | @@ -364,6 +364,11 @@ my $sock = $server->sock;
like(scalar <$sock>, qr/^VA 2/, "get response");
like(scalar <$sock>, qr/^bo/, "get value");
like(scalar <$sock>, qr/^MN/, "end token");
+
+ # "quiet" won't do anything with autoviv, since the only case (miss)
+ # should return data anyway.
+ print $sock "mg quietautov s N30 t q\r\n";
+ like(scalar <$sock>, qr/^OK s0/, "quiet doesn't override autovivication");
}
{
|
[snitch] Clean up remaining mentions of const cache | // Author: Florian Zaruba <zarubaf@iis.ee.ethz.ch>
// Samuel Riedel <sriedel@iis.ee.ethz.ch>
-/// Serve read memory requests from a constant cache.
+/// Serve read memory requests from a read-only cache.
/// The cacheable region can be runtime configured. All writes and read
/// requests outside the configured regions will be forwarded.
module snitch_read_only_cache #(
@@ -49,13 +49,13 @@ module snitch_read_only_cache #(
// Check for supported parameters
if (AxiDataWidth < 32)
- $error("snitch_const_cache: AxiDataWidth must be larger than 32.");
+ $error("snitch_read_only_cache: AxiDataWidth must be larger than 32.");
if (AxiDataWidth > LineWidth)
- $error("snitch_const_cache: LineWidth must be larger than/equal to AxiDataWidth.");
+ $error("snitch_read_only_cache: LineWidth must be larger than/equal to AxiDataWidth.");
if (NrAddrRules < 1)
- $error("snitch_const_cache: NrAddrRules must be larger than/equal to 1.");
+ $error("snitch_read_only_cache: NrAddrRules must be larger than/equal to 1.");
if (MaxTrans < 1)
- $error("snitch_const_cache: MaxTrans must be larger than/equal to 1.");
+ $error("snitch_read_only_cache: MaxTrans must be larger than/equal to 1.");
// --------------------------------------------------
// AXI Demux
|
xpath BUGFIX handle both set node types
Fixes | @@ -5141,6 +5141,8 @@ xpath_true(struct lyxp_set **UNUSED(args), uint16_t UNUSED(arg_count), struct ly
/**
* @brief Skip prefix and return corresponding model if there is a prefix. Logs directly.
*
+ * XPath @p set is expected to be a (sc)node set!
+ *
* @param[in,out] qname Qualified node name. If includes prefix, it is skipped.
* @param[in,out] qname_len Length of @p qname, is updated accordingly.
* @param[in] set Set with XPath context.
@@ -5155,6 +5157,8 @@ moveto_resolve_model(const char **qname, uint16_t *qname_len, struct lyxp_set *s
int pref_len;
char *str;
+ assert((set->type == LYXP_SET_NODE_SET) || (set->type == LYXP_SET_SCNODE_SET));
+
if ((ptr = ly_strnchr(*qname, ':', *qname_len))) {
/* specific module */
pref_len = ptr - *qname;
@@ -5176,7 +5180,11 @@ moveto_resolve_model(const char **qname, uint16_t *qname_len, struct lyxp_set *s
/* Check for errors and non-implemented modules, as they are not valid */
if (!mod || !mod->implemented) {
+ if (set->type == LYXP_SET_SCNODE_SET) {
+ LOGVAL(set->ctx, LY_VLOG_LYSC, set->ctx_scnode, LY_VCODE_XP_INMOD, pref_len, *qname);
+ } else {
LOGVAL(set->ctx, LY_VLOG_LYD, set->ctx_node, LY_VCODE_XP_INMOD, pref_len, *qname);
+ }
return LY_EVALID;
}
|
Add onDrop handler to NmeApplication | @@ -131,6 +131,10 @@ class NmeApplication implements IAppEventHandler implements IPollClient
{
}
+ public function onDrop(event:AppEvent):Void
+ {
+ }
+
public function onUnhandledException(exception:Dynamic, stack:Array<StackItem>):Void
{
trace("Exception: " + exception+"\n" + haxe.CallStack.toString(stack));
|
Make sure that -fPIC is present when needed
override user-provided FFLAGS if necessary | @@ -1154,8 +1154,6 @@ ifndef FCOMMON_OPT
FCOMMON_OPT = -O2 -frecursive
endif
-
-
override CFLAGS += $(COMMON_OPT) $(CCOMMON_OPT) -I$(TOPDIR)
override PFLAGS += $(COMMON_OPT) $(CCOMMON_OPT) -I$(TOPDIR) -DPROFILE $(COMMON_PROF)
@@ -1163,6 +1161,12 @@ override FFLAGS += $(COMMON_OPT) $(FCOMMON_OPT)
override FPFLAGS += $(FCOMMON_OPT) $(COMMON_PROF)
#MAKEOVERRIDES =
+ifdef NEED_PIC
+ifeq (,$(findstring PIC,$(FFLAGS)))
+override FFLAGS += -fPIC
+endif
+endif
+
#For LAPACK Fortran codes.
#Disable -fopenmp for LAPACK Fortran codes on Windows.
ifdef OS_WINDOWS
|
Minor cleanup in `IDataInspector` | @@ -69,7 +69,7 @@ namespace MAT_NS_BEGIN
/// Returns unique name for current Data Inspector
/// </summary>
/// <returns>Name of Data Inspector</returns>
- virtual const char* GetName() const noexcept { return ""; }
+ virtual const char* GetName() const noexcept = 0;
};
}
|
fix changelog maintainer email | @@ -342,4 +342,4 @@ oidc-agent (3.3.2) UNRELEASED; urgency=medium
* Add aai-demo.egi.eu
* Add public client for aai-demo.egi.eu
- -- Marcus Hardt <hardt@kit.edu> Thu, 12 Mar 2020 08:23:13 +0200
+ -- Marcus Hardt <packages@lists.kit.edu> Thu, 12 Mar 2020 08:23:13 +0200
|
Update CompanionOwnerID offset | @@ -43,7 +43,7 @@ namespace FFXIVClientStructs.FFXIV.Client.Game.Character
[FieldOffset(0x1940)] public uint NameID;
- [FieldOffset(0x1950)] public uint CompanionOwnerID;
+ [FieldOffset(0x19A8)] public uint CompanionOwnerID;
[FieldOffset(0x195C)] public ushort CurrentWorld;
[FieldOffset(0x195E)] public ushort HomeWorld;
[FieldOffset(0x197F)] public byte Icon;
|
linux-raspberrypi: Drop CONFIG_OABI_COMPAT completely
There's no reason to support the old, outdated ARM OABI when upstream
linux-raspberrypi doesn't support this by default. | @@ -32,9 +32,6 @@ CMDLINE_append += ' ${@oe.utils.conditional("DISABLE_RPI_BOOT_LOGO", "1", "logo.
CMDLINE_DEBUG ?= ""
CMDLINE_append = " ${CMDLINE_DEBUG}"
-# Enable OABI compat for people stuck with obsolete userspace
-ARM_KEEP_OABI ?= "1"
-
KERNEL_INITRAMFS ?= '${@oe.utils.conditional("INITRAMFS_IMAGE_BUNDLE", "1", "1", "", d)}'
KERNEL_MODULE_AUTOLOAD += "${@bb.utils.contains("MACHINE_FEATURES", "pitft28r", "stmpe-ts", "", d)}"
@@ -98,11 +95,6 @@ do_configure_prepend() {
mv -f ${B}/.config ${B}/.config.patched
CONF_SED_SCRIPT=""
- # oabi / eabi support
- if [ "${ARM_KEEP_OABI}" = "1" ] ; then
- kernel_configure_variable OABI_COMPAT y
- fi
-
# Localversion
kernel_configure_variable LOCALVERSION "\"\""
|
VERSION bump to version 2.0.77 | @@ -58,7 +58,7 @@ set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
# set version of the project
set(LIBYANG_MAJOR_VERSION 2)
set(LIBYANG_MINOR_VERSION 0)
-set(LIBYANG_MICRO_VERSION 76)
+set(LIBYANG_MICRO_VERSION 77)
set(LIBYANG_VERSION ${LIBYANG_MAJOR_VERSION}.${LIBYANG_MINOR_VERSION}.${LIBYANG_MICRO_VERSION})
# set version of the library
set(LIBYANG_MAJOR_SOVERSION 2)
|