{"diff_hunk":"@@ -163,6 +163,9 @@ int\n main(int argc, char **argv)\n {\n test_query(SIGTERM);\n+#if !defined(MACOS)\n+ test_rt_sigprocmask();\n+#endif\n #if !defined(MACOS) && !defined(X64)\n test_non_rt_sigaction(SIGPIPE);\n #endif","source_code":"\/* **********************************************************\n * Copyright (c) 2015-2016 Google, Inc. All rights reserved.\n * **********************************************************\/\n\n\/*\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n *\n * * Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and\/or other materials provided with the distribution.\n *\n * * Neither the name of Google, Inc. nor the names of its contributors may be\n * used to endorse or promote products derived from this software without\n * specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE, INC. OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n * DAMAGE.\n *\/\n\n\/*\n * test of sigaction\n *\/\n#include \"tools.h\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#define SENTINEL 0x12345678UL\n\n#if !defined(MACOS) && !defined(X64)\ntypedef struct old_sigaction_t {\n void (*handler)(int, siginfo_t *, void *);\n unsigned int sa_mask;\n unsigned long sa_flags;\n void (*sa_restorer)(void);\n} old_sigaction_t;\n#endif\n\nstatic void\ntest_query(int sig)\n{\n \/* i#1984: test that the prior action is returned *\/\n int rc;\n struct sigaction first_act;\n struct sigaction new_act;\n struct sigaction old_act;\n memset((void *)&first_act, 0, sizeof(first_act));\n first_act.sa_sigaction = (void (*)(int, siginfo_t *, void *))SENTINEL;\n sigemptyset(&first_act.sa_mask);\n sigaddset(&first_act.sa_mask, SIGUSR1);\n sigaddset(&first_act.sa_mask, SIGUSR2);\n rc = sigaction(sig, &first_act, NULL);\n assert(rc == 0);\n\n \/* Test with nothing. *\/\n rc = sigaction(sig, NULL, NULL);\n assert(rc == 0);\n\n \/* Test without a new action. *\/\n memset((void *)&old_act, 0xff, sizeof(old_act));\n rc = sigaction(sig, NULL, &old_act);\n assert(rc == 0 && old_act.sa_sigaction == first_act.sa_sigaction &&\n \/* The flags do not match due to SA_RESTORER. *\/\n \/* The rest of mask is uninit stack values from the libc wrapper. *\/\n *(long *)&old_act.sa_mask == *(long *)&first_act.sa_mask);\n\n \/* Test with a new action. 
*\/\n memset((void *)&old_act, 0xff, sizeof(old_act));\n memset((void *)&new_act, 0, sizeof(new_act));\n new_act.sa_sigaction = (void (*)(int, siginfo_t *, void *))SIG_IGN;\n sigemptyset(&new_act.sa_mask);\n rc = sigaction(sig, &new_act, &old_act);\n assert(rc == 0 && old_act.sa_sigaction == first_act.sa_sigaction &&\n \/* The flags do not match due to SA_RESTORER. *\/\n \/* The rest of mask is uninit stack values from the libc wrapper. *\/\n *(long *)&old_act.sa_mask == *(long *)&first_act.sa_mask);\n\n \/* Test pattern from i#1984 issue and ensure no assert. *\/\n memset(&new_act, 0, sizeof(new_act));\n memset(&old_act, 0, sizeof(old_act));\n new_act.sa_sigaction = (void (*)(int, siginfo_t *, void *))SENTINEL;\n sigaction(SIGINT, &new_act, 0);\n sigaction(SIGINT, &new_act, &old_act);\n new_act.sa_handler = SIG_IGN;\n sigaction(SIGTSTP, &new_act, &old_act);\n}\n\nstatic void\nset_sigaction_handler(int sig, void *action)\n{\n int rc;\n struct sigaction act;\n memset((void *)&act, 0, sizeof(act));\n act.sa_sigaction = (void (*)(int, siginfo_t *, void *))action;\n \/* Arm the signal. *\/\n rc = sigaction(sig, &act, NULL);\n assert(rc == 0);\n}\n\n#if !defined(MACOS) && !defined(X64)\nstatic void\ntest_non_rt_sigaction(int sig)\n{\n int rc;\n old_sigaction_t first_act;\n old_sigaction_t new_act;\n old_sigaction_t old_act;\n memset((void *)&first_act, 0, sizeof(first_act));\n first_act.handler = (void (*)(int, siginfo_t *, void *))SENTINEL;\n first_act.sa_mask |= (1 << SIGUSR1);\n first_act.sa_mask |= (1 << SIGUSR2);\n rc = dynamorio_syscall(SYS_sigaction, 3, sig, &first_act, NULL);\n assert(rc == 0);\n\n \/* Test with nothing. *\/\n rc = dynamorio_syscall(SYS_sigaction, 3, sig, NULL, NULL);\n assert(rc == 0);\n\n \/* Test passing NULL to non-rt sigaction, which is used on Android (i#1822) *\/\n memset((void *)&old_act, 0xff, sizeof(old_act));\n rc = dynamorio_syscall(SYS_sigaction, 3, sig, NULL, &old_act);\n assert(rc == 0 && old_act.handler == first_act.handler &&\n \/* The flags do not match due to SA_RESTORER. *\/\n \/* The rest of mask is uninit stack values from the libc wrapper. *\/\n *(long *)&old_act.sa_mask == *(long *)&first_act.sa_mask);\n\n \/* Test with a new action. *\/\n memset((void *)&old_act, 0xff, sizeof(old_act));\n memset((void *)&new_act, 0, sizeof(new_act));\n new_act.handler = (void (*)(int, siginfo_t *, void *))SIG_IGN;\n rc = dynamorio_syscall(SYS_sigaction, 3, sig, &new_act, &old_act);\n assert(rc == 0 && old_act.handler == first_act.handler &&\n \/* The flags do not match due to SA_RESTORER. *\/\n \/* The rest of mask is uninit stack values from the libc wrapper. 
*\/\n *(long *)&old_act.sa_mask == *(long *)&first_act.sa_mask);\n\n \/* Clear handler *\/\n memset((void *)&new_act, 0, sizeof(new_act));\n rc = dynamorio_syscall(SYS_sigaction, 3, sig, &new_act, NULL);\n assert(rc == 0);\n}\n#endif\n\nint\nmain(int argc, char **argv)\n{\n test_query(SIGTERM);\n#if !defined(MACOS) && !defined(X64)\n test_non_rt_sigaction(SIGPIPE);\n#endif\n set_sigaction_handler(SIGTERM, (void *)SIG_IGN);\n print(\"Sending SIGTERM first time\\n\");\n kill(getpid(), SIGTERM);\n set_sigaction_handler(SIGTERM, (void *)SIG_DFL);\n print(\"Sending SIGTERM second time\\n\");\n kill(getpid(), SIGTERM);\n print(\"Should not be reached\\n\");\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":1} {"diff_hunk":"@@ -147,17 +147,12 @@ class TestCtu(unittest.TestCase):\n for arch in glob.glob(os.path.join(ctu_dir, '*')):\n fn_map_file = os.path.join(ctu_dir, arch, 'externalFnMap.txt')\n self.assertTrue(os.path.isfile(fn_map_file))\n- if not reparse:\n- ast_dir = os.path.join(ctu_dir, arch, 'ast')\n- self.assertTrue(os.path.isdir(ast_dir))\n \n- def __do_ctu_analyze(self, reparse):\n+ def __do_ctu_analyze(self):\n \"\"\" Execute CTU analyze phase. \"\"\"\n \n cmd = [self._codechecker_cmd, 'analyze', '-o', self.report_dir,\n '--analyzers', 'clangsa', '--ctu-analyze']\n- if reparse:\n- cmd.append('--ctu-on-the-fly')\n cmd.append(self.buildlog)\n out, _ = call_command(cmd, cwd=self.test_dir, env=self.env)\n return out","source_code":"#\n# -----------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. See LICENSE.TXT for details.\n# -----------------------------------------------------------------------------\n\"\"\" CTU function test.\"\"\"\n\nimport glob\nimport json\nimport os\nimport shutil\nimport unittest\n\nfrom libtest import env\nfrom libtest.codechecker import call_command\n\nNO_CTU_MESSAGE = \"CTU is not supported\"\n\n\nclass TestCtu(unittest.TestCase):\n \"\"\" Test CTU functionality. \"\"\"\n\n def setUp(self):\n \"\"\" Set up workspace.\"\"\"\n\n # TEST_WORKSPACE is automatically set by test package __init__.py .\n self.test_workspace = os.environ['TEST_WORKSPACE']\n\n test_class = self.__class__.__name__\n print('Running ' + test_class + ' tests in ' + self.test_workspace)\n\n # Get the CodeChecker cmd if needed for the tests.\n self._codechecker_cmd = env.codechecker_cmd()\n self.env = env.codechecker_env()\n self.report_dir = os.path.join(self.test_workspace, 'reports')\n os.makedirs(self.report_dir)\n self.test_dir = os.path.join(os.path.dirname(__file__), 'test_files')\n\n # Get if clang is CTU-capable or not.\n cmd = [self._codechecker_cmd, 'analyze', '-h']\n output, _ = call_command(cmd, cwd=self.test_dir, env=self.env)\n self.ctu_capable = '--ctu-' in output\n print(\"'analyze' reported CTU-compatibility? 
\" + str(self.ctu_capable))\n\n # Fix the \"template\" build JSONs to contain a proper directory\n # so the tests work.\n raw_buildlog = os.path.join(self.test_dir, 'buildlog.json')\n with open(raw_buildlog) as log_file:\n build_json = json.load(log_file)\n for command in build_json:\n command['directory'] = self.test_dir\n\n self.__old_pwd = os.getcwd()\n os.chdir(self.test_workspace)\n self.buildlog = os.path.join(self.test_workspace, 'buildlog.json')\n with open(self.buildlog, 'w') as log_file:\n json.dump(build_json, log_file)\n\n def tearDown(self):\n \"\"\" Tear down workspace.\"\"\"\n\n shutil.rmtree(self.report_dir, ignore_errors=True)\n os.chdir(self.__old_pwd)\n\n def test_ctu_all_no_reparse(self):\n \"\"\" Test full CTU without reparse. \"\"\"\n\n self.__test_ctu_all(False)\n\n def test_ctu_collect_no_reparse(self):\n \"\"\" Test CTU collect phase without reparse. \"\"\"\n\n self.__test_ctu_collect(False)\n\n def test_ctu_analyze_no_reparse(self):\n \"\"\" Test CTU analyze phase without reparse. \"\"\"\n\n self.__test_ctu_analyze(False)\n\n def test_ctu_all_reparse(self):\n \"\"\" Test full CTU with reparse. \"\"\"\n\n self.__test_ctu_all(True)\n\n def test_ctu_collect_reparse(self):\n \"\"\" Test CTU collect phase with reparse. \"\"\"\n\n self.__test_ctu_collect(True)\n\n def test_ctu_analyze_reparse(self):\n \"\"\" Test CTU analyze phase with reparse. \"\"\"\n\n self.__test_ctu_analyze(True)\n\n def __test_ctu_all(self, reparse):\n \"\"\" Test full CTU. \"\"\"\n\n if not self.ctu_capable:\n self.skipTest(NO_CTU_MESSAGE)\n output = self.__do_ctu_all(reparse)\n self.__check_ctu_analyze(output)\n\n def __test_ctu_collect(self, reparse):\n \"\"\" Test CTU collect phase. \"\"\"\n\n if not self.ctu_capable:\n self.skipTest(NO_CTU_MESSAGE)\n self.__do_ctu_collect(reparse)\n self.__check_ctu_collect(reparse)\n\n def __test_ctu_analyze(self, reparse):\n \"\"\" Test CTU analyze phase. \"\"\"\n\n if not self.ctu_capable:\n self.skipTest(NO_CTU_MESSAGE)\n self.__do_ctu_collect(reparse)\n output = self.__do_ctu_analyze(reparse)\n self.__check_ctu_analyze(output)\n\n def __do_ctu_all(self, reparse):\n \"\"\" Execute a full CTU run. \"\"\"\n\n cmd = [self._codechecker_cmd, 'analyze', '-o', self.report_dir,\n '--analyzers', 'clangsa', '--ctu-all']\n if reparse:\n cmd.append('--ctu-on-the-fly')\n cmd.append(self.buildlog)\n out, _ = call_command(cmd, cwd=self.test_dir, env=self.env)\n return out\n\n def __do_ctu_collect(self, reparse):\n \"\"\" Execute CTU collect phase. \"\"\"\n\n cmd = [self._codechecker_cmd, 'analyze', '-o', self.report_dir,\n '--analyzers', 'clangsa', '--ctu-collect']\n if reparse:\n cmd.append('--ctu-on-the-fly')\n cmd.append(self.buildlog)\n call_command(cmd, cwd=self.test_dir, env=self.env)\n\n def __check_ctu_collect(self, reparse):\n \"\"\" Check artifacts of CTU collect phase. \"\"\"\n\n ctu_dir = os.path.join(self.report_dir, 'ctu-dir')\n self.assertTrue(os.path.isdir(ctu_dir))\n for arch in glob.glob(os.path.join(ctu_dir, '*')):\n fn_map_file = os.path.join(ctu_dir, arch, 'externalFnMap.txt')\n self.assertTrue(os.path.isfile(fn_map_file))\n if not reparse:\n ast_dir = os.path.join(ctu_dir, arch, 'ast')\n self.assertTrue(os.path.isdir(ast_dir))\n\n def __do_ctu_analyze(self, reparse):\n \"\"\" Execute CTU analyze phase. 
\"\"\"\n\n cmd = [self._codechecker_cmd, 'analyze', '-o', self.report_dir,\n '--analyzers', 'clangsa', '--ctu-analyze']\n if reparse:\n cmd.append('--ctu-on-the-fly')\n cmd.append(self.buildlog)\n out, _ = call_command(cmd, cwd=self.test_dir, env=self.env)\n return out\n\n def __check_ctu_analyze(self, output):\n \"\"\" Check artifacts of CTU analyze phase. \"\"\"\n\n self.assertNotIn(\"Failed to analyze\", output)\n self.assertIn(\"analyzed lib.c successfully\", output)\n self.assertIn(\"analyzed main.c successfully\", output)\n\n cmd = [self._codechecker_cmd, 'parse', self.report_dir]\n output, _ = call_command(cmd, cwd=self.test_dir, env=self.env)\n self.assertIn(\"no defects while analyzing lib.c\", output)\n self.assertIn(\"defect(s) while analyzing main.c\", output)\n self.assertIn(\"lib.c:3:\", output)\n self.assertIn(\"[core.NullDereference]\", output)\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":2} {"diff_hunk":"@@ -4,6 +4,7 @@\n #include \"..\/mem\/pool.h\"\n #include \"ponyassert.h\"\n #include \n+#include \n \n #ifdef USE_VALGRIND\n #include ","source_code":"#define PONY_WANT_ATOMIC_DEFS\n\n#include \"messageq.h\"\n#include \"..\/mem\/pool.h\"\n#include \"ponyassert.h\"\n#include \n\n#ifdef USE_VALGRIND\n#include \n#endif\n\n#ifndef NDEBUG\n\nstatic size_t messageq_size_debug(messageq_t* q)\n{\n pony_msg_t* tail = q->tail;\n size_t count = 0;\n\n while(atomic_load_explicit(&tail->next, memory_order_relaxed) != NULL)\n {\n count++;\n tail = atomic_load_explicit(&tail->next, memory_order_relaxed);\n }\n\n return count;\n}\n\n#endif\n\nvoid ponyint_messageq_init(messageq_t* q)\n{\n pony_msg_t* stub = POOL_ALLOC(pony_msg_t);\n stub->index = POOL_INDEX(sizeof(pony_msg_t));\n atomic_store_explicit(&stub->next, NULL, memory_order_relaxed);\n\n atomic_store_explicit(&q->head, (pony_msg_t*)((uintptr_t)stub | 1),\n memory_order_relaxed);\n q->tail = stub;\n\n#ifndef NDEBUG\n messageq_size_debug(q);\n#endif\n}\n\nvoid ponyint_messageq_destroy(messageq_t* q)\n{\n pony_msg_t* tail = q->tail;\n pony_assert((((uintptr_t)atomic_load_explicit(&q->head, memory_order_relaxed) &\n ~(uintptr_t)1)) == (uintptr_t)tail);\n#ifdef USE_VALGRIND\n ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(tail);\n#endif\n\n ponyint_pool_free(tail->index, tail);\n atomic_store_explicit(&q->head, NULL, memory_order_relaxed);\n q->tail = NULL;\n}\n\nbool ponyint_messageq_push(messageq_t* q, pony_msg_t* first, pony_msg_t* last)\n{\n atomic_store_explicit(&last->next, NULL, memory_order_relaxed);\n\n \/\/ Without that fence, the store to last->next above could be reordered after\n \/\/ the exchange on the head and after the store to prev->next done by the\n \/\/ next push, which would result in the pop incorrectly seeing the queue as\n \/\/ empty.\n \/\/ Also synchronise with the pop on prev->next.\n atomic_thread_fence(memory_order_release);\n\n pony_msg_t* prev = atomic_exchange_explicit(&q->head, last,\n memory_order_relaxed);\n\n bool was_empty = ((uintptr_t)prev & 1) != 0;\n prev = (pony_msg_t*)((uintptr_t)prev & ~(uintptr_t)1);\n\n#ifdef USE_VALGRIND\n \/\/ Double fence with Valgrind since we need to have prev in scope for the\n \/\/ synchronisation annotation.\n ANNOTATE_HAPPENS_BEFORE(&prev->next);\n atomic_thread_fence(memory_order_release);\n#endif\n atomic_store_explicit(&prev->next, first, memory_order_relaxed);\n\n return was_empty;\n}\n\nbool ponyint_messageq_push_single(messageq_t* q, pony_msg_t* first,\n pony_msg_t* last)\n{\n atomic_store_explicit(&last->next, NULL, memory_order_relaxed);\n\n \/\/ If 
we have a single producer, the swap of the head need not be atomic RMW.\n pony_msg_t* prev = atomic_load_explicit(&q->head, memory_order_relaxed);\n atomic_store_explicit(&q->head, last, memory_order_relaxed);\n\n bool was_empty = ((uintptr_t)prev & 1) != 0;\n prev = (pony_msg_t*)((uintptr_t)prev & ~(uintptr_t)1);\n\n \/\/ If we have a single producer, the fence can be replaced with a store\n \/\/ release on prev->next.\n#ifdef USE_VALGRIND\n ANNOTATE_HAPPENS_BEFORE(&prev->next);\n#endif\n atomic_store_explicit(&prev->next, first, memory_order_release);\n\n return was_empty;\n}\n\npony_msg_t* ponyint_messageq_pop(messageq_t* q)\n{\n pony_msg_t* tail = q->tail;\n pony_msg_t* next = atomic_load_explicit(&tail->next, memory_order_relaxed);\n\n if(next != NULL)\n {\n q->tail = next;\n atomic_thread_fence(memory_order_acquire);\n#ifdef USE_VALGRIND\n ANNOTATE_HAPPENS_AFTER(&tail->next);\n ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(tail);\n#endif\n ponyint_pool_free(tail->index, tail);\n }\n\n return next;\n}\n\nbool ponyint_messageq_markempty(messageq_t* q)\n{\n pony_msg_t* tail = q->tail;\n pony_msg_t* head = atomic_load_explicit(&q->head, memory_order_relaxed);\n\n if(((uintptr_t)head & 1) != 0)\n return true;\n\n if(head != tail)\n return false;\n\n head = (pony_msg_t*)((uintptr_t)head | 1);\n\n#ifdef USE_VALGRIND\n ANNOTATE_HAPPENS_BEFORE(&q->head);\n#endif\n return atomic_compare_exchange_strong_explicit(&q->head, &tail, head,\n memory_order_release, memory_order_relaxed);\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":3} {"diff_hunk":"@@ -16,6 +16,7 @@\n * limitations under the License.\n *\/\n #include \"stackdriver.h\"\n+#include \"stackdriver_helper.h\"\n #include \"stackdriver_operation.h\"\n \n typedef enum {","source_code":"\/* Fluent Bit\n * ==========\n * Copyright (C) 2019-2020 The Fluent Bit Authors\n * Copyright (C) 2015-2018 Treasure Data Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n#include \"stackdriver.h\"\n#include \"stackdriver_operation.h\"\n\ntypedef enum {\n NO_OPERATION = 1,\n OPERATION_EXISTED = 2\n} operation_status;\n\n\nvoid add_operation_field(flb_sds_t *operation_id, flb_sds_t *operation_producer, \n int *operation_first, int *operation_last, \n msgpack_packer *mp_pck)\n{ \n msgpack_pack_str(mp_pck, 9);\n msgpack_pack_str_body(mp_pck, \"operation\", 9);\n msgpack_pack_map(mp_pck, 4);\n msgpack_pack_str(mp_pck, 2);\n msgpack_pack_str_body(mp_pck, \"id\", 2);\n msgpack_pack_str(mp_pck, flb_sds_len(*operation_id));\n msgpack_pack_str_body(mp_pck, *operation_id, flb_sds_len(*operation_id));\n msgpack_pack_str(mp_pck, 8);\n msgpack_pack_str_body(mp_pck, \"producer\", 8);\n msgpack_pack_str(mp_pck, flb_sds_len(*operation_producer));\n msgpack_pack_str_body(mp_pck, *operation_producer, flb_sds_len(*operation_producer));\n msgpack_pack_str(mp_pck, 5);\n msgpack_pack_str_body(mp_pck, \"first\", 5);\n if (*operation_first == FLB_TRUE) {\n msgpack_pack_true(mp_pck);\n }\n else {\n msgpack_pack_false(mp_pck);\n }\n \n 
msgpack_pack_str(mp_pck, 4);\n msgpack_pack_str_body(mp_pck, \"last\", 4);\n if (*operation_last == FLB_TRUE) {\n msgpack_pack_true(mp_pck);\n }\n else {\n msgpack_pack_false(mp_pck);\n }\n}\n\n\/* Return true if operation extracted *\/\nint extract_operation(flb_sds_t *operation_id, flb_sds_t *operation_producer, \n int *operation_first, int *operation_last, \n msgpack_object *obj, int *extra_subfields)\n{\n operation_status op_status = NO_OPERATION;\n\n if (obj->via.map.size != 0) { \t\n msgpack_object_kv *p = obj->via.map.ptr;\n msgpack_object_kv *const pend = obj->via.map.ptr + obj->via.map.size;\n\n for (; p < pend && op_status == NO_OPERATION; ++p) {\n if (p->val.type == MSGPACK_OBJECT_MAP && p->key.type == MSGPACK_OBJECT_STR\n && strncmp(OPERATION_FIELD_IN_JSON, p->key.via.str.ptr, p->key.via.str.size) == 0) {\n \n op_status = OPERATION_EXISTED;\n msgpack_object sub_field = p->val;\n \n msgpack_object_kv *tmp_p = sub_field.via.map.ptr;\n msgpack_object_kv *const tmp_pend = sub_field.via.map.ptr + sub_field.via.map.size;\n\n \/* Validate the subfields of operation *\/\n for (; tmp_p < tmp_pend; ++tmp_p) {\n if (tmp_p->key.type != MSGPACK_OBJECT_STR) {\n continue;\n }\n if (strncmp(\"id\", tmp_p->key.via.str.ptr, tmp_p->key.via.str.size) == 0) {\n if (tmp_p->val.type != MSGPACK_OBJECT_STR) {\n continue;\n }\n *operation_id = flb_sds_copy(*operation_id, tmp_p->val.via.str.ptr, tmp_p->val.via.str.size);\n }\n else if (strncmp(\"producer\", tmp_p->key.via.str.ptr, tmp_p->key.via.str.size) == 0) {\n if (tmp_p->val.type != MSGPACK_OBJECT_STR) {\n continue;\n }\n *operation_producer = flb_sds_copy(*operation_producer, tmp_p->val.via.str.ptr, tmp_p->val.via.str.size);\n }\n else if (strncmp(\"first\", tmp_p->key.via.str.ptr, tmp_p->key.via.str.size) == 0) {\n if (tmp_p->val.type != MSGPACK_OBJECT_BOOLEAN) {\n continue;\n }\n if (tmp_p->val.via.boolean) {\n *operation_first = FLB_TRUE;\n }\n }\n else if (strncmp(\"last\", tmp_p->key.via.str.ptr, tmp_p->key.via.str.size) == 0) {\n if (tmp_p->val.type != MSGPACK_OBJECT_BOOLEAN) {\n continue;\n }\n if (tmp_p->val.via.boolean) {\n *operation_last = FLB_TRUE;\n }\n }\n else {\n \/* extra sub-fields *\/ \n *extra_subfields += 1;\n }\n\n }\n }\n }\n }\n \n return op_status == OPERATION_EXISTED;\n}\n\nvoid pack_extra_operation_subfields(msgpack_packer *mp_pck, msgpack_object *operation, int extra_subfields) {\n msgpack_object_kv *p = operation->via.map.ptr;\n msgpack_object_kv *const pend = operation->via.map.ptr + operation->via.map.size;\n\n msgpack_pack_map(mp_pck, extra_subfields);\n\n for (; p < pend; ++p) {\n if (strncmp(\"id\", p->key.via.str.ptr, p->key.via.str.size) != 0 \n && strncmp(\"producer\", p->key.via.str.ptr, p->key.via.str.size) != 0\n && strncmp(\"first\", p->key.via.str.ptr, p->key.via.str.size) != 0\n && strncmp(\"last\", p->key.via.str.ptr, p->key.via.str.size) != 0) {\n msgpack_pack_object(mp_pck, p->key);\n msgpack_pack_object(mp_pck, p->val);\n }\n }\n\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":4} {"diff_hunk":"@@ -23,15 +23,20 @@ import (\n \t\"github.com\/projectcalico\/libcalico-go\/lib\/net\"\n )\n \n-\/\/ DataplanePassthru simply passes through some datamodel updates to the dataplane layer.\n-\/\/ It maps OnUpdate() calls to dedicated method calls for consistency with the\n-\/\/ rest of the dataplane API.\n+\/\/ DataplanePassthru passes through some datamodel updates to the dataplane layer, removing some\n+\/\/ duplicates along the way. 
It maps OnUpdate() calls to dedicated method calls for consistency\n+\/\/ with the rest of the dataplane API.\n type DataplanePassthru struct {\n \tcallbacks passthruCallbacks\n+\n+\thostIPs map[string]*net.IP\n }\n \n func NewDataplanePassthru(callbacks passthruCallbacks) *DataplanePassthru {\n-\treturn &DataplanePassthru{callbacks: callbacks}\n+\treturn &DataplanePassthru{\n+\t\tcallbacks: callbacks,\n+\t\thostIPs: map[string]*net.IP{},\n+\t}\n }\n \n func (h *DataplanePassthru) RegisterWith(dispatcher *dispatcher.Dispatcher) {","source_code":"\/\/ Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage calc\n\nimport (\n\tlog \"github.com\/Sirupsen\/logrus\"\n\n\t\"github.com\/projectcalico\/felix\/dispatcher\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/backend\/api\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/backend\/model\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/net\"\n)\n\n\/\/ DataplanePassthru simply passes through some datamodel updates to the dataplane layer.\n\/\/ It maps OnUpdate() calls to dedicated method calls for consistency with the\n\/\/ rest of the dataplane API.\ntype DataplanePassthru struct {\n\tcallbacks passthruCallbacks\n}\n\nfunc NewDataplanePassthru(callbacks passthruCallbacks) *DataplanePassthru {\n\treturn &DataplanePassthru{callbacks: callbacks}\n}\n\nfunc (h *DataplanePassthru) RegisterWith(dispatcher *dispatcher.Dispatcher) {\n\tdispatcher.Register(model.HostIPKey{}, h.OnUpdate)\n\tdispatcher.Register(model.IPPoolKey{}, h.OnUpdate)\n}\n\nfunc (h *DataplanePassthru) OnUpdate(update api.Update) (filterOut bool) {\n\tswitch key := update.Key.(type) {\n\tcase model.HostIPKey:\n\t\thostname := key.Hostname\n\t\tif update.Value == nil {\n\t\t\tlog.WithField(\"update\", update).Debug(\"Passing-through HostIP deletion\")\n\t\t\th.callbacks.OnHostIPRemove(hostname)\n\t\t} else {\n\t\t\tlog.WithField(\"update\", update).Debug(\"Passing-through HostIP update\")\n\t\t\tip := update.Value.(*net.IP)\n\t\t\th.callbacks.OnHostIPUpdate(hostname, ip)\n\t\t}\n\tcase model.IPPoolKey:\n\t\tif update.Value == nil {\n\t\t\tlog.WithField(\"update\", update).Debug(\"Passing-through IPPool deletion\")\n\t\t\th.callbacks.OnIPPoolRemove(key)\n\t\t} else {\n\t\t\tlog.WithField(\"update\", update).Debug(\"Passing-through IPPool update\")\n\t\t\tpool := update.Value.(*model.IPPool)\n\t\t\th.callbacks.OnIPPoolUpdate(key, pool)\n\t\t}\n\t}\n\n\treturn false\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":5} {"diff_hunk":"@@ -19,7 +19,39 @@\n \n #include \"jpath.h\"\n \n-static int jpath_set_destructive (json_t *o, char *path, json_t *val)\n+static int update_object_recursive (json_t *orig, json_t *val)\n+{\n+ const char *key;\n+ json_t *value;\n+\n+ json_object_foreach (val, key, value) {\n+ json_t *orig_value = json_object_get (orig, key);\n+\n+ if (json_is_object (value)) {\n+ if (!json_is_object (orig_value)) {\n+ json_t *o = json_object ();\n+ 
if (!o || json_object_set_new (orig, key, o) < 0) {\n+ errno = ENOMEM;\n+ json_decref (o);\n+ return -1;\n+ }\n+ orig_value = o;\n+ }\n+ if (update_object_recursive (orig_value, value) < 0)\n+ return -1;\n+ }\n+ else if (json_object_set (orig, key, value) < 0) {\n+ errno = ENOMEM;\n+ return -1;\n+ }\n+ }\n+ return 0;\n+}\n+\n+static int jpath_set_destructive (json_t *o,\n+ int replace,\n+ char *path,\n+ json_t *val)\n {\n char *cp;\n json_t *dir;","source_code":"\/************************************************************\\\n * Copyright 2021 Lawrence Livermore National Security, LLC\n * (c.f. AUTHORS, NOTICE.LLNS, COPYING)\n *\n * This file is part of the Flux resource manager framework.\n * For details, see https:\/\/github.com\/flux-framework.\n *\n * SPDX-License-Identifier: LGPL-3.0\n\\************************************************************\/\n\n#if HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n#include \n#include \n#include \n\n#include \"src\/common\/libutil\/errno_safe.h\"\n\n#include \"jpath.h\"\n\nstatic int jpath_set_destructive (json_t *o, char *path, json_t *val)\n{\n char *cp;\n json_t *dir;\n\n if ((cp = strchr (path, '.'))) {\n *cp++ = '\\0';\n if (strlen (path) == 0) {\n errno = EINVAL;\n return -1;\n }\n if (!(dir = json_object_get (o, path))) {\n if (!(dir = json_object ()))\n goto nomem;\n if (json_object_set_new (o, path, dir) < 0) {\n json_decref (dir);\n goto nomem;\n }\n }\n return jpath_set_destructive (dir, cp, val);\n }\n\n if (strlen (path) == 0) {\n errno = EINVAL;\n return -1;\n }\n if (json_object_set (o, path, val) < 0)\n goto nomem;\n return 0;\nnomem:\n errno = ENOMEM;\n return -1;\n}\n\nstatic int jpath_del_destructive (json_t *o, char *path)\n{\n char *cp;\n json_t *dir;\n\n if ((cp = strchr (path, '.'))) {\n *cp++ = '\\0';\n if (strlen (path) == 0) {\n errno = EINVAL;\n return -1;\n }\n if (!(dir = json_object_get (o, path)))\n return 0;\n return jpath_del_destructive (dir, cp);\n }\n\n if (strlen (path) == 0) {\n errno = EINVAL;\n return -1;\n }\n (void)json_object_del (o, path);\n return 0;\n}\n\nstatic json_t *jpath_get_destructive (json_t *o, char *path)\n{\n char *cp;\n json_t *dir;\n json_t *val;\n\n if ((cp = strchr (path, '.'))) {\n *cp++ = '\\0';\n if (strlen (path) == 0) {\n errno = EINVAL;\n return NULL;\n }\n if (!(dir = json_object_get (o, path))) {\n errno = ENOENT;\n return NULL;\n }\n return jpath_get_destructive (dir, cp);\n }\n\n if (strlen (path) == 0) {\n errno = EINVAL;\n return NULL;\n }\n if (!(val = json_object_get (o, path))) {\n errno = ENOENT;\n return NULL;\n }\n return val;\n}\n\nint jpath_set (json_t *o, const char *path, json_t *val)\n{\n char *cpy;\n int rc;\n\n if (!o || !path || !val) {\n errno = EINVAL;\n return -1;\n }\n if (!(cpy = strdup (path)))\n return -1;\n rc = jpath_set_destructive (o, cpy, val);\n ERRNO_SAFE_WRAP (free, cpy);\n return rc;\n}\n\nint jpath_del (json_t *o, const char *path)\n{\n char *cpy;\n int rc;\n\n if (!o || !path) {\n errno = EINVAL;\n return -1;\n }\n if (!(cpy = strdup (path)))\n return -1;\n rc = jpath_del_destructive (o, cpy);\n ERRNO_SAFE_WRAP (free, cpy);\n return rc;\n}\n\njson_t *jpath_get (json_t *o, const char *path)\n{\n char *cpy;\n json_t *ret;\n\n if (!o || !path) {\n errno = EINVAL;\n return NULL;\n }\n if (!(cpy = strdup (path)))\n return NULL;\n ret = jpath_get_destructive (o, cpy);\n ERRNO_SAFE_WRAP (free, cpy);\n return ret;\n}\n\n\/\/ vi:ts=4 sw=4 expandtab\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":6} {"diff_hunk":"@@ -53,7 +53,13 @@ static void 
keyboard_binding_execute(struct roots_keyboard *keyboard,\n \t}\n }\n \n-static void keyboard_keysym_press(struct roots_keyboard *keyboard,\n+\/**\n+ * Process a keypress from the keyboard.\n+ *\n+ * Returns true if the keysym was handled by a binding and false if the event\n+ * should be propagated to clients.\n+ *\/\n+static bool keyboard_keysym_press(struct roots_keyboard *keyboard,\n \t\txkb_keysym_t keysym) {\n \tssize_t i = keyboard_pressed_keysym_index(keyboard, keysym);\n \tif (i < 0) {","source_code":"#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"rootston\/input.h\"\n\nstatic ssize_t keyboard_pressed_keysym_index(struct roots_keyboard *keyboard,\n\t\txkb_keysym_t keysym) {\n\tfor (size_t i = 0; i < ROOTS_KEYBOARD_PRESSED_KEYSYMS_CAP; i++) {\n\t\tif (keyboard->pressed_keysyms[i] == keysym) {\n\t\t\treturn i;\n\t\t}\n\t}\n\treturn -1;\n}\n\nstatic const char *exec_prefix = \"exec \";\n\nstatic void keyboard_binding_execute(struct roots_keyboard *keyboard,\n\t\tconst char *command) {\n\tstruct roots_server *server = keyboard->input->server;\n\tif (strcmp(command, \"exit\") == 0) {\n\t\twl_display_terminate(server->wl_display);\n\t} else if (strcmp(command, \"close\") == 0) {\n\t\tif (keyboard->input->last_active_view != NULL) {\n\t\t\tview_close(keyboard->input->last_active_view);\n\t\t}\n\t} else if (strcmp(command, \"next_window\") == 0) {\n\t\tif (server->desktop->views->length > 0) {\n\t\t\tstruct roots_view *view = server->desktop->views->items[0];\n\t\t\tset_view_focus(keyboard->input, server->desktop, view);\n\t\t\twlr_seat_keyboard_notify_enter(keyboard->input->wl_seat,\n\t\t\t\tview->wlr_surface);\n\t\t}\n\t} else if (strncmp(exec_prefix, command, strlen(exec_prefix)) == 0) {\n\t\tconst char *shell_cmd = command + strlen(exec_prefix);\n\t\tpid_t pid = fork();\n\t\tif (pid < 0) {\n\t\t\twlr_log(L_ERROR, \"cannot execute binding command: fork() failed\");\n\t\t\treturn;\n\t\t} else if (pid == 0) {\n\t\t\texecl(\"\/bin\/sh\", \"\/bin\/sh\", \"-c\", shell_cmd, (void *)NULL);\n\t\t}\n\t} else {\n\t\twlr_log(L_ERROR, \"unknown binding command: %s\", command);\n\t}\n}\n\nstatic void keyboard_keysym_press(struct roots_keyboard *keyboard,\n\t\txkb_keysym_t keysym) {\n\tssize_t i = keyboard_pressed_keysym_index(keyboard, keysym);\n\tif (i < 0) {\n\t\ti = keyboard_pressed_keysym_index(keyboard, XKB_KEY_NoSymbol);\n\t\tif (i >= 0) {\n\t\t\tkeyboard->pressed_keysyms[i] = keysym;\n\t\t}\n\t}\n\n\tif (keysym >= XKB_KEY_XF86Switch_VT_1 &&\n\t\t\tkeysym <= XKB_KEY_XF86Switch_VT_12) {\n\t\tstruct roots_server *server = keyboard->input->server;\n\t\tif (wlr_backend_is_multi(server->backend)) {\n\t\t\tstruct wlr_session *session =\n\t\t\t\twlr_multi_get_session(server->backend);\n\t\t\tif (session) {\n\t\t\t\tunsigned vt = keysym - XKB_KEY_XF86Switch_VT_1 + 1;\n\t\t\t\twlr_session_change_vt(session, vt);\n\t\t\t}\n\t\t}\n\t\treturn;\n\t}\n\n\tuint32_t modifiers = wlr_keyboard_get_modifiers(keyboard->device->keyboard);\n\tstruct wl_list *bindings = &keyboard->input->server->config->bindings;\n\tstruct binding_config *bc;\n\twl_list_for_each(bc, bindings, link) {\n\t\tif (modifiers ^ bc->modifiers) {\n\t\t\tcontinue;\n\t\t}\n\n\t\tbool ok = true;\n\t\tfor (size_t i = 0; i < bc->keysyms_len; i++) {\n\t\t\tssize_t j = keyboard_pressed_keysym_index(keyboard, bc->keysyms[i]);\n\t\t\tif (j < 0) {\n\t\t\t\tok = false;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\n\t\tif (ok) {\n\t\t\tkeyboard_binding_execute(keyboard, 
bc->command);\n\t\t}\n\t}\n}\n\nstatic void keyboard_keysym_release(struct roots_keyboard *keyboard,\n\t\txkb_keysym_t keysym) {\n\tssize_t i = keyboard_pressed_keysym_index(keyboard, keysym);\n\tif (i >= 0) {\n\t\tkeyboard->pressed_keysyms[i] = XKB_KEY_NoSymbol;\n\t}\n}\n\nstatic void keyboard_key_notify(struct wl_listener *listener, void *data) {\n\tstruct wlr_event_keyboard_key *event = data;\n\tstruct roots_keyboard *keyboard = wl_container_of(listener, keyboard, key);\n\n\tuint32_t keycode = event->keycode + 8;\n\tconst xkb_keysym_t *syms;\n\tint syms_len = xkb_state_key_get_syms(keyboard->device->keyboard->xkb_state,\n\t\tkeycode, &syms);\n\tfor (int i = 0; i < syms_len; i++) {\n\t\tif (event->state == WLR_KEY_PRESSED) {\n\t\t\tkeyboard_keysym_press(keyboard, syms[i]);\n\t\t} else { \/\/ WLR_KEY_RELEASED\n\t\t\tkeyboard_keysym_release(keyboard, syms[i]);\n\t\t}\n\t}\n}\n\nvoid keyboard_add(struct wlr_input_device *device, struct roots_input *input) {\n\tstruct roots_keyboard *keyboard = calloc(sizeof(struct roots_keyboard), 1);\n\tif (keyboard == NULL) {\n\t\treturn;\n\t}\n\tdevice->data = keyboard;\n\tkeyboard->device = device;\n\tkeyboard->input = input;\n\tkeyboard->key.notify = keyboard_key_notify;\n\twl_signal_add(&device->keyboard->events.key, &keyboard->key);\n\twl_list_insert(&input->keyboards, &keyboard->link);\n\n\tstruct xkb_rule_names rules;\n\tmemset(&rules, 0, sizeof(rules));\n\trules.rules = getenv(\"XKB_DEFAULT_RULES\");\n\trules.model = getenv(\"XKB_DEFAULT_MODEL\");\n\trules.layout = getenv(\"XKB_DEFAULT_LAYOUT\");\n\trules.variant = getenv(\"XKB_DEFAULT_VARIANT\");\n\trules.options = getenv(\"XKB_DEFAULT_OPTIONS\");\n\tstruct xkb_context *context = xkb_context_new(XKB_CONTEXT_NO_FLAGS);\n\tif (context == NULL) {\n\t\twlr_log(L_ERROR, \"Cannot create XKB context\");\n\t\treturn;\n\t}\n\twlr_keyboard_set_keymap(device->keyboard, xkb_map_new_from_names(context,\n\t\t&rules, XKB_KEYMAP_COMPILE_NO_FLAGS));\n\txkb_context_unref(context);\n\n\twlr_seat_attach_keyboard(input->wl_seat, device);\n}\n\nvoid keyboard_remove(struct wlr_input_device *device, struct roots_input *input) {\n\tstruct roots_keyboard *keyboard = device->data;\n\twlr_seat_detach_keyboard(input->wl_seat, device->keyboard);\n\twl_list_remove(&keyboard->key.link);\n\twl_list_remove(&keyboard->link);\n\tfree(keyboard);\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":7} {"diff_hunk":"@@ -111,5 +111,5 @@ def remove_from_suppress_file(supp_file, value, hash_type):\n \n except Exception as ex:\n LOG.error(str(ex))\n- LOG.error(\"Failed to write: %s\" % (supp_file))\n+ LOG.error(\"Failed to write: %s\" % (suppress_file))\n return False","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. 
See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n''' suppress file format\n\n123324353456463442341242342343#1 || bug hash comment\n\/sdfsfs\/sdf\/ || some path to suppress\n\n'''\n\nimport os\nimport string\nimport codecs\nfrom codechecker_lib import logger\n\nLOG = logger.get_new_logger('SUPPRESS_FILE_HANDLER')\n\n\nCOMMENT_SEPARATOR = '||'\nHASH_TYPE_SEPARATOR = '#'\n\n\ndef get_hash_and_path(suppress_file):\n\n paths, hashes = {}, {}\n\n def is_bug_hash(line):\n valid_chars = string.hexdigits + HASH_TYPE_SEPARATOR\n return all(c in valid_chars for c in line) and len(line) == 34\n\n LOG.debug('Processing suppress file: '+suppress_file)\n\n if os.path.exists(suppress_file):\n with codecs.open(suppress_file, 'r', 'UTF-8') as s_file:\n for line in s_file:\n if line == '':\n # skip empty lines\n continue\n res = line.split(COMMENT_SEPARATOR)\n if len(res) == 2:\n # there is a comment\n\n data = res[0].strip()\n comment = res[1].strip()\n if is_bug_hash(data):\n hashes[data] = comment\n else:\n paths[data] = comment\n if len(res) == 1:\n data = res[0].strip()\n if is_bug_hash(data):\n hashes[data] = ''\n else:\n paths[data] = ''\n\n LOG.debug(hashes)\n LOG.debug(paths)\n\n return hashes, paths\n\n\n# ---------------------------------------------------------------------------\ndef write_to_suppress_file(supp_file, value, hash_type, comment=''):\n\n comment = comment.decode('UTF-8')\n\n hashes, paths = get_hash_and_path(supp_file)\n\n value = value+HASH_TYPE_SEPARATOR+str(hash_type)\n try:\n if not os.stat(supp_file)[6] == 0:\n if value in hashes or value in paths:\n LOG.debug(\"Already found in\\n %s\" % (supp_file))\n return True\n\n s_file = codecs.open(supp_file, 'a', 'UTF-8')\n\n s_file.write(value+COMMENT_SEPARATOR+comment+'\\n')\n s_file.close()\n\n return True\n\n except Exception as ex:\n LOG.error(str(ex))\n LOG.error(\"Failed to write: %s\" % (supp_file))\n return False\n\n\ndef remove_from_suppress_file(supp_file, value, hash_type):\n\n LOG.debug('Removing ' + value + ' from \\n' + supp_file)\n\n try:\n s_file = codecs.open(supp_file, 'r+', 'UTF-8')\n lines = s_file.readlines()\n\n lines = filter(lambda line: not line.startswith(value +\n HASH_TYPE_SEPARATOR +\n str(hash_type) +\n COMMENT_SEPARATOR),\n lines)\n\n s_file.seek(0)\n s_file.truncate()\n s_file.writelines(lines)\n s_file.close()\n\n return True\n\n except Exception as ex:\n LOG.error(str(ex))\n LOG.error(\"Failed to write: %s\" % (supp_file))\n return False\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":8} {"diff_hunk":"@@ -34,8 +34,9 @@ type policyRenderer interface {\n \tProfileToIptablesChains(profileID *proto.ProfileID, policy *proto.Profile, ipVersion uint8) []*iptables.Chain\n }\n \n-func newPolicyManager(filterTable iptablesTable, ruleRenderer policyRenderer, ipVersion uint8) *policyManager {\n+func newPolicyManager(rawTable, filterTable iptablesTable, ruleRenderer policyRenderer, ipVersion uint8) *policyManager {\n \treturn &policyManager{\n+\t\trawTable: rawTable,\n \t\tfilterTable: filterTable,\n \t\truleRenderer: ruleRenderer,\n \t\tipVersion: ipVersion,","source_code":"\/\/ Copyright (c) 2016-2017 Tigera, Inc. 
All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage intdataplane\n\nimport (\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/iptables\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/proto\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/rules\"\n)\n\n\/\/ policyManager simply renders policy\/profile updates into iptables.Chain objects and sends\n\/\/ them to the dataplane layer.\ntype policyManager struct {\n\tfilterTable iptablesTable\n\truleRenderer policyRenderer\n\tipVersion uint8\n}\n\ntype policyRenderer interface {\n\tPolicyToIptablesChains(policyID *proto.PolicyID, policy *proto.Policy, ipVersion uint8) []*iptables.Chain\n\tProfileToIptablesChains(profileID *proto.ProfileID, policy *proto.Profile, ipVersion uint8) []*iptables.Chain\n}\n\nfunc newPolicyManager(filterTable iptablesTable, ruleRenderer policyRenderer, ipVersion uint8) *policyManager {\n\treturn &policyManager{\n\t\tfilterTable: filterTable,\n\t\truleRenderer: ruleRenderer,\n\t\tipVersion: ipVersion,\n\t}\n}\n\nfunc (m *policyManager) OnUpdate(msg interface{}) {\n\tswitch msg := msg.(type) {\n\tcase *proto.ActivePolicyUpdate:\n\t\tlog.WithField(\"id\", msg.Id).Debug(\"Updating policy chains\")\n\t\tchains := m.ruleRenderer.PolicyToIptablesChains(msg.Id, msg.Policy, m.ipVersion)\n\t\tm.filterTable.UpdateChains(chains)\n\tcase *proto.ActivePolicyRemove:\n\t\tlog.WithField(\"id\", msg.Id).Debug(\"Removing policy chains\")\n\t\tinName := rules.PolicyChainName(rules.PolicyInboundPfx, msg.Id)\n\t\toutName := rules.PolicyChainName(rules.PolicyOutboundPfx, msg.Id)\n\t\tm.filterTable.RemoveChainByName(inName)\n\t\tm.filterTable.RemoveChainByName(outName)\n\tcase *proto.ActiveProfileUpdate:\n\t\tlog.WithField(\"id\", msg.Id).Debug(\"Updating profile chains\")\n\t\tchains := m.ruleRenderer.ProfileToIptablesChains(msg.Id, msg.Profile, m.ipVersion)\n\t\tm.filterTable.UpdateChains(chains)\n\tcase *proto.ActiveProfileRemove:\n\t\tlog.WithField(\"id\", msg.Id).Debug(\"Removing profile chains\")\n\t\tinName := rules.ProfileChainName(rules.PolicyInboundPfx, msg.Id)\n\t\toutName := rules.ProfileChainName(rules.PolicyOutboundPfx, msg.Id)\n\t\tm.filterTable.RemoveChainByName(inName)\n\t\tm.filterTable.RemoveChainByName(outName)\n\t}\n}\n\nfunc (m *policyManager) CompleteDeferredWork() error {\n\t\/\/ Nothing to do, we don't defer any work.\n\treturn nil\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":9} {"diff_hunk":"@@ -121,7 +121,7 @@ def check(check_data):\n rh.postprocess_result()\n rh.handle_results()\n else:\n- # analisys failed\n+ # Analyses failed.\n LOG.error('Analyzing ' + source_file_name + ' failed.')\n if rh.analyzer_stdout != '':\n LOG.error(rh.analyzer_stdout)","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. 
See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n'''\n'''\n\nimport os\nimport re\nimport sys\nimport signal\nimport multiprocessing\nimport ntpath\nimport traceback\nimport shutil\nfrom collections import defaultdict\n\nfrom codechecker_lib import logger\nfrom codechecker_lib import analyzer_env\n\nfrom codechecker_lib.analyzers import analyzer_types\n\nLOG = logger.get_new_logger('ANALISYS MANAGER')\n\n\ndef worker_result_handler(results):\n \"\"\"\n print the analisys summary\n \"\"\"\n\n successful_analysis = defaultdict(int)\n failed_analisys = defaultdict(int)\n skipped_num = 0\n\n for res, skipped, analyzer_type in results:\n if skipped:\n skipped_num += 1\n else:\n if res == 0:\n successful_analysis[analyzer_type] += 1\n else:\n failed_analisys[analyzer_type] += 1\n\n LOG.info(\"----==== Summary ====----\")\n LOG.info('Total compilation commands: ' + str(len(results)))\n if successful_analysis:\n LOG.info('Successfully analyzed')\n for analyzer_type, res in successful_analysis.iteritems():\n LOG.info(' ' + analyzer_type + ': ' + str(res))\n\n if failed_analisys:\n LOG.info(\"Failed to analyze\")\n for analyzer_type, res in failed_analisys.iteritems():\n LOG.info(' ' + analyzer_type + ': ' + str(res))\n\n if skipped_num:\n LOG.info('Skipped compilation commands: ' + str(skipped_num))\n LOG.info(\"----=================----\")\n\ndef check(check_data):\n \"\"\"\n Invoke clang with an action which called by processes.\n Different analyzer object belongs to for each build action\n\n skiplist handler is None if no skip file was configured\n \"\"\"\n args, action, context, analyzer_config_map, skp_handler, \\\n report_output_dir, use_db = check_data\n\n skipped = False\n try:\n # if one analysis fails the check fails\n return_codes = 0\n skipped = False\n for source in action.sources:\n\n # if there is no skiplist handler there was no skip list file\n # in the command line\n # cpp file skipping is handled here\n _, source_file_name = ntpath.split(source)\n\n if skp_handler and skp_handler.should_skip(source):\n LOG.debug_analyzer(source_file_name + ' is skipped')\n skipped = True\n continue\n\n # construct analyzer env\n analyzer_environment = analyzer_env.get_check_env(context.path_env_extra,\n context.ld_lib_path_extra)\n run_id = context.run_id\n\n rh = analyzer_types.construct_result_handler(args,\n action,\n run_id,\n report_output_dir,\n context.severity_map,\n skp_handler,\n use_db)\n\n #LOG.info('Analysing ' + source_file_name)\n\n # create a source analyzer\n source_analyzer = analyzer_types.construct_analyzer(action,\n analyzer_config_map)\n\n # source is the currently analyzed source file\n # there can be more in one buildaction\n source_analyzer.source_file = source\n\n # fills up the result handler with the analyzer information\n source_analyzer.analyze(rh, analyzer_environment)\n\n if rh.analyzer_returncode == 0:\n # analysis was successful\n # processing results\n if rh.analyzer_stdout != '':\n LOG.debug_analyzer('\\n' + rh.analyzer_stdout)\n if rh.analyzer_stderr != '':\n LOG.debug_analyzer('\\n' + rh.analyzer_stderr)\n rh.postprocess_result()\n rh.handle_results()\n else:\n # analisys failed\n LOG.error('Analyzing ' + source_file_name + ' failed.')\n if rh.analyzer_stdout != '':\n LOG.error(rh.analyzer_stdout)\n if rh.analyzer_stderr != '':\n LOG.error(rh.analyzer_stderr)\n return_codes = rh.analyzer_returncode\n\n if not args.keep_tmp:\n rh.clean_results()\n\n return (return_codes, skipped, 
action.analyzer_type)\n\n except Exception as e:\n LOG.debug_analyzer(str(e))\n traceback.print_exc(file=sys.stdout)\n return (1, skipped, action.analyzer_type)\n\ndef start_workers(args, actions, context, analyzer_config_map, skp_handler):\n \"\"\"\n start the workers in the process pool\n for every buildaction there is worker which makes the analysis\n \"\"\"\n\n # Handle SIGINT to stop this script running\n def signal_handler(*arg, **kwarg):\n try:\n pool.terminate()\n finally:\n sys.exit(1)\n\n signal.signal(signal.SIGINT, signal_handler)\n\n # Remove characters which could cause directory creation problems.\n no_spec_char_name = re.sub(r'[^\\w\\-_\\. ]', '_', args.name)\n report_output = os.path.join(context.codechecker_workspace,\n no_spec_char_name + '_reports')\n\n # create report output dir this will be used by the result handlers for each\n # analyzer to store analyzer results or temporary files\n # each analyzer instance does its own cleanup\n if not os.path.exists(report_output):\n os.mkdir(report_output)\n\n # Start checking parallel\n pool = multiprocessing.Pool(args.jobs)\n # pool.map(check, actions, 1)\n\n try:\n # Workaround, equialent of map\n # The main script does not get signal\n # while map or map_async function is running\n # It is a python bug, this does not happen if a timeout is specified;\n # then receive the interrupt immediately\n\n analyzed_actions = [(args,\n build_action,\n context,\n analyzer_config_map,\n skp_handler,\n report_output,\n True ) for build_action in actions]\n\n pool.map_async(check,\n analyzed_actions,\n 1,\n callback=worker_result_handler).get(float('inf'))\n\n pool.close()\n except Exception:\n pool.terminate()\n raise\n finally:\n pool.join()\n if not args.keep_tmp:\n LOG.debug('Removing temporary directory: ' + report_output)\n shutil.rmtree(report_output)\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":10} {"diff_hunk":"@@ -147,6 +147,13 @@ void shell_svc_destroy (struct shell_svc *svc)\n {\n if (svc) {\n int saved_errno = errno;\n+ if (svc->registered) {\n+ flux_future_t *f = NULL;\n+ if (!(f = flux_service_unregister (svc->shell->h, svc->name))\n+ || (flux_future_get (f, NULL) < 0))\n+ fprintf (stderr, \"unregister %s\\n\", svc->name);\n+ flux_future_destroy (f);\n+ }\n free (svc->rank_table);\n free (svc);\n errno = saved_errno;","source_code":"\/************************************************************\\\n * Copyright 2019 Lawrence Livermore National Security, LLC\n * (c.f. AUTHORS, NOTICE.LLNS, COPYING)\n *\n * This file is part of the Flux resource manager framework.\n * For details, see https:\/\/github.com\/flux-framework.\n *\n * SPDX-License-Identifier: LGPL-3.0\n\\************************************************************\/\n\n\/* Register a service named \"shell-\" on each shell and provide\n * helpers for registering request handlers for different \"methods\".\n *\n * Notes:\n * - Message handlers are not exposed. 
They are automatically set up to\n * allow FLUX_ROLE_USER access, started, and tied to flux_t for destruction.\n *\n * - Since request handlers can receive messages from any user, handlers\n * should call shell_svc_allowed() to verify that sender is instance owner,\n * or the shell user (job owner).\n *\n * - shell_svc_create () makes a synchronous RPC to register the service with\n * the broker.\n *\n * - Services should not be used until after the shells exit the init barrier,\n * to ensure service registration has completed.\n *\/\n\n#if HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n#include \n#include \n#include \n\n#include \"internal.h\"\n#include \"task.h\"\n#include \"svc.h\"\n\n#define TOPIC_STRING_SIZE 128\n\nstruct shell_svc {\n flux_shell_t *shell;\n uid_t uid; \/\/ effective uid of shell\n int *rank_table;\/\/ map shell rank to broker rank\n};\n\nstatic int lookup_rank (struct shell_svc *svc, int shell_rank, int *rank)\n{\n if (shell_rank < 0 || shell_rank >= svc->shell->info->shell_size) {\n errno = EINVAL;\n return -1;\n }\n *rank = svc->rank_table[shell_rank];\n return 0;\n}\n\nstatic int build_topic (struct shell_svc *svc,\n const char *method,\n char *buf,\n int len)\n{\n if (snprintf (buf,\n len,\n \"shell-%ju%s%s\",\n (uintmax_t)svc->shell->info->jobid,\n method ? \".\" : \"\",\n method ? method : \"\") >= len) {\n errno = EINVAL;\n return -1;\n }\n return 0;\n}\n\nflux_future_t *shell_svc_vpack (struct shell_svc *svc,\n const char *method,\n int shell_rank,\n int flags,\n const char *fmt,\n va_list ap)\n{\n char topic[TOPIC_STRING_SIZE];\n int rank;\n\n if (lookup_rank (svc, shell_rank, &rank) < 0)\n return NULL;\n if (build_topic (svc, method, topic, sizeof (topic)) < 0)\n return NULL;\n\n return flux_rpc_vpack (svc->shell->h, topic, rank, flags, fmt, ap);\n}\n\nflux_future_t *shell_svc_pack (struct shell_svc *svc,\n const char *method,\n int shell_rank,\n int flags,\n const char *fmt, ...)\n{\n flux_future_t *f;\n va_list ap;\n va_start (ap, fmt);\n f = shell_svc_vpack (svc, method, shell_rank, flags, fmt, ap);\n va_end (ap);\n return f;\n}\n\nint shell_svc_allowed (struct shell_svc *svc, const flux_msg_t *msg)\n{\n uint32_t rolemask;\n uint32_t userid;\n\n if (flux_msg_get_rolemask (msg, &rolemask) < 0\n || flux_msg_get_userid (msg, &userid) < 0)\n return -1;\n if (!(rolemask & FLUX_ROLE_OWNER) && userid != svc->uid) {\n errno = EPERM;\n return -1;\n }\n return 0;\n}\n\nint shell_svc_register (struct shell_svc *svc,\n const char *method,\n flux_msg_handler_f cb,\n void *arg)\n{\n struct flux_match match = FLUX_MATCH_REQUEST;\n flux_msg_handler_t *mh;\n flux_t *h = svc->shell->h;\n char topic[TOPIC_STRING_SIZE];\n\n if (build_topic (svc, method, topic, sizeof (topic)) < 0)\n return -1;\n match.topic_glob = topic;\n if (!(mh = flux_msg_handler_create (h, match, cb, arg)))\n return -1;\n if (flux_aux_set (h, NULL, mh, (flux_free_f)flux_msg_handler_destroy) < 0) {\n flux_msg_handler_destroy (mh);\n return -1;\n }\n flux_msg_handler_allow_rolemask (mh, FLUX_ROLE_USER);\n flux_msg_handler_start (mh);\n return 0;\n}\n\nvoid shell_svc_destroy (struct shell_svc *svc)\n{\n if (svc) {\n int saved_errno = errno;\n free (svc->rank_table);\n free (svc);\n errno = saved_errno;\n }\n}\n\nstruct shell_svc *shell_svc_create (flux_shell_t *shell)\n{\n struct shell_svc *svc;\n struct rcalc_rankinfo ri;\n int shell_size = shell->info->shell_size;\n int i;\n\n if (!(svc = calloc (1, sizeof (*svc))))\n return NULL;\n svc->shell = shell;\n svc->uid = geteuid ();\n if (!(svc->rank_table = 
calloc (shell_size, sizeof (*svc->rank_table))))\n goto error;\n for (i = 0; i < shell_size; i++) {\n if (rcalc_get_nth (shell->info->rcalc, i, &ri) < 0)\n goto error;\n svc->rank_table[i] = ri.rank;\n }\n if (!shell->standalone) {\n flux_future_t *f;\n char name[TOPIC_STRING_SIZE];\n if (build_topic (svc, NULL, name, sizeof (name)) < 0)\n goto error;\n if (!(f = flux_service_register (shell->h, name)))\n goto error;\n if (flux_future_get (f, NULL) < 0) {\n flux_future_destroy (f);\n goto error;\n }\n flux_future_destroy (f);\n }\n return svc;\nerror:\n shell_svc_destroy (svc);\n return NULL;\n}\n\n\/*\n * vi:tabstop=4 shiftwidth=4 expandtab\n *\/\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":11} {"diff_hunk":"@@ -31,12 +31,11 @@\n #include \n #include \n #include \"common_int.h\"\n-#include \n+#include \"ase_common.h\"\n \n fpga_result __FPGA_API__ fpgaCreateEventHandle(fpga_event_handle *handle)\n {\n \tstruct _fpga_event_handle *_eh;\n-\tfpga_result result = FPGA_OK;\n \n \tif (!handle)\n \t\treturn FPGA_INVALID_PARAM;","source_code":"\/\/ Copyright(c) 2017, Intel Corporation\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are met:\n\/\/\n\/\/ * Redistributions of source code must retain the above copyright notice,\n\/\/ this list of conditions and the following disclaimer.\n\/\/ * Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation\n\/\/ and\/or other materials provided with the distribution.\n\/\/ * Neither the name of Intel Corporation nor the names of its contributors\n\/\/ may be used to endorse or promote products derived from this software\n\/\/ without specific prior written permission.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n\/\/ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n\/\/ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n\/\/ ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n\/\/ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n\/\/ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n\/\/ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n\/\/ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n\/\/ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n\/\/ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n\/\/ POSSIBILITY OF SUCH DAMAGE.\n\n#ifdef HAVE_CONFIG_H\n#include \n#endif \/\/ HAVE_CONFIG_H\n\n#include \n#include \n#include \"common_int.h\"\n#include \n\nfpga_result __FPGA_API__ fpgaCreateEventHandle(fpga_event_handle *handle)\n{\n\tstruct _fpga_event_handle *_eh;\n\tfpga_result result = FPGA_OK;\n\n\tif (!handle)\n\t\treturn FPGA_INVALID_PARAM;\n\n\t_eh = malloc(sizeof(struct _fpga_event_handle));\n\tif (NULL == _eh) {\n\t\tFPGA_ERR(\"Could not allocate memory for event handle\");\n\t\treturn FPGA_NO_MEMORY;\n\t}\n\n\t\/* create eventfd *\/\n\t_eh->fd = eventfd(0, 0);\n\tif (_eh->fd < 0) {\n\t\tFPGA_ERR(\"eventfd : %s\", strerror(errno));\n\t\tresult = FPGA_NOT_FOUND;\n\t\tgoto out_free;\n\t}\n\n\t*handle = (fpga_event_handle)_eh;\n\treturn FPGA_OK;\n\nout_free:\n\tfree(_eh);\n\treturn result;\n}\n\nfpga_result __FPGA_API__ fpgaDestroyEventHandle(fpga_event_handle *handle)\n{\n\tstruct _fpga_event_handle *_eh;\n\tif (!handle)\n\t\treturn FPGA_INVALID_PARAM;\n\n\t_eh = (struct _fpga_event_handle *) *handle;\n\n\tif (NULL == _eh) {\n\t\tFPGA_ERR(\"Received NULL event handle\");\n\t\treturn FPGA_INVALID_PARAM;\n\t}\n\n\tif (close(_eh->fd) < 0) {\n\t\tFPGA_ERR(\"eventfd : %s\", strerror(errno));\n\t\tif (errno == EBADF)\n\t\t\treturn FPGA_INVALID_PARAM;\n\t\telse\n\t\t\treturn FPGA_EXCEPTION;\n\t}\n\n\tfree(*handle);\n\t*handle = NULL;\n\treturn FPGA_OK;\n}\n\nfpga_result __FPGA_API__ fpgaGetOSObjectFromEventHandle(const fpga_event_handle eh,\n\t\t\t\t\t\tint *fd)\n{\n\tstruct _fpga_event_handle *_eh = (struct _fpga_event_handle *) eh;\n\tif (NULL == _eh) {\n\t\tFPGA_ERR(\"Event handle is null\");\n\t\treturn FPGA_INVALID_PARAM;\n\t}\n\n\t*fd = _eh->fd;\n\n\treturn FPGA_OK;\n}\n\nfpga_result __FPGA_API__ fpgaRegisterEvent(fpga_handle handle,\n\t\t\t\t\t fpga_event_type type,\n\t\t\t\t\t fpga_event_handle event_handle,\n\t\t\t\t\t uint32_t flags)\n{\n\tUNUSED_PARAM(handle);\n\tif (type != FPGA_EVENT_INTERRUPT)\n\t\treturn FPGA_NOT_SUPPORTED;\n\n\tif (flags >= MAX_USR_INTRS)\n\t\treturn FPGA_INVALID_PARAM;\n\n\tif (register_event(FILE_DESCRIPTOR(event_handle), flags) == 0)\n\t\treturn FPGA_OK;\n\telse\n\t\treturn FPGA_EXCEPTION;\n}\n\nfpga_result __FPGA_API__ fpgaUnregisterEvent(fpga_handle handle,\n\t\t\t\t\t fpga_event_type event_type,\n\t\t\t\t\t fpga_event_handle event_handle)\n{\n\tUNUSED_PARAM(handle);\n\tif (event_type != FPGA_EVENT_INTERRUPT)\n\t\treturn FPGA_NOT_SUPPORTED;\n\n\tif (unregister_event(FILE_DESCRIPTOR(event_handle)) == 0)\n\t\treturn FPGA_OK;\n\telse\n\t\treturn FPGA_EXCEPTION;\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":12} {"diff_hunk":"@@ -40,16 +40,17 @@ def get_log_env(logfile, context, original_env):\n \n # -----------------------------------------------------------------------------\n def get_check_env(path_env_extra, ld_lib_path_extra):\n- '''\n+ \"\"\"\n Extending the checker environment.\n Check environment is extended to find tools if they ar not on\n- the default places\n- '''\n+ the default places.\n+ \"\"\"\n new_env = 
os.environ.copy()\n \n if len(path_env_extra) > 0:\n extra_path = ':'.join(path_env_extra)\n- LOG.debug_analyzer('Extending PATH environment variable with: ' + extra_path)\n+ LOG.debug_analyzer(\n+ 'Extending PATH environment variable with: ' + extra_path)\n \n try:\n new_env['PATH'] = extra_path + ':' + new_env['PATH']","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n''''''\n\nimport os\n\nfrom codechecker_lib import logger\n\nLOG = logger.get_new_logger('ENV')\n\n\n# ------------------------------------------------------------------------------\ndef get_log_env(logfile, context, original_env):\n '''\n Environment for logging. With the ld logger.\n Keep the original environment unmodified as possible\n Only environment variables required for logging are changed\n '''\n new_env = original_env\n\n new_env[context.env_var_cc_logger_bin] = context.path_logger_bin\n\n new_env['LD_PRELOAD'] = context.logger_lib_name\n\n try:\n original_ld_library_path = new_env['LD_LIBRARY_PATH']\n new_env['LD_LIBRARY_PATH'] = context.path_logger_lib + \\\n ':' + original_ld_library_path\n except:\n new_env['LD_LIBRARY_PATH'] = context.path_logger_lib\n\n # set ld logger logfile\n new_env[context.env_var_cc_logger_file] = logfile\n\n return new_env\n\n\n# -----------------------------------------------------------------------------\ndef get_check_env(path_env_extra, ld_lib_path_extra):\n '''\n Extending the checker environment.\n Check environment is extended to find tools if they ar not on\n the default places\n '''\n new_env = os.environ.copy()\n\n if len(path_env_extra) > 0:\n extra_path = ':'.join(path_env_extra)\n LOG.debug_analyzer('Extending PATH environment variable with: ' + extra_path)\n\n try:\n new_env['PATH'] = extra_path + ':' + new_env['PATH']\n except:\n new_env['PATH'] = extra_path\n\n if len(ld_lib_path_extra) > 0:\n extra_lib = ':'.join(ld_lib_path_extra)\n LOG.debug_analyzer('Extending LD_LIBRARY_PATH environment variable with: ' + extra_lib)\n try:\n original_ld_library_path = new_env['LD_LIBRARY_PATH']\n new_env['LD_LIBRARY_PATH'] = extra_lib + ':' + original_ld_library_path\n except:\n new_env['LD_LIBRARY_PATH'] = extra_lib\n\n return new_env\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":13} {"diff_hunk":"@@ -19,6 +19,8 @@ package fv_test\n import (\n \t. \"github.com\/onsi\/ginkgo\"\n \t. \"github.com\/onsi\/gomega\"\n+\tlog \"github.com\/sirupsen\/logrus\"\n+\n \t\"github.com\/projectcalico\/felix\/fv\/containers\"\n \t\"github.com\/projectcalico\/felix\/fv\/metrics\"\n \t\"github.com\/projectcalico\/felix\/fv\/utils\"","source_code":"\/\/ +build fvtests\n\n\/\/ Copyright (c) 2017 Tigera, Inc. 
All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fv_test\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/projectcalico\/felix\/fv\/containers\"\n\t\"github.com\/projectcalico\/felix\/fv\/metrics\"\n\t\"github.com\/projectcalico\/felix\/fv\/utils\"\n\t\"github.com\/projectcalico\/felix\/fv\/workload\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/api\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/client\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/numorstring\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nfunc RunEtcd() *containers.Container {\n\treturn containers.Run(\"etcd-fv\",\n\t\t\"quay.io\/coreos\/etcd\",\n\t\t\"etcd\",\n\t\t\"--advertise-client-urls\", \"http:\/\/127.0.0.1:2379\",\n\t\t\"--listen-client-urls\", \"http:\/\/0.0.0.0:2379\")\n}\n\nfunc RunFelix(etcdIP string) *containers.Container {\n\treturn containers.Run(\"felix-fv\",\n\t\t\"--privileged\",\n\t\t\"-e\", \"CALICO_DATASTORE_TYPE=etcdv2\",\n\t\t\"-e\", \"FELIX_DATASTORETYPE=etcdv2\",\n\t\t\"-e\", \"FELIX_ETCDENDPOINTS=http:\/\/\"+etcdIP+\":2379\",\n\t\t\"-e\", \"FELIX_PROMETHEUSMETRICSENABLED=true\",\n\t\t\"-e\", \"FELIX_USAGEREPORTINGENABLED=false\",\n\t\t\"-e\", \"FELIX_IPV6SUPPORT=false\",\n\t\t\"calico\/felix:latest\")\n}\n\nfunc GetEtcdClient(etcdIP string) *client.Client {\n\tclient, err := client.New(api.CalicoAPIConfig{\n\t\tSpec: api.CalicoAPIConfigSpec{\n\t\t\tDatastoreType: api.EtcdV2,\n\t\t\tEtcdConfig: api.EtcdConfig{\n\t\t\t\tEtcdEndpoints: \"http:\/\/\" + etcdIP + \":2379\",\n\t\t\t},\n\t\t},\n\t})\n\tExpect(err).NotTo(HaveOccurred())\n\treturn client\n}\n\nfunc MetricsPortReachable(felixName, felixIP string) bool {\n\t\/\/ Delete existing conntrack state for the metrics port.\n\tutils.Run(\"docker\", \"exec\", felixName,\n\t\t\"conntrack\", \"-L\")\n\tutils.Run(\"docker\", \"exec\", felixName,\n\t\t\"conntrack\", \"-L\", \"-p\", \"tcp\", \"--dport\", metrics.PortString())\n\tutils.RunMayFail(\"docker\", \"exec\", felixName,\n\t\t\"conntrack\", \"-D\", \"-p\", \"tcp\", \"--orig-port-dst\", metrics.PortString())\n\n\t\/\/ Now try to get a metric.\n\tm, err := metrics.GetFelixMetric(felixIP, \"felix_active_local_endpoints\")\n\tif err != nil {\n\t\tlog.WithError(err).Info(\"Metrics port not reachable\")\n\t\treturn false\n\t}\n\tlog.WithField(\"felix_active_local_endpoints\", m).Info(\"Metrics port reachable\")\n\treturn true\n}\n\n\/\/ Here we test reachability to a port number running on a Calico host itself, specifically Felix's\n\/\/ metrics port 9091, and how that is affected by policy, host endpoint and workload endpoint\n\/\/ configuration.\n\/\/\n\/\/ - When there is no policy or endpoint configuration, the port should be reachable.\n\/\/\n\/\/ - When there is a local workload endpoint, the port should be reachable. 
(Existence of workload\n\/\/ endpoints should make no difference to reachability to ports on the host itself.)\n\/\/\n\/\/ - When a host endpoint is configured for the host's interface (eth0), but not yet any policy, the\n\/\/ port should be unreachable.\n\/\/\n\/\/ - When pre-DNAT policy is then configured, to allow ingress to that port, it should be\n\/\/ reachable again.\n\nvar _ = Context(\"with initialized Felix and etcd datastore\", func() {\n\n\tvar (\n\t\tetcd *containers.Container\n\t\tfelix *containers.Container\n\t\tclient *client.Client\n\t\tmetricsPortReachable func() bool\n\t)\n\n\tBeforeEach(func() {\n\n\t\tetcd = RunEtcd()\n\n\t\tclient = GetEtcdClient(etcd.IP)\n\t\tEventually(client.EnsureInitialized, \"10s\", \"1s\").ShouldNot(HaveOccurred())\n\n\t\tfelix = RunFelix(etcd.IP)\n\n\t\tfelixNode := api.NewNode()\n\t\tfelixNode.Metadata.Name = felix.Hostname\n\t\t_, err := client.Nodes().Create(felixNode)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tmetricsPortReachable = func() bool {\n\t\t\treturn MetricsPortReachable(felix.Name, felix.IP)\n\t\t}\n\t})\n\n\tAfterEach(func() {\n\n\t\tif CurrentGinkgoTestDescription().Failed {\n\t\t\tutils.Run(\"docker\", \"logs\", felix.Name)\n\t\t\tutils.Run(\"docker\", \"exec\", felix.Name, \"iptables-save\", \"-c\")\n\t\t}\n\t\tfelix.Stop()\n\n\t\tif CurrentGinkgoTestDescription().Failed {\n\t\t\tutils.Run(\"docker\", \"exec\", etcd.Name, \"etcdctl\", \"ls\", \"--recursive\", \"\/\")\n\t\t}\n\t\tetcd.Stop()\n\t})\n\n\tIt(\"with no endpoints or policy, port should be reachable\", func() {\n\t\tEventually(metricsPortReachable, \"10s\", \"1s\").Should(BeTrue())\n\t})\n\n\tIt(\"with a local workload, port should be reachable\", func() {\n\t\tw := workload.Run(felix, \"cali12345\", \"10.65.0.2\", \"8055\")\n\t\tw.Configure(client)\n\t\tEventually(metricsPortReachable, \"10s\", \"1s\").Should(BeTrue())\n\t\tw.Stop()\n\t\tEventually(metricsPortReachable, \"10s\", \"1s\").Should(BeTrue())\n\t})\n\n\tContext(\"with host endpoint defined\", func() {\n\n\t\tBeforeEach(func() {\n\t\t\thostEp := api.NewHostEndpoint()\n\t\t\thostEp.Metadata.Name = \"host-endpoint-1\"\n\t\t\thostEp.Metadata.Node = felix.Hostname\n\t\t\thostEp.Metadata.Labels = map[string]string{\"host-endpoint\": \"true\"}\n\t\t\thostEp.Spec.InterfaceName = \"eth0\"\n\t\t\t_, err := client.HostEndpoints().Create(hostEp)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\n\t\tIt(\"port should not be reachable\", func() {\n\t\t\tEventually(metricsPortReachable, \"10s\", \"1s\").Should(BeFalse())\n\t\t})\n\n\t\tContext(\"with pre-DNAT policy defined\", func() {\n\n\t\t\tBeforeEach(func() {\n\t\t\t\tpolicy := api.NewPolicy()\n\t\t\t\tpolicy.Metadata.Name = \"pre-dnat-policy-1\"\n\t\t\t\tpolicy.Spec.PreDNAT = true\n\t\t\t\tprotocol := numorstring.ProtocolFromString(\"tcp\")\n\t\t\t\tallowMetricsPortRule := api.Rule{\n\t\t\t\t\tAction: \"allow\",\n\t\t\t\t\tProtocol: &protocol,\n\t\t\t\t\tDestination: api.EntityRule{\n\t\t\t\t\t\tPorts: []numorstring.Port{numorstring.SinglePort(uint16(metrics.Port))},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tpolicy.Spec.IngressRules = []api.Rule{allowMetricsPortRule}\n\t\t\t\tpolicy.Spec.Selector = \"host-endpoint=='true'\"\n\t\t\t\t_, err := client.Policies().Create(policy)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t})\n\n\t\t\tIt(\"port should be reachable\", func() {\n\t\t\t\tEventually(metricsPortReachable, \"10s\", \"1s\").Should(BeTrue())\n\t\t\t})\n\t\t})\n\t})\n})\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":14} {"diff_hunk":"@@ -18,6 
+18,9 @@ import portalocker\n import psutil\n import socket\n import stat\n+from libcodechecker.logger import get_logger\n+\n+LOG = get_logger('server')\n \n \n def __getInstanceDescriptorPath(folder=None):","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n\"\"\"\nInstance manager handles the state keeping of running CodeChecker instances\nfor a particular user on the local machine.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport getpass\nimport json\nimport os\nimport portalocker\nimport psutil\nimport socket\nimport stat\n\n\ndef __getInstanceDescriptorPath(folder=None):\n if not folder:\n folder = os.path.expanduser(\"~\")\n\n return os.path.join(folder, \".codechecker.instances.json\")\n\n\ndef __makeInstanceDescriptorFile(folder=None):\n descriptor = __getInstanceDescriptorPath(folder)\n if not os.path.exists(descriptor):\n with open(descriptor, 'w') as f:\n json.dump([], f)\n os.chmod(descriptor, stat.S_IRUSR | stat.S_IWUSR)\n\n\ndef __checkInstance(hostname, pid):\n \"\"\"Check if the given process on the system is a valid, running CodeChecker\n for the current user.\"\"\"\n\n # Instances running on a remote host with a filesystem shared with us can\n # not usually be checked (\/proc is rarely shared across computers...),\n # so we consider them \"alive\" servers.\n if hostname != socket.gethostname():\n return True\n\n try:\n proc = psutil.Process(pid)\n\n return \"CodeChecker.py\" in proc.cmdline()[1] and \\\n proc.username() == getpass.getuser()\n except psutil.NoSuchProcess:\n # If the process does not exist, it cannot be valid.\n return False\n\n\ndef __rewriteInstanceFile(append, remove, folder=None):\n \"\"\"This helper method reads the user's instance descriptor and manages it\n eliminating dead records, appending new ones and reserialising the file.\"\"\"\n\n __makeInstanceDescriptorFile(folder)\n with open(__getInstanceDescriptorPath(folder), 'r+') as f:\n portalocker.lock(f, portalocker.LOCK_EX)\n\n # After reading, check every instance if they are still valid and\n # make sure PID does not collide accidentally with the\n # to-be-registered instances, if any exists in the append list as it\n # would cause duplication.\n #\n # Also, we remove the records to the given PIDs, if any exists.\n append_pids = [i['pid'] for i in append]\n instances = [i for i in json.load(f)\n if i['pid'] not in append_pids and\n (i['hostname'] + \":\" + str(i['pid'])) not in remove and\n __checkInstance(i['hostname'], i['pid'])]\n\n instances = instances + append\n\n f.seek(0)\n f.truncate()\n json.dump(instances, f, indent=2)\n portalocker.unlock(f)\n\n\ndef register(pid, workspace, port, folder=None):\n \"\"\"\n Adds the specified CodeChecker server instance to the user's instance\n descriptor.\n \"\"\"\n\n __rewriteInstanceFile([{\"pid\": pid,\n \"hostname\": socket.gethostname(),\n \"workspace\": workspace,\n \"port\": port}],\n [],\n folder)\n\n\ndef unregister(pid, folder=None):\n \"\"\"\n Removes the specified CodeChecker server instance from the user's instance\n descriptor.\n \"\"\"\n\n __rewriteInstanceFile([], [socket.gethostname() + \":\" + str(pid)], folder)\n\n\ndef get_instances(folder=None):\n \"\"\"Returns the list of running servers 
for the current user.\"\"\"\n\n # This method does NOT write the descriptor file.\n\n descriptor = __getInstanceDescriptorPath(folder)\n instances = []\n if os.path.exists(descriptor):\n with open(descriptor, 'r') as f:\n portalocker.lock(f, portalocker.LOCK_SH)\n instances = [i for i in json.load(f) if __checkInstance(\n i['hostname'],\n i['pid'])]\n portalocker.unlock(f)\n\n return instances\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":15} {"diff_hunk":"@@ -86,7 +86,7 @@ func TestNATNoBackendFromHEP(t *testing.T) {\n \t)\n \tExpect(err).NotTo(HaveOccurred())\n \n-\trunBpfTest(t, \"calico_from_host_ep\", nil, func(bpfrun bpfProgRunFn) {\n+\trunBpfTest(t, \"calico_from_host_ep\", false, nil, func(bpfrun bpfProgRunFn) {\n \t\tres, err := bpfrun(pktBytes)\n \t\tExpect(err).NotTo(HaveOccurred())\n \t\tExpect(res.RetvalStr()).To(Equal(\"TC_ACT_UNSPEC\"), \"expected program to return TC_ACT_UNSPEC\")","source_code":"\/\/ Copyright (c) 2019-2021 Tigera, Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ut_test\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/google\/gopacket\"\n\t\"github.com\/google\/gopacket\/layers\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"github.com\/projectcalico\/felix\/bpf\/nat\"\n)\n\nfunc TestICMPPortUnreachable(t *testing.T) {\n\tRegisterTestingT(t)\n\n\t_, ipv4, _, _, pktBytes, err := testPacketUDPDefault()\n\tExpect(err).NotTo(HaveOccurred())\n\n\trunBpfUnitTest(t, \"icmp_port_unreachable.c\", func(bpfrun bpfProgRunFn) {\n\t\tres, err := bpfrun(pktBytes)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(res.Retval).To(Equal(0))\n\n\t\tExpect(res.dataOut).To(HaveLen(134)) \/\/ eth + ip + 64 + udp + ip + icmp\n\n\t\tpktR := gopacket.NewPacket(res.dataOut, layers.LayerTypeEthernet, gopacket.Default)\n\t\tfmt.Printf(\"pktR = %+v\\n\", pktR)\n\n\t\tcheckICMPPortUnreachable(pktR, ipv4)\n\t})\n\n}\n\nfunc TestNATNoBackendFromHEP(t *testing.T) {\n\tRegisterTestingT(t)\n\n\tiphdr := *ipv4Default\n\n\t_, ipv4, l4, _, pktBytes, err := testPacket(nil, &iphdr, nil, nil)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tudp := l4.(*layers.UDP)\n\n\t\/\/ Test with count as 1 but no backend. 
This results in a NAT backend lookup failure\n\tnatkey := nat.NewNATKey(ipv4.DstIP, uint16(udp.DstPort), uint8(ipv4.Protocol)).AsBytes()\n\terr = natMap.Update(\n\t\tnatkey,\n\t\tnat.NewNATValue(0, 1, 0, 0).AsBytes(),\n\t)\n\tExpect(err).NotTo(HaveOccurred())\n\tdefer func() {\n\t\terr := natMap.Delete(natkey)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}()\n\n\trunBpfTest(t, \"calico_from_host_ep\", nil, func(bpfrun bpfProgRunFn) {\n\t\tres, err := bpfrun(pktBytes)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(res.RetvalStr()).To(Equal(\"TC_ACT_UNSPEC\"), \"expected program to return TC_ACT_UNSPEC\")\n\n\t\tpktR := gopacket.NewPacket(res.dataOut, layers.LayerTypeEthernet, gopacket.Default)\n\t\tfmt.Printf(\"pktR = %+v\\n\", pktR)\n\n\t\tcheckICMPPortUnreachable(pktR, ipv4)\n\t})\n\n\t\/\/ Test with count as 0. This results in a no backend after frontend lookup as count is 0.\n\terr = natMap.Update(\n\t\tnatkey,\n\t\tnat.NewNATValue(0, 0, 0, 0).AsBytes(),\n\t)\n\tExpect(err).NotTo(HaveOccurred())\n\n\trunBpfTest(t, \"calico_from_host_ep\", nil, func(bpfrun bpfProgRunFn) {\n\t\tres, err := bpfrun(pktBytes)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(res.RetvalStr()).To(Equal(\"TC_ACT_UNSPEC\"), \"expected program to return TC_ACT_UNSPEC\")\n\n\t\tpktR := gopacket.NewPacket(res.dataOut, layers.LayerTypeEthernet, gopacket.Default)\n\t\tfmt.Printf(\"pktR = %+v\\n\", pktR)\n\n\t\tcheckICMPPortUnreachable(pktR, ipv4)\n\t})\n}\n\nfunc checkICMPPortUnreachable(pktR gopacket.Packet, ipv4 *layers.IPv4) {\n\tipv4L := pktR.Layer(layers.LayerTypeIPv4)\n\tExpect(ipv4L).NotTo(BeNil())\n\tipv4R := ipv4L.(*layers.IPv4)\n\n\tExpect(ipv4R.Protocol).To(Equal(layers.IPProtocolICMPv4))\n\tExpect(ipv4R.SrcIP.String()).To(Equal(intfIP.String()))\n\tExpect(ipv4R.DstIP).To(Equal(ipv4.SrcIP))\n\n\ticmpL := pktR.Layer(layers.LayerTypeICMPv4)\n\tExpect(ipv4L).NotTo(BeNil())\n\ticmpR := icmpL.(*layers.ICMPv4)\n\n\tExpect(icmpR.TypeCode).To(Equal(\n\t\tlayers.CreateICMPv4TypeCode(\n\t\t\tlayers.ICMPv4TypeDestinationUnreachable,\n\t\t\tlayers.ICMPv4CodePort,\n\t\t)))\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":16} {"diff_hunk":"@@ -60,7 +60,8 @@ class GenericSuppressHandler(suppress_handler.SuppressHandler):\n ret = suppress_file_handler.write_to_suppress_file(self.suppress_file,\n bug_id,\n file_name,\n- comment)\n+ comment,\n+ status)\n self.__revalidate_suppress_data()\n return ret\n ","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n\"\"\"\nHandler for suppressing a bug.\n\"\"\"\n\nimport os\n\nfrom libcodechecker import suppress_file_handler\nfrom libcodechecker import suppress_handler\nfrom libcodechecker.logger import get_logger\n\n# Warning! 
this logger should only be used in this module.\nLOG = get_logger('system')\n\n\nclass GenericSuppressHandler(suppress_handler.SuppressHandler):\n\n def __init__(self, suppress_file, allow_write):\n \"\"\"\n Create a new suppress handler with a suppress_file as backend.\n \"\"\"\n super(GenericSuppressHandler, self).__init__()\n\n self.__suppress_info = []\n self.__allow_write = allow_write\n\n if suppress_file:\n self.suppress_file = suppress_file\n self.__have_memory_backend = True\n self.__revalidate_suppress_data()\n else:\n self.__have_memory_backend = False\n self.__arrow_write = False\n\n if allow_write:\n raise ValueError(\"Can't create allow_write=True suppress \"\n \"handler without a backend file.\")\n\n def __revalidate_suppress_data(self):\n \"\"\"Reload the information in the suppress file to the memory.\"\"\"\n\n if not self.__have_memory_backend:\n # Do not load and have suppress data stored in memory if not\n # needed.\n return\n\n with open(self.suppress_file, 'r') as file_handle:\n self.__suppress_info = suppress_file_handler.\\\n get_suppress_data(file_handle)\n\n def store_suppress_bug_id(self, bug_id, file_name, comment):\n\n if not self.__allow_write:\n return True\n\n ret = suppress_file_handler.write_to_suppress_file(self.suppress_file,\n bug_id,\n file_name,\n comment)\n self.__revalidate_suppress_data()\n return ret\n\n def remove_suppress_bug_id(self, bug_id, file_name):\n\n if not self.__allow_write:\n return True\n\n ret = suppress_file_handler.remove_from_suppress_file(\n self.suppress_file,\n bug_id,\n file_name)\n self.__revalidate_suppress_data()\n return ret\n\n def get_suppressed(self, bug):\n\n return any([suppress for suppress in self.__suppress_info\n if suppress[0] == bug['hash_value'] and\n suppress[1] == os.path.basename(bug['file_path'])])\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":17} {"diff_hunk":"@@ -96,8 +96,8 @@ static void subcompositor_get_subsurface(struct wl_client *client,\n \t\tstruct wl_resource *resource, uint32_t id,\n \t\tstruct wl_resource *surface_resource,\n \t\tstruct wl_resource *parent_resource) {\n-\tstruct wlr_surface *surface = wl_resource_get_user_data(surface_resource);\n-\tstruct wlr_surface *parent = wl_resource_get_user_data(parent_resource);\n+\tstruct wlr_surface *surface = wlr_surface_from_resource(surface_resource);\n+\tstruct wlr_surface *parent = wlr_surface_from_resource(parent_resource);\n \n \tstatic const char msg[] = \"get_subsurface: wl_subsurface@\";\n ","source_code":"#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"util\/signal.h\"\n\nstatic void destroy_surface_listener(struct wl_listener *listener, void *data) {\n\twl_list_remove(wl_resource_get_link(data));\n}\n\nstatic void wl_compositor_create_surface(struct wl_client *client,\n\t\tstruct wl_resource *resource, uint32_t id) {\n\tstruct wlr_compositor *compositor = wl_resource_get_user_data(resource);\n\n\tstruct wl_resource *surface_resource = wl_resource_create(client,\n\t\t&wl_surface_interface, wl_resource_get_version(resource), id);\n\tif (surface_resource == NULL) {\n\t\twl_resource_post_no_memory(resource);\n\t\treturn;\n\t}\n\n\tstruct wlr_surface *surface = wlr_surface_create(surface_resource,\n\t\tcompositor->renderer);\n\tif (surface == NULL) {\n\t\twl_resource_destroy(surface_resource);\n\t\twl_resource_post_no_memory(resource);\n\t\treturn;\n\t}\n\tsurface->compositor_data = compositor;\n\tsurface->compositor_listener.notify = 
&destroy_surface_listener;\n\twl_resource_add_destroy_listener(surface_resource,\n\t\t&surface->compositor_listener);\n\n\twl_list_insert(&compositor->surfaces,\n\t\twl_resource_get_link(surface_resource));\n\twlr_signal_emit_safe(&compositor->events.new_surface, surface);\n}\n\nstatic void wl_compositor_create_region(struct wl_client *client,\n\t\tstruct wl_resource *resource, uint32_t id) {\n\twlr_region_create(client, resource, id);\n}\n\nstruct wl_compositor_interface wl_compositor_impl = {\n\t.create_surface = wl_compositor_create_surface,\n\t.create_region = wl_compositor_create_region\n};\n\nstatic void wl_compositor_destroy(struct wl_resource *resource) {\n\tstruct wlr_compositor *compositor = wl_resource_get_user_data(resource);\n\tstruct wl_resource *_resource = NULL;\n\twl_resource_for_each(_resource, &compositor->wl_resources) {\n\t\tif (_resource == resource) {\n\t\t\tstruct wl_list *link = wl_resource_get_link(_resource);\n\t\t\twl_list_remove(link);\n\t\t\tbreak;\n\t\t}\n\t}\n}\n\nstatic void wl_compositor_bind(struct wl_client *wl_client, void *data,\n\t\tuint32_t version, uint32_t id) {\n\tstruct wlr_compositor *compositor = data;\n\tassert(wl_client && compositor);\n\n\tstruct wl_resource *wl_resource =\n\t\twl_resource_create(wl_client, &wl_compositor_interface, version, id);\n\tif (wl_resource == NULL) {\n\t\twl_client_post_no_memory(wl_client);\n\t\treturn;\n\t}\n\twl_resource_set_implementation(wl_resource, &wl_compositor_impl,\n\t\tcompositor, wl_compositor_destroy);\n\twl_list_insert(&compositor->wl_resources,\n\t\twl_resource_get_link(wl_resource));\n}\n\nvoid wlr_compositor_destroy(struct wlr_compositor *compositor) {\n\tif (compositor == NULL) {\n\t\treturn;\n\t}\n\twl_list_remove(&compositor->display_destroy.link);\n\twl_global_destroy(compositor->wl_global);\n\tfree(compositor);\n}\n\nstatic void subcompositor_destroy(struct wl_client *client,\n\t\tstruct wl_resource *resource) {\n\twl_resource_destroy(resource);\n}\n\nstatic void subcompositor_get_subsurface(struct wl_client *client,\n\t\tstruct wl_resource *resource, uint32_t id,\n\t\tstruct wl_resource *surface_resource,\n\t\tstruct wl_resource *parent_resource) {\n\tstruct wlr_surface *surface = wl_resource_get_user_data(surface_resource);\n\tstruct wlr_surface *parent = wl_resource_get_user_data(parent_resource);\n\n\tstatic const char msg[] = \"get_subsurface: wl_subsurface@\";\n\n\tif (surface == parent) {\n\t\twl_resource_post_error(resource,\n\t\t\tWL_SUBCOMPOSITOR_ERROR_BAD_SURFACE,\n\t\t\t\"%s%d: wl_surface@%d cannot be its own parent\",\n\t\t\tmsg, id, wl_resource_get_id(surface_resource));\n\t\treturn;\n\t}\n\n\tif (surface->subsurface) {\n\t\twl_resource_post_error(resource,\n\t\t\tWL_SUBCOMPOSITOR_ERROR_BAD_SURFACE,\n\t\t\t\"%s%d: wl_surface@%d is already a sub-surface\",\n\t\t\tmsg, id, wl_resource_get_id(surface_resource));\n\t\treturn;\n\t}\n\n\tif (wlr_surface_get_main_surface(parent) == surface) {\n\t\twl_resource_post_error(resource,\n\t\t\tWL_SUBCOMPOSITOR_ERROR_BAD_SURFACE,\n\t\t\t\"%s%d: wl_surface@%d is an ancestor of parent\",\n\t\t\tmsg, id, wl_resource_get_id(surface_resource));\n\t\treturn;\n\t}\n\n\tif (wlr_surface_set_role(surface, \"wl_subsurface\", resource,\n\t\t\t\tWL_SUBCOMPOSITOR_ERROR_BAD_SURFACE) < 0) {\n\t\treturn;\n\t}\n\n\twlr_surface_make_subsurface(surface, parent, id);\n\tif (!surface->subsurface) {\n\t\twl_resource_post_no_memory(resource);\n\t\treturn;\n\t}\n}\n\n\nstatic const struct wl_subcompositor_interface subcompositor_interface = {\n\t.destroy = 
subcompositor_destroy,\n\t.get_subsurface = subcompositor_get_subsurface,\n};\n\nstatic void subcompositor_bind(struct wl_client *client, void *data,\n\t\tuint32_t version, uint32_t id) {\n\tstruct wlr_compositor *compositor = data;\n\tstruct wl_resource *resource =\n\t\twl_resource_create(client, &wl_subcompositor_interface, 1, id);\n\tif (resource == NULL) {\n\t\twl_client_post_no_memory(client);\n\t\treturn;\n\t}\n\twl_resource_set_implementation(resource, &subcompositor_interface,\n\t\tcompositor, NULL);\n}\n\nstatic void handle_display_destroy(struct wl_listener *listener, void *data) {\n\tstruct wlr_compositor *compositor =\n\t\twl_container_of(listener, compositor, display_destroy);\n\twlr_compositor_destroy(compositor);\n}\n\nstruct wlr_compositor *wlr_compositor_create(struct wl_display *display,\n\t\tstruct wlr_renderer *renderer) {\n\tstruct wlr_compositor *compositor =\n\t\tcalloc(1, sizeof(struct wlr_compositor));\n\tif (!compositor) {\n\t\twlr_log_errno(L_ERROR, \"Could not allocate wlr compositor\");\n\t\treturn NULL;\n\t}\n\n\tstruct wl_global *compositor_global = wl_global_create(display,\n\t\t&wl_compositor_interface, 4, compositor, wl_compositor_bind);\n\tif (!compositor_global) {\n\t\twlr_log_errno(L_ERROR, \"Could not allocate compositor global\");\n\t\tfree(compositor);\n\t\treturn NULL;\n\t}\n\tcompositor->wl_global = compositor_global;\n\tcompositor->renderer = renderer;\n\n\twl_global_create(display, &wl_subcompositor_interface, 1, compositor,\n\t\tsubcompositor_bind);\n\n\twl_list_init(&compositor->wl_resources);\n\twl_list_init(&compositor->surfaces);\n\twl_signal_init(&compositor->events.new_surface);\n\n\tcompositor->display_destroy.notify = handle_display_destroy;\n\twl_display_add_destroy_listener(display, &compositor->display_destroy);\n\n\treturn compositor;\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":18} {"diff_hunk":"@@ -158,11 +158,11 @@ func (r *ruleRenderer) WorkloadEndpointToIptablesChains(epID *proto.WorkloadEndp\n \toutRules = append(outRules, r.DropRules(Match(), \"Drop if no profiles matched\")...)\n \n \ttoEndpointChain := Chain{\n-\t\tName: WorkloadEndpointChainName(WorkloadToEndpointPfx, endpoint),\n+\t\tName: EndpointChainName(WorkloadToEndpointPfx, endpoint.Name),\n \t\tRules: inRules,\n \t}\n \tfromEndpointChain := Chain{\n-\t\tName: WorkloadEndpointChainName(WorkloadFromEndpointPfx, endpoint),\n+\t\tName: EndpointChainName(WorkloadFromEndpointPfx, endpoint.Name),\n \t\tRules: outRules,\n \t}\n \treturn []*Chain{&toEndpointChain, &fromEndpointChain}","source_code":"\/\/ Copyright (c) 2016 Tigera, Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage rules\n\nimport (\n\t\"github.com\/projectcalico\/felix\/go\/felix\/hashutils\"\n\t. 
\"github.com\/projectcalico\/felix\/go\/felix\/iptables\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/proto\"\n)\n\nfunc (r *ruleRenderer) WorkloadDispatchChains(endpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint) []*Chain {\n\ttoEndpointRules := make([]Rule, 0, len(endpoints)+1)\n\tfromEndpointRules := make([]Rule, 0, len(endpoints)+1)\n\tfor _, endpoint := range endpoints {\n\t\tfromEndpointRules = append(fromEndpointRules, Rule{\n\t\t\tMatch: Match().InInterface(endpoint.Name),\n\t\t\tAction: GotoAction{\n\t\t\t\tTarget: WorkloadEndpointChainName(WorkloadFromEndpointPfx, endpoint),\n\t\t\t},\n\t\t})\n\t\ttoEndpointRules = append(toEndpointRules, Rule{\n\t\t\tMatch: Match().OutInterface(endpoint.Name),\n\t\t\tAction: GotoAction{\n\t\t\t\tTarget: WorkloadEndpointChainName(WorkloadToEndpointPfx, endpoint),\n\t\t\t},\n\t\t})\n\t}\n\n\tfromEndpointRules = append(fromEndpointRules, Rule{\n\t\tAction: DropAction{},\n\t})\n\ttoEndpointRules = append(toEndpointRules, Rule{\n\t\tAction: DropAction{},\n\t})\n\n\tfromEndpointDispatchChain := Chain{\n\t\tName: DispatchFromWorkloadEndpoint,\n\t\tRules: fromEndpointRules,\n\t}\n\ttoEndpointDispatchChain := Chain{\n\t\tName: DispatchToWorkloadEndpoint,\n\t\tRules: toEndpointRules,\n\t}\n\n\treturn []*Chain{&toEndpointDispatchChain, &fromEndpointDispatchChain}\n}\n\nfunc (r *ruleRenderer) WorkloadEndpointToIptablesChains(epID *proto.WorkloadEndpointID, endpoint *proto.WorkloadEndpoint) []*Chain {\n\tinRules := []Rule{}\n\toutRules := []Rule{}\n\n\t\/\/ Start by ensuring that the accept mark bit is clear, policies set that bit to indicate\n\t\/\/ that they accepted the packet.\n\tinRules = append(inRules, Rule{\n\t\tAction: ClearMarkAction{\n\t\t\tMark: r.IptablesMarkAccept,\n\t\t},\n\t})\n\toutRules = append(outRules, Rule{\n\t\tAction: ClearMarkAction{\n\t\t\tMark: r.IptablesMarkAccept,\n\t\t},\n\t})\n\n\t\/\/ TODO(smc) Police the MAC?\n\n\tfor _, tier := range endpoint.Tiers {\n\t\t\/\/ For each tier, clear the \"accepted by tier\" mark.\n\t\tinRules = append(inRules, Rule{\n\t\t\tComment: \"Start of tier \" + tier.Name,\n\t\t\tAction: ClearMarkAction{\n\t\t\t\tMark: r.IptablesMarkNextTier,\n\t\t\t},\n\t\t})\n\t\toutRules = append(outRules, Rule{\n\t\t\tComment: \"Start of tier \" + tier.Name,\n\t\t\tAction: ClearMarkAction{\n\t\t\t\tMark: r.IptablesMarkNextTier,\n\t\t\t},\n\t\t})\n\t\t\/\/ Then, jump to each policy in turn.\n\t\tfor _, polID := range tier.Policies {\n\t\t\tinPolChainName := PolicyChainName(\n\t\t\t\tPolicyInboundPfx,\n\t\t\t\t&proto.PolicyID{Tier: tier.Name, Name: polID},\n\t\t\t)\n\t\t\tinRules = append(inRules,\n\t\t\t\tRule{\n\t\t\t\t\tMatch: Match().MarkClear(r.IptablesMarkNextTier),\n\t\t\t\t\tAction: JumpAction{Target: inPolChainName},\n\t\t\t\t},\n\t\t\t\t\/\/ If policy marked packet as accepted, it returns, setting the\n\t\t\t\t\/\/ accept mark bit. If that is set, return from this chain.\n\t\t\t\tRule{\n\t\t\t\t\tMatch: Match().MarkSet(r.IptablesMarkAccept),\n\t\t\t\t\tAction: ReturnAction{},\n\t\t\t\t\tComment: \"Return if policy accepted\",\n\t\t\t\t})\n\t\t\toutPolChainName := PolicyChainName(\n\t\t\t\tPolicyOutboundPfx,\n\t\t\t\t&proto.PolicyID{Tier: tier.Name, Name: polID},\n\t\t\t)\n\t\t\toutRules = append(outRules,\n\t\t\t\tRule{\n\t\t\t\t\tMatch: Match().MarkClear(r.IptablesMarkNextTier),\n\t\t\t\t\tAction: JumpAction{Target: outPolChainName},\n\t\t\t\t},\n\t\t\t\t\/\/ If policy marked packet as accepted, it returns, setting the\n\t\t\t\t\/\/ accept mark bit. 
If that is set, return from this chain.\n\t\t\t\tRule{\n\t\t\t\t\tMatch: Match().MarkSet(r.IptablesMarkAccept),\n\t\t\t\t\tAction: ReturnAction{},\n\t\t\t\t\tComment: \"Return if policy accepted\",\n\t\t\t\t})\n\t\t}\n\t\t\/\/ If no policy in the tier marked the packet as next-tier, drop the packet.\n\t\tinRules = append(inRules, r.DropRules(Match().MarkClear(r.IptablesMarkNextTier), \"Drop if no policies passed packet\")...)\n\t\toutRules = append(outRules, r.DropRules(Match().MarkClear(r.IptablesMarkNextTier), \"Drop if no policies passed packet\")...)\n\t}\n\n\t\/\/ Then, jump to each profile in turn.\n\tfor _, profileID := range endpoint.ProfileIds {\n\t\tinProfChainName := ProfileChainName(PolicyInboundPfx, &proto.ProfileID{Name: profileID})\n\t\toutProfChainName := ProfileChainName(PolicyOutboundPfx, &proto.ProfileID{Name: profileID})\n\t\tinRules = append(inRules,\n\t\t\tRule{Action: JumpAction{Target: inProfChainName}},\n\t\t\t\/\/ If policy marked packet as accepted, it returns, setting the\n\t\t\t\/\/ accept mark bit. If that is set, return from this chain.\n\t\t\tRule{\n\t\t\t\tMatch: Match().MarkSet(r.IptablesMarkAccept),\n\t\t\t\tAction: ReturnAction{},\n\t\t\t\tComment: \"Return if profile accepted\",\n\t\t\t})\n\t\toutRules = append(outRules,\n\t\t\tRule{Action: JumpAction{Target: outProfChainName}},\n\t\t\t\/\/ If policy marked packet as accepted, it returns, setting the\n\t\t\t\/\/ accept mark bit. If that is set, return from this chain.\n\t\t\tRule{\n\t\t\t\tMatch: Match().MarkSet(r.IptablesMarkAccept),\n\t\t\t\tAction: ReturnAction{},\n\t\t\t\tComment: \"Return if profile accepted\",\n\t\t\t})\n\t}\n\n\tinRules = append(inRules, r.DropRules(Match(), \"Drop if no profiles matched\")...)\n\toutRules = append(outRules, r.DropRules(Match(), \"Drop if no profiles matched\")...)\n\n\ttoEndpointChain := Chain{\n\t\tName: WorkloadEndpointChainName(WorkloadToEndpointPfx, endpoint),\n\t\tRules: inRules,\n\t}\n\tfromEndpointChain := Chain{\n\t\tName: WorkloadEndpointChainName(WorkloadFromEndpointPfx, endpoint),\n\t\tRules: outRules,\n\t}\n\treturn []*Chain{&toEndpointChain, &fromEndpointChain}\n}\n\nfunc (r *ruleRenderer) HostDispatchChains(map[proto.HostEndpointID]*proto.HostEndpoint) []*Chain {\n\tpanic(\"Not implemented\")\n\treturn nil\n}\n\nfunc (r *ruleRenderer) HostEndpointToIptablesChains(epID *proto.HostEndpointID, endpoint *proto.HostEndpoint) []*Chain {\n\tpanic(\"Not implemented\")\n\n\t\/\/ TODO(smc) Failsafe chains\n\n\treturn nil\n}\n\nfunc WorkloadEndpointChainName(prefix string, endpoint *proto.WorkloadEndpoint) string {\n\treturn hashutils.GetLengthLimitedID(\n\t\tprefix,\n\t\tendpoint.Name,\n\t\tMaxChainNameLength,\n\t)\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":19} {"diff_hunk":"@@ -10,11 +10,14 @@ from __future__ import print_function\n from __future__ import division\n from __future__ import absolute_import\n \n+import errno\n import os\n import pickle\n import platform\n+import shutil\n import subprocess\n import sys\n+\n from uuid import uuid4\n \n from codechecker_common.logger import get_logger","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. 
See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n\"\"\"\nBuild and log related functionality.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport os\nimport pickle\nimport platform\nimport subprocess\nimport sys\nfrom uuid import uuid4\n\nfrom codechecker_common.logger import get_logger\n\nfrom .. import env\nfrom . import host_check\n\nLOG = get_logger('buildlogger')\n\n\ndef execute_buildcmd(command, silent=False, env=None, cwd=None):\n \"\"\"\n Execute the the build command and continuously write\n the output from the process to the standard output.\n \"\"\"\n proc = subprocess.Popen(command,\n bufsize=-1,\n env=env,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=cwd,\n shell=True,\n universal_newlines=True)\n\n while True:\n line = proc.stdout.readline()\n if not line and proc.poll() is not None:\n break\n if not silent:\n print(line)\n\n return proc.returncode\n\n\ndef perform_build_command(logfile, command, context, keep_link, silent=False):\n \"\"\"\n Build the project and create a log file.\n \"\"\"\n LOG.info(\"Starting build ...\")\n\n try:\n original_env_file = os.environ['CODECHECKER_ORIGINAL_BUILD_ENV']\n LOG.debug_analyzer('Loading original build env from: %s',\n original_env_file)\n\n with open(original_env_file, 'rb') as env_file:\n original_env = pickle.load(env_file)\n\n except Exception as ex:\n LOG.warning(str(ex))\n LOG.warning('Failed to get saved original_env'\n 'using a current copy for logging.')\n original_env = os.environ.copy()\n\n # Run user's commands with intercept.\n if host_check.check_intercept(original_env):\n LOG.debug_analyzer(\"with intercept ...\")\n final_command = command\n command = ' '.join([\"intercept-build\",\n \"--cdb\", logfile,\n \"sh -c \\\"\" + final_command + \"\\\"\"])\n log_env = original_env\n LOG.debug_analyzer(command)\n\n # Run user's commands in shell.\n else:\n # TODO: better platform detection.\n if platform.system() == 'Linux':\n LOG.debug_analyzer(\"with ld logger ...\")\n open(logfile, 'a').close() # Same as linux's touch.\n log_env = env.get_log_env(logfile, context, original_env)\n if 'CC_LOGGER_GCC_LIKE' not in log_env:\n log_env['CC_LOGGER_GCC_LIKE'] = 'gcc:g++:clang:clang++:cc:c++'\n if keep_link or ('CC_LOGGER_KEEP_LINK' in log_env and\n log_env['CC_LOGGER_KEEP_LINK'] == 'true'):\n log_env['CC_LOGGER_KEEP_LINK'] = 'true'\n else:\n LOG.error(\"Intercept-build is required\"\n \" to run CodeChecker in OS X.\")\n sys.exit(1)\n\n LOG.debug_analyzer(log_env)\n try:\n ret_code = execute_buildcmd(command, silent, log_env)\n\n if ret_code == 0:\n LOG.info(\"Build finished successfully.\")\n LOG.debug_analyzer(\"The logfile is: %s\", logfile)\n else:\n LOG.info(\"Build failed.\")\n sys.exit(ret_code)\n\n except Exception as ex:\n LOG.error(\"Calling original build command failed.\")\n LOG.error(str(ex))\n sys.exit(1)\n finally:\n # Removing flock lock file.\n logfile_lock = logfile + '.lock'\n if os.path.exists(logfile_lock):\n os.remove(logfile_lock)\n\n\ndef default_compilation_db(workspace_path, run_name):\n \"\"\"\n Default compilation commands database file in the workspace.\n \"\"\"\n workspace_path = os.path.abspath(workspace_path)\n uid = str(uuid4())[:10] # 10 chars should be unique enough\n cmp_json_filename = 'compilation_commands_' + run_name + '_' \\\n + uid + '.json'\n compilation_commands = os.path.join(workspace_path, cmp_json_filename)\n return 
compilation_commands\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":20} {"diff_hunk":"@@ -4,6 +4,7 @@ This software is distributed under the GNU General Public License.\n See the file COPYING for details.\n *\/\n \n+#include \"dag.h\"\n #include \"dag_file.h\"\n \n #include \"xxmalloc.h\"","source_code":"\/*\nCopyright (C) 2014- The University of Notre Dame\nThis software is distributed under the GNU General Public License.\nSee the file COPYING for details.\n*\/\n\n#include \"dag_file.h\"\n\n#include \"xxmalloc.h\"\n#include \"list.h\"\n\n#include \n\nstruct dag_file * dag_file_create( const char *filename )\n{\n\tstruct dag_file *f = malloc(sizeof(*f));\n\tf->filename = xxstrdup(filename);\n\tf->needed_by = list_create();\n\tf->created_by = 0;\n\tf->ref_count = 0;\n\treturn f;\n}\n\nint dag_file_is_source( const struct dag_file *f )\n{\n\tif(f->created_by)\n\t\treturn 0;\n\telse\n\t\treturn 1;\n}\n\nint dag_file_is_sink( const struct dag_file *f )\n{\n\tif(list_size(f->needed_by) > 0)\n\t\treturn 0;\n\telse\n\t\treturn 1;\n}\n\n\/* vim: set noexpandtab tabstop=4: *\/\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":21} {"diff_hunk":"@@ -6,7 +6,9 @@ flux_msg_handler_create(3)\n NAME\n ----\n flux_msg_handler_create, flux_msg_handler_destroy,\n-flux_msg_handler_start, flux_msg_handler_stop - manage message handlers\n+flux_msg_handler_start, flux_msg_handler_stop,\n+flux_msg_handler_allow_rolemask,\n+flux_msg_handler_deny_rolemask - manage message handlers\n \n \n SYNOPSIS","source_code":"flux_msg_handler_create(3)\n==========================\n:doctype: manpage\n\n\nNAME\n----\nflux_msg_handler_create, flux_msg_handler_destroy,\nflux_msg_handler_start, flux_msg_handler_stop - manage message handlers\n\n\nSYNOPSIS\n--------\n #include \n\n typedef void (*flux_msg_handler_f)(flux_t *h,\n flux_msg_handler_t *mh,\n const flux_msg_t *msg,\n void *arg);\n\n flux_msg_handler_t *\n flux_msg_handler_create (flux_t *h,\n const struct flux_match match,\n flux_msg_handler_f callback,\n void *arg);\n\n void flux_msg_handler_destroy (flux_msg_handler_t *mh);\n\n void flux_msg_handler_start (flux_msg_handler_t *mh);\n\n void flux_msg_handler_stop (flux_msg_handler_t *mh);\n\n\nDESCRIPTION\n-----------\n\n`flux_msg_handler_create()` registers _callback_ to be invoked when\na message meeting _match_ criteria, as described in `flux_msg_cmp(3)`,\nis received on Flux broker handle _h_.\n\nThe message handler must be started with `flux_msg_handler_start()` in\norder to receive messages. Conversely, `flux_msg_handler_stop()` causes\nthe message handler to stop receiving messages. Starting and stopping\nare idempotent operations.\n\nThe handle _h_ is monitored for FLUX_POLLIN events on the flux_reactor_t\nassociated with the handle as described in `flux_set_reactor(3)`.\nThis internal \"handle watcher\" is started when the first message handler\nis started, and stopped when the last message handler is stopped.\n\nMessages arriving on _h_ are internally read and dispatched to matching\nmessage handlers. 
If multiple handlers match the message, the message\nis dispatched to the most recently registered handler.\n\nFLUX_MSGTYPE_REQUEST messages with no matching message handler\nare automatically sent an ENOSYS response by the dispatcher.\n\n`flux_msg_handler_destroy()` destroys a handler, after internally\nstopping it.\n\n\nCAVEATS\n-------\n\nFLUX_MSGTYPE_EVENT messages are received on the handle only as\nrequested by `flux_event_subscribe(3)`.\n\n`flux-broker(1)` only routes FLUX_MSGTYPE_REQUEST messages to comms\nmodules according to their registered service name, which is the same as\nthe module name. Other handle instances such as those on the local connector\ncannot yet receive requests.\n\n\nRETURN VALUE\n------------\n\n`flux_msg_handler_create()` returns a flux_msg_handler_t object on success.\nOn error, NULL is returned, and errno is set appropriately.\n\n\nERRORS\n------\n\nENOMEM::\nOut of memory.\n\n\nAUTHOR\n------\nThis page is maintained by the Flux community.\n\n\nRESOURCES\n---------\nGithub: \n\n\nCOPYRIGHT\n---------\ninclude::COPYRIGHT.adoc[]\n\n\nSEE ALSO\n---------\nflux_get_reactor(3), flux_reactor_start(3), flux_msg_cmp(3)\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":22} {"diff_hunk":"@@ -26,6 +26,7 @@ type Set interface {\n \tContains(interface{}) bool\n \tIter(func(item interface{}) error)\n \tCopy() Set\n+\tEquals(Set) bool\n }\n \n type empty struct{}","source_code":"\/\/ Copyright (c) 2016 Tigera, Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage set\n\nimport (\n\t\"errors\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\ntype Set interface {\n\tLen() int\n\tAdd(interface{})\n\tDiscard(interface{})\n\tContains(interface{}) bool\n\tIter(func(item interface{}) error)\n\tCopy() Set\n}\n\ntype empty struct{}\n\nvar emptyValue = empty{}\n\nvar (\n\tStopIteration = errors.New(\"Stop iteration\")\n\tRemoveItem = errors.New(\"Remove item\")\n)\n\nfunc New() Set {\n\treturn make(mapSet)\n}\n\nfunc Empty() Set {\n\treturn mapSet(nil)\n}\n\ntype mapSet map[interface{}]empty\n\nfunc (set mapSet) Len() int {\n\treturn len(set)\n}\n\nfunc (set mapSet) Add(item interface{}) {\n\tset[item] = emptyValue\n}\n\nfunc (set mapSet) Discard(item interface{}) {\n\tdelete(set, item)\n}\n\nfunc (set mapSet) Contains(item interface{}) bool {\n\t_, present := set[item]\n\treturn present\n}\n\nfunc (set mapSet) Iter(visitor func(item interface{}) error) {\nloop:\n\tfor item := range set {\n\t\terr := visitor(item)\n\t\tswitch err {\n\t\tcase StopIteration:\n\t\t\tbreak loop\n\t\tcase RemoveItem:\n\t\t\tdelete(set, item)\n\t\tcase nil:\n\t\t\tbreak\n\t\tdefault:\n\t\t\tlog.WithError(err).Panic(\"Unexpected iteration error\")\n\t\t}\n\t}\n}\n\nfunc (set mapSet) Copy() Set {\n\tcpy := New()\n\tfor item := range set {\n\t\tcpy.Add(item)\n\t}\n\treturn cpy\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":23} {"diff_hunk":"@@ -70,16 +70,11 @@ def setup_package():\n # Extend the checker 
configuration with the server access.\n codechecker_cfg.update(server_access)\n \n- ret = project.clean(test_project, test_env)\n- if ret:\n- sys.exit(ret)\n-\n- ret = codechecker.check_and_store(codechecker_cfg,\n- test_project_name,\n- test_project_path)\n+ ret = codechecker.store(codechecker_cfg,\n+ test_project_name)\n if ret:\n sys.exit(1)\n- print(\"Analyzing test project was succcessful.\")\n+ print(\"Storing the base reports was succcessful.\")\n \n codechecker_cfg['run_names'] = [test_project_name]\n ","source_code":"# coding=utf-8\n# -----------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. See LICENSE.TXT for details.\n# -----------------------------------------------------------------------------\n\n\"\"\"Setup for the package tests.\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport os\nimport shutil\nimport sys\nimport uuid\n\nfrom libtest import codechecker\nfrom libtest import env\nfrom libtest import project\n\n\nTEST_WORKSPACE = None\n\n\ndef setup_package():\n \"\"\"Setup the environment for the tests. \"\"\"\n\n global TEST_WORKSPACE\n TEST_WORKSPACE = env.get_workspace('update')\n\n os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE\n\n test_project = 'cpp'\n\n test_config = {}\n\n project_info = project.get_info(test_project)\n\n test_project_path = os.path.join(TEST_WORKSPACE, \"test_proj\")\n shutil.copytree(project.path(test_project), test_project_path)\n\n project_info['project_path'] = test_project_path\n\n test_project_name = project_info['name'] + '_' + uuid.uuid4().hex\n\n test_config['test_project'] = project_info\n\n suppress_file = None\n\n skip_list_file = None\n\n test_env = env.test_env(TEST_WORKSPACE)\n\n codechecker_cfg = {\n 'suppress_file': suppress_file,\n 'skip_list_file': skip_list_file,\n 'check_env': test_env,\n 'workspace': TEST_WORKSPACE,\n 'checkers': []\n }\n\n # Start or connect to the running CodeChecker server and get connection\n # details.\n print(\"This test uses a CodeChecker server... 
connecting...\")\n server_access = codechecker.start_or_get_server()\n server_access['viewer_product'] = 'update'\n codechecker.add_test_package_product(server_access, TEST_WORKSPACE)\n\n # Extend the checker configuration with the server access.\n codechecker_cfg.update(server_access)\n\n ret = project.clean(test_project, test_env)\n if ret:\n sys.exit(ret)\n\n ret = codechecker.check_and_store(codechecker_cfg,\n test_project_name,\n test_project_path)\n if ret:\n sys.exit(1)\n print(\"Analyzing test project was succcessful.\")\n\n codechecker_cfg['run_names'] = [test_project_name]\n\n test_config['codechecker_cfg'] = codechecker_cfg\n\n env.export_test_cfg(TEST_WORKSPACE, test_config)\n\n\ndef teardown_package():\n \"\"\"Clean up after the test.\"\"\"\n\n # TODO: if environment variable is set keep the workspace\n # and print out the path\n global TEST_WORKSPACE\n\n check_env = env.import_test_cfg(TEST_WORKSPACE)[\n 'codechecker_cfg']['check_env']\n codechecker.remove_test_package_product(TEST_WORKSPACE, check_env)\n\n print(\"Removing: \" + TEST_WORKSPACE)\n shutil.rmtree(TEST_WORKSPACE)\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":24} {"diff_hunk":"@@ -48,7 +48,7 @@ func MetricsPortReachable(felix *infrastructure.Felix) bool {\n }\n \n \/\/ Here we test reachability to a port number running on a Calico host itself, specifically Felix's\n-\/\/ metrics port 9091, and how that is affected by policy, host endpoint and workload endpoint\n+\/\/ metrics port 9091, and how that is affected by policy, host endpoint (eth0\/*) and workload endpoint\n \/\/ configuration.\n \/\/\n \/\/ - When there is no policy or endpoint configuration, the port should be reachable.","source_code":"\/\/ +build fvtests\n\n\/\/ Copyright (c) 2017-2018 Tigera, Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fv_test\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/projectcalico\/felix\/fv\/infrastructure\"\n\t\"github.com\/projectcalico\/felix\/fv\/metrics\"\n\t\"github.com\/projectcalico\/felix\/fv\/utils\"\n\t\"github.com\/projectcalico\/felix\/fv\/workload\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/apiconfig\"\n\tapi \"github.com\/projectcalico\/libcalico-go\/lib\/apis\/v3\"\n\tclient \"github.com\/projectcalico\/libcalico-go\/lib\/clientv3\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/numorstring\"\n)\n\nfunc MetricsPortReachable(felix *infrastructure.Felix) bool {\n\t\/\/ Delete existing conntrack state for the metrics port.\n\tfelix.Exec(\"conntrack\", \"-L\")\n\tfelix.Exec(\"conntrack\", \"-L\", \"-p\", \"tcp\", \"--dport\", metrics.PortString())\n\tfelix.ExecMayFail(\"conntrack\", \"-D\", \"-p\", \"tcp\", \"--orig-port-dst\", metrics.PortString())\n\n\t\/\/ Now try to get a metric.\n\tm, err := metrics.GetFelixMetric(felix.IP, \"felix_active_local_endpoints\")\n\tif err != nil {\n\t\tlog.WithError(err).Info(\"Metrics port not reachable\")\n\t\treturn false\n\t}\n\tlog.WithField(\"felix_active_local_endpoints\", m).Info(\"Metrics port reachable\")\n\treturn true\n}\n\n\/\/ Here we test reachability to a port number running on a Calico host itself, specifically Felix's\n\/\/ metrics port 9091, and how that is affected by policy, host endpoint and workload endpoint\n\/\/ configuration.\n\/\/\n\/\/ - When there is no policy or endpoint configuration, the port should be reachable.\n\/\/\n\/\/ - When there is a local workload endpoint, the port should be reachable. (Existence of workload\n\/\/ endpoints should make no difference to reachability to ports on the host itself.)\n\/\/\n\/\/ - When a host endpoint is configured for the host's interface (eth0), but not yet any policy, the\n\/\/ port should be unreachable.\n\/\/\n\/\/ - When pre-DNAT policy is then configured, to allow ingress to that port, it should be\n\/\/ reachable again.\n\nvar _ = infrastructure.DatastoreDescribe(\"with initialized Felix\", []apiconfig.DatastoreType{apiconfig.EtcdV3, apiconfig.Kubernetes}, func(getInfra infrastructure.InfraFactory) {\n\tvar (\n\t\tinfra infrastructure.DatastoreInfra\n\t\tfelix *infrastructure.Felix\n\t\tclient client.Interface\n\t\tmetricsPortReachable func() bool\n\t)\n\n\tBeforeEach(func() {\n\t\tinfra = getInfra()\n\n\t\tfelix, client = infrastructure.StartSingleNodeTopology(infrastructure.DefaultTopologyOptions(), infra)\n\n\t\tmetricsPortReachable = func() bool {\n\t\t\treturn MetricsPortReachable(felix)\n\t\t}\n\t})\n\n\tAfterEach(func() {\n\n\t\tif CurrentGinkgoTestDescription().Failed {\n\t\t\tinfra.DumpErrorData()\n\t\t\tfelix.Exec(\"iptables-save\", \"-c\")\n\t\t\tfelix.Exec(\"ip\", \"r\")\n\t\t\tfelix.Exec(\"ip\", \"a\")\n\t\t}\n\t\tfelix.Stop()\n\t\tinfra.Stop()\n\t})\n\n\tIt(\"with no endpoints or policy, port should be reachable\", func() {\n\t\tEventually(metricsPortReachable, \"10s\", \"1s\").Should(BeTrue())\n\t})\n\n\tIt(\"with a local workload, port should be reachable\", func() {\n\t\tw := workload.Run(felix, \"w\", \"default\", \"10.65.0.2\", \"8055\", \"tcp\")\n\t\tw.ConfigureInDatastore(infra)\n\t\tEventually(metricsPortReachable, \"10s\", \"1s\").Should(BeTrue(), \"Not reachable with workload running\")\n\t\tw.Stop()\n\t\tEventually(metricsPortReachable, \"10s\", \"1s\").Should(BeTrue(), \"With workload stopped, not reachable\")\n\t})\n\n\tContext(\"with host endpoint defined\", func() {\n\n\t\tBeforeEach(func() 
{\n\t\t\terr := infra.AddAllowToDatastore(\"host-endpoint=='true'\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\thostEp := api.NewHostEndpoint()\n\t\t\thostEp.Name = \"host-endpoint-1\"\n\t\t\thostEp.Labels = map[string]string{\"host-endpoint\": \"true\"}\n\t\t\thostEp.Spec.Node = felix.Hostname\n\t\t\thostEp.Spec.InterfaceName = \"eth0\"\n\t\t\t_, err = client.HostEndpoints().Create(utils.Ctx, hostEp, utils.NoOptions)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\n\t\tIt(\"port should not be reachable\", func() {\n\t\t\tEventually(metricsPortReachable, \"10s\", \"1s\").Should(BeFalse())\n\t\t})\n\n\t\tContext(\"with pre-DNAT policy defined\", func() {\n\n\t\t\tBeforeEach(func() {\n\t\t\t\t\/\/ Ensure the HostEndpoint has taken effect and is blocking traffic\n\t\t\t\tEventually(metricsPortReachable, \"10s\", \"1s\").Should(BeFalse())\n\t\t\t\tpolicy := api.NewGlobalNetworkPolicy()\n\t\t\t\tpolicy.Name = \"pre-dnat-policy-1\"\n\t\t\t\tpolicy.Spec.PreDNAT = true\n\t\t\t\tpolicy.Spec.ApplyOnForward = true\n\t\t\t\tprotocol := numorstring.ProtocolFromString(\"tcp\")\n\t\t\t\tallowMetricsPortRule := api.Rule{\n\t\t\t\t\tAction: api.Allow,\n\t\t\t\t\tProtocol: &protocol,\n\t\t\t\t\tDestination: api.EntityRule{\n\t\t\t\t\t\tPorts: []numorstring.Port{numorstring.SinglePort(uint16(metrics.Port))},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tpolicy.Spec.Ingress = []api.Rule{allowMetricsPortRule}\n\t\t\t\tpolicy.Spec.Selector = \"host-endpoint=='true'\"\n\t\t\t\t_, err := client.GlobalNetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t})\n\n\t\t\tIt(\"port should be reachable\", func() {\n\t\t\t\tEventually(metricsPortReachable, \"10s\", \"1s\").Should(BeTrue())\n\t\t\t})\n\t\t})\n\t})\n})\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":25} {"diff_hunk":"@@ -64,23 +64,40 @@ flux_future_t *flux_kvs_lookupat (flux_t *h, int flags, const char *key,\n {\n flux_future_t *f;\n json_t *obj = NULL;\n+ struct lookup_ctx *ctx;\n \n if (!h || !key || strlen (key) == 0 || validate_lookup_flags (flags) < 0) {\n errno = EINVAL;\n return NULL;\n }\n+ if (!(ctx = alloc_ctx ()))\n+ return NULL;\n+ ctx->flags = flags;\n if (!treeobj) {\n- f = flux_kvs_lookup (h, flags, key);\n+ if (!(f = flux_kvs_lookup (h, flags, key))) {\n+ free_ctx (ctx);\n+ return NULL;\n+ }\n }\n else {\n if (!(obj = json_loads (treeobj, 0, NULL))) {\n errno = EINVAL;\n return NULL;\n }\n- f = flux_rpc_pack (h, \"kvs.get\", FLUX_NODEID_ANY, 0, \"{s:s s:i s:O}\",\n- \"key\", key,\n- \"flags\", flags,\n- \"rootdir\", obj);\n+ if (!(f = flux_rpc_pack (h, \"kvs.get\", FLUX_NODEID_ANY, 0,\n+ \"{s:s s:i s:O}\", \"key\", key,\n+ \"flags\", flags,\n+ \"rootdir\", obj))) {\n+ free_ctx (ctx);\n+ json_decref (obj);\n+ return NULL;\n+ }\n+ }\n+ if (flux_future_aux_set (f, auxkey, ctx, (flux_free_f)free_ctx) < 0) {\n+ free_ctx (ctx);\n+ json_decref (obj);\n+ flux_future_destroy (f);\n+ return NULL;\n }\n json_decref (obj);\n return f;","source_code":"\/*****************************************************************************\\\n * Copyright (c) 2017 Lawrence Livermore National Security, LLC. 
Produced at\n * the Lawrence Livermore National Laboratory (cf, AUTHORS, DISCLAIMER.LLNS).\n * LLNL-CODE-658032 All rights reserved.\n *\n * This file is part of the Flux resource manager framework.\n * For details, see https:\/\/github.com\/flux-framework.\n *\n * This program is free software; you can redistribute it and\/or modify it\n * under the terms of the GNU General Public License as published by the Free\n * Software Foundation; either version 2 of the license, or (at your option)\n * any later version.\n *\n * Flux is distributed in the hope that it will be useful, but WITHOUT\n * ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or\n * FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License along\n * with this program; if not, write to the Free Software Foundation, Inc.,\n * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.\n * See also: http:\/\/www.gnu.org\/licenses\/\n\\*****************************************************************************\/\n\n#if HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \"kvs_lookup.h\"\n\nstatic int validate_lookup_flags (int flags)\n{\n switch (flags) {\n case 0:\n case FLUX_KVS_TREEOBJ:\n case FLUX_KVS_READDIR:\n case FLUX_KVS_READDIR | FLUX_KVS_TREEOBJ:\n case FLUX_KVS_READLINK:\n return 0;\n default:\n return -1;\n }\n}\n\nflux_future_t *flux_kvs_lookup (flux_t *h, int flags, const char *key)\n{\n if (!h || !key || strlen (key) == 0 || validate_lookup_flags (flags) < 0) {\n errno = EINVAL;\n return NULL;\n }\n return flux_rpc_pack (h, \"kvs.get\", FLUX_NODEID_ANY, 0, \"{s:s s:i}\",\n \"key\", key,\n \"flags\", flags);\n}\n\nflux_future_t *flux_kvs_lookupat (flux_t *h, int flags, const char *key,\n const char *treeobj)\n{\n flux_future_t *f;\n json_t *obj = NULL;\n\n if (!h || !key || strlen (key) == 0 || validate_lookup_flags (flags) < 0) {\n errno = EINVAL;\n return NULL;\n }\n if (!treeobj) {\n f = flux_kvs_lookup (h, flags, key);\n }\n else {\n if (!(obj = json_loads (treeobj, 0, NULL))) {\n errno = EINVAL;\n return NULL;\n }\n f = flux_rpc_pack (h, \"kvs.get\", FLUX_NODEID_ANY, 0, \"{s:s s:i s:O}\",\n \"key\", key,\n \"flags\", flags,\n \"rootdir\", obj);\n }\n json_decref (obj);\n return f;\n}\n\nint flux_kvs_lookup_get (flux_future_t *f, const char **json_str)\n{\n const char *auxkey = \"flux::kvs_valstr\";\n json_t *obj;\n char *s;\n\n if (!(s = flux_future_aux_get (f, auxkey))) {\n if (flux_rpc_get_unpack (f, \"{s:o}\", \"val\", &obj) < 0)\n return -1;\n if (!(s = json_dumps (obj, JSON_COMPACT|JSON_ENCODE_ANY))) {\n errno = EINVAL;\n return -1;\n }\n if (flux_future_aux_set (f, auxkey, s, free) < 0) {\n free (s);\n errno = ENOMEM;\n return -1;\n }\n }\n if (json_str)\n *json_str = s;\n return 0;\n}\n\nint flux_kvs_lookup_get_unpack (flux_future_t *f, const char *fmt, ...)\n{\n va_list ap;\n json_t *obj;\n int rc;\n\n if (flux_rpc_get_unpack (f, \"{s:o}\", \"val\", &obj) < 0)\n return -1;\n va_start (ap, fmt);\n if ((rc = json_vunpack_ex (obj, NULL, 0, fmt, ap)) < 0)\n errno = EPROTO;\n va_end (ap);\n\n return rc;\n}\n\n\n\/*\n * vi:tabstop=4 shiftwidth=4 expandtab\n *\/\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":26} {"diff_hunk":"@@ -170,6 +170,7 @@ def remove_dir(path):\n shutil.rmtree(path, onerror=error_handler)\n \n \n+# 
-------------------------------------------------------------------------\ndef call_command(command, env=None):\n \"\"\" Call an external command and return with (output, return_code).\"\"\"\n ","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n\"\"\"\nUtil module.\n\"\"\"\n\nimport datetime\nimport glob\nimport hashlib\nimport ntpath\nimport os\nimport shutil\nimport socket\nimport subprocess\nimport sys\n\nfrom codechecker_lib.logger import LoggerFactory\n\n# WARNING! LOG should be only used in this module.\nLOG = LoggerFactory.get_new_logger('UTIL')\n\n\n# ---------------------------------------------------------------------\ndef get_free_port():\n \"\"\" Get a free port from the OS. \"\"\"\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('', 0))\n free_port = s.getsockname()[1]\n s.close()\n\n return free_port\n\n\n# ---------------------------------------------------------------------\ndef is_localhost(address):\n \"\"\"\n Check if address is one of the valid values and try to get the\n IP-addresses from the system.\n \"\"\"\n\n valid_values = ['localhost', '0.0.0.0', '*']\n\n try:\n valid_values.append(socket.gethostbyname('localhost'))\n except Exception:\n # Failed to get ip address for localhost.\n pass\n\n try:\n valid_values.append(socket.gethostbyname(socket.gethostname()))\n except Exception:\n # Failed to get ip address for host_name.\n pass\n\n return address in valid_values\n\n\n# ---------------------------------------------------------------------\ndef match_file_name(file_name, pattern):\n file_name_parts = file_name.split('--')\n\n if file_name_parts[0] == pattern:\n return True\n else:\n return False\n\n\n# ---------------------------------------------------------------------\ndef get_file_last_modification_time(file):\n \"\"\"\n Returns the last modification time of a file.\n \"\"\"\n return datetime.datetime.fromtimestamp(os.path.getmtime(file))\n\n\n# ---------------------------------------------------------------------\ndef get_env_var(env_var, needed=False):\n \"\"\"\n Read the environment variables and handle the exception if a necessary\n environment variable is missing.\n \"\"\"\n\n value = os.getenv(env_var)\n if needed and not value:\n LOG.critical('Failed to read necessary environment variable %s.'\n ' (Maybe CodeChecker was not configured properly.)'\n % env_var)\n sys.exit(1)\n\n return value\n\n\n# -------------------------------------------------------------------------\ndef get_tmp_dir_hash():\n \"\"\"Generate a hash based on the current time and process id.\"\"\"\n\n pid = os.getpid()\n time = datetime.datetime.now()\n\n data = str(pid) + str(time)\n\n dir_hash = hashlib.md5()\n dir_hash.update(data)\n\n LOG.debug('The generated temporary directory hash is %s.'\n % dir_hash.hexdigest())\n\n return dir_hash.hexdigest()\n\n\n# -------------------------------------------------------------------------\ndef get_file_name_from_path(path):\n \"\"\"Split a path into its directory and filename parts.\"\"\"\n head, tail = ntpath.split(path)\n return head, tail\n\n\n# -------------------------------------------------------------------------\ndef get_obj_target(object_file_path):\n return os.path.split(os.path.abspath(object_file_path))[-2]\n\n\n# 
-------------------------------------------------------------------------\ndef create_dir(path):\n \"\"\"Create a directory safely if it does not exist yet.\n This may be called from several processes or threads, creating the same\n directory, and it fails only if the directory is not created.\n \"\"\"\n\n if not os.path.isdir(path):\n try:\n LOG.debug('Creating directory %s.' % path)\n os.makedirs(path)\n except Exception as e:\n if not os.path.isdir(path):\n LOG.error('Failed to create directory %s.' % path)\n raise e\n\n return\n\n\n# -------------------------------------------------------------------------\ndef get_file_list(path, pattern):\n glob_pattern = os.path.join(path, pattern)\n return glob.glob(glob_pattern)\n\n\n# -------------------------------------------------------------------------\ndef remove_file_list(file_list):\n for rfile in file_list:\n LOG.debug(rfile)\n try:\n os.remove(rfile)\n except OSError:\n # Maybe another thread has already deleted it.\n LOG.debug('Failed to remove file %s.' % rfile)\n\n return\n\n\n# -------------------------------------------------------------------------\ndef remove_dir(path):\n def error_handler(*args):\n LOG.warning('Failed to remove directory %s.' % path)\n\n shutil.rmtree(path, onerror=error_handler)\n\n\ndef call_command(command, env=None):\n \"\"\" Call an external command and return with (output, return_code).\"\"\"\n\n try:\n LOG.debug('Run ' + ' '.join(command))\n out = subprocess.check_output(command,\n bufsize=-1,\n env=env,\n stderr=subprocess.STDOUT)\n LOG.debug(out)\n return out, 0\n except subprocess.CalledProcessError as ex:\n LOG.debug('Running command \"' + ' '.join(command) + '\" Failed.')\n LOG.debug(str(ex.returncode))\n LOG.debug(ex.output)\n return ex.output, ex.returncode\n\n\ndef get_default_workspace():\n \"\"\"\n Default workspace in the users home directory.\n \"\"\"\n workspace = os.path.join(os.path.expanduser(\"~\"), '.codechecker')\n return workspace\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":27} {"diff_hunk":"@@ -95,8 +95,8 @@ if __name__ == '__main__':\n f.write(\n json.dumps(\n prepare_compiler_target.prepare(\n- os.path.join(args.report_dir, \"compiler_target.json\"),\n- args.sources_root),\n+ os.path.join(pathOptions.report_dir, \"compiler_target.json\"),\n+ pathOptions.sources_root),\n indent=4))\n \n # ctu-collect","source_code":"#!\/usr\/bin\/env python\n# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. 
See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\nimport argparse\nimport json\nimport os\nimport platform\nimport subprocess\n\nimport prepare_compile_cmd\nimport prepare_compiler_includes\nimport prepare_compiler_target\nimport prepare_analyzer_cmd\n\n\ndef execute(cmd):\n print(\"Executing command: \" + ' '.join(cmd))\n try:\n proc = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = proc.communicate()\n\n print(\"stdout:\\n\\n\" + out.decode(\"utf-8\"))\n print(\"stderr:\\n\\n\" + err.decode(\"utf-8\"))\n\n if proc.returncode != 0:\n print('Unsuccessful run: \"' + ' '.join(cmd) + '\"')\n raise Exception(\"Unsuccessful run of command.\")\n return out\n except OSError:\n print('Failed to run: \"' + ' '.join(cmd) + '\"')\n raise\n\n\ndef get_triple_arch(analyze_command_file):\n with open(analyze_command_file) as f:\n cmd = f.readline()\n\n cmd = cmd.split()\n for flag in cmd:\n if flag.startswith('--target='):\n return flag[9:].split('-')[0] # 9 == len('--target=')\n\n return platform.machine()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Prepare all commands '\n 'to execute in local environment for debugging.')\n parser.add_argument(\n '--sources_root',\n default='.\/sources-root',\n help=\"Path of the source root.\")\n parser.add_argument(\n '--report_dir',\n default='..',\n help=\"Path of the report dir.\")\n parser.add_argument(\n '--clang',\n required=True,\n help=\"Path to the clang binary.\")\n parser.add_argument(\n '--clang_plugin_name', default=None,\n help=\"Name of the used clang plugin.\")\n parser.add_argument(\n '--clang_plugin_path', default=None,\n help=\"Path to the used clang plugin.\")\n args = parser.parse_args()\n\n compile_cmd_debug = \"compile_cmd_DEBUG.json\"\n with open(compile_cmd_debug, 'w') as f:\n f.write(\n json.dumps(\n prepare_compile_cmd.prepare(\n os.path.join(args.report_dir, \"compile_cmd.json\"),\n args.sources_root),\n indent=4))\n\n compiler_includes_debug = \"compiler_includes_DEBUG.json\"\n with open(compiler_includes_debug, 'w') as f:\n f.write(\n json.dumps(\n prepare_compiler_includes.prepare(\n os.path.join(args.report_dir, \"compiler_includes.json\"),\n args.sources_root),\n indent=4))\n\n compiler_target_debug = \"compiler_target_DEBUG.json\"\n with open(compiler_target_debug, 'wb') as f:\n f.write(\n json.dumps(\n prepare_compiler_target.prepare(\n os.path.join(args.report_dir, \"compiler_target.json\"),\n args.sources_root),\n indent=4))\n\n # ctu-collect\n out = execute([\"CodeChecker\", \"analyze\", \"--ctu-collect\",\n compile_cmd_debug,\n \"--compiler-includes-file\", compiler_includes_debug,\n \"--compiler-target-file\", compiler_target_debug,\n \"-o\", \"report_debug\",\n \"--verbose\", \"debug\"])\n\n analyzer_command_debug = \"analyzer-command_DEBUG\"\n target = get_triple_arch('.\/analyzer-command')\n with open(analyzer_command_debug, 'w') as f:\n f.write(\n prepare_analyzer_cmd.prepare(\n \".\/analyzer-command\",\n prepare_analyzer_cmd.PathOptions(\n args.sources_root,\n args.clang,\n args.clang_plugin_name,\n args.clang_plugin_path,\n \".\/report_debug\/ctu-dir\/\" + target)))\n\n print(\n \"Preparation of files for debugging is done. \"\n \"Now you can execute the generated analyzer command. \"\n \"E.g. 
$ bash % s\" %\n analyzer_command_debug)\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":28} {"diff_hunk":"@@ -97,16 +97,27 @@ def perform_analysis(args, context, actions, metadata):\n continue\n metadata['checkers'][analyzer].append(check)\n \n+ if ctu_collect:\n+ shutil.rmtree(ctu_dir, ignore_errors=True)\n+ elif ctu_analyze and not os.path.exists(ctu_dir):\n+ LOG.error(\"The given '\" + ctu_dir + \"' does not exist\")\n+ return\n+\n # Run analysis.\n LOG.info(\"Starting static analysis ...\")\n start_time = time.time()\n \n analysis_manager.start_workers(actions, context, config_map,\n args.jobs, args.output_path,\n- __get_skip_handler(args), metadata)\n+ __get_skip_handler(args), metadata,\n+ ctu_collect, ctu_analyze,\n+ ctu_dir, ctu_func_map_cmd)\n \n end_time = time.time()\n LOG.info(\"Analysis length: \" + str(end_time - start_time) + \" sec.\")\n \n metadata['timestamps'] = {'begin': start_time,\n 'end': end_time}\n+\n+ if ctu_collect and ctu_analyze:\n+ shutil.rmtree(ctu_dir, ignore_errors=True)","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n\"\"\"\nPrepare and start different analysis types\n\"\"\"\nimport copy\nimport shlex\nimport subprocess\nimport time\n\nfrom libcodechecker.logger import LoggerFactory\nfrom libcodechecker.analyze import analysis_manager\nfrom libcodechecker.analyze import analyzer_env\nfrom libcodechecker.analyze import skiplist_handler\nfrom libcodechecker.analyze.analyzers import analyzer_types\n\nLOG = LoggerFactory.get_new_logger('ANALYZER')\n\n\ndef prepare_actions(actions, enabled_analyzers):\n \"\"\"\n Set the analyzer type for each buildaction.\n Multiple actions if multiple source analyzers are set.\n \"\"\"\n res = []\n\n for ea in enabled_analyzers:\n for action in actions:\n new_action = copy.deepcopy(action)\n new_action.analyzer_type = ea\n res.append(new_action)\n return res\n\n\ndef __get_analyzer_version(context, analyzer_config_map):\n \"\"\"\n Get the path and the version of the analyzer binaries.\n \"\"\"\n check_env = analyzer_env.get_check_env(context.path_env_extra,\n context.ld_lib_path_extra)\n\n # Get the analyzer binaries from the config_map which\n # contains only the checked and available analyzers.\n versions = {}\n for _, analyzer_cfg in analyzer_config_map.items():\n analyzer_bin = analyzer_cfg.analyzer_binary\n version = [analyzer_bin, u' --version']\n try:\n output = subprocess.check_output(shlex.split(' '.join(version)),\n env=check_env)\n versions[analyzer_bin] = output\n except (subprocess.CalledProcessError, OSError) as oerr:\n LOG.warning(\"Failed to get analyzer version: \" + ' '.join(version))\n LOG.warning(oerr.strerror)\n\n return versions\n\n\ndef __get_skip_handler(args):\n try:\n if args.skipfile:\n LOG.debug_analyzer(\"Creating skiplist handler.\")\n return skiplist_handler.SkipListHandler(args.skipfile)\n except AttributeError:\n LOG.debug_analyzer('Skip file was not set in the command line')\n\n\ndef perform_analysis(args, context, actions, metadata):\n \"\"\"\n Perform static analysis via the given (or if not, all) analyzers,\n in the given analysis context for the supplied build actions.\n Additionally, insert statistical information into the metadata dict.\n \"\"\"\n\n analyzers = args.analyzers if 'analyzers' in args \\\n else 
analyzer_types.supported_analyzers\n analyzers, _ = analyzer_types.check_supported_analyzers(\n analyzers, context)\n\n actions = prepare_actions(actions, analyzers)\n config_map = analyzer_types.build_config_handlers(args, context, analyzers)\n\n # Save some metadata information.\n versions = __get_analyzer_version(context, config_map)\n metadata['versions'].update(versions)\n\n metadata['checkers'] = {}\n for analyzer in analyzers:\n metadata['checkers'][analyzer] = []\n\n for check, data in config_map[analyzer].checks().items():\n enabled, _ = data\n if not enabled:\n continue\n metadata['checkers'][analyzer].append(check)\n\n # Run analysis.\n LOG.info(\"Starting static analysis ...\")\n start_time = time.time()\n\n analysis_manager.start_workers(actions, context, config_map,\n args.jobs, args.output_path,\n __get_skip_handler(args), metadata)\n\n end_time = time.time()\n LOG.info(\"Analysis length: \" + str(end_time - start_time) + \" sec.\")\n\n metadata['timestamps'] = {'begin': start_time,\n 'end': end_time}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":29} {"diff_hunk":"@@ -106,19 +106,12 @@ def handle_auth(host, port, username, login=False):\n sys.exit(1)\n \n \n-def setup_client(host, port, uri):\n- \"\"\"\n- Set up the thrift client and check API version and authentication needs.\n- \"\"\"\n- manager = session_manager.SessionManager_Client()\n- session_token = manager.getToken(host, port)\n-\n+def perform_auth_for_handler(manager, host, port, session_token):\n # Before actually communicating with the server,\n # we need to check authentication first.\n auth_client = authentication_helper.ThriftAuthHelper(host,\n port,\n- uri +\n- 'Authentication',\n+ '\/Authentication',\n session_token)\n try:\n auth_response = auth_client.getAuthParameters()","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n\nimport getpass\nimport sys\n\nfrom thrift.Thrift import TApplicationException\n\nimport shared\nfrom Authentication import ttypes as AuthTypes\n\nfrom libcodechecker import session_manager\nfrom libcodechecker.logger import LoggerFactory\n\nfrom . import thrift_helper\nfrom . 
import authentication_helper\n\nLOG = LoggerFactory.get_new_logger('CLIENT')\nSUPPORTED_API_VERSION = '6.0'\n\n\ndef check_api_version(client):\n \"\"\"\n Check if server API is supported by the client.\n \"\"\"\n\n version = client.getAPIVersion()\n supp_major_version = SUPPORTED_API_VERSION.split('.')[0]\n api_major_version = version.split('.')[0]\n\n # There is NO compatibility between major versions.\n return supp_major_version == api_major_version\n\n\ndef handle_auth(host, port, username, login=False):\n\n session = session_manager.SessionManager_Client()\n\n auth_token = session.getToken(host, port)\n\n auth_client = authentication_helper.ThriftAuthHelper(host,\n port,\n '\/Authentication',\n auth_token)\n\n if not login:\n logout_done = auth_client.destroySession()\n if logout_done:\n session.saveToken(host, port, None, True)\n LOG.info(\"Successfully logged out.\")\n return\n\n try:\n handshake = auth_client.getAuthParameters()\n\n if not handshake.requiresAuthentication:\n LOG.info(\"This server does not require privileged access.\")\n return\n\n if auth_token and handshake.sessionStillActive:\n LOG.info(\"You are already logged in.\")\n return\n else:\n LOG.info(\"Server requires authentication to access. Please use \"\n \"'CodeChecker cmd login' to authenticate.\")\n\n except TApplicationException:\n LOG.info(\"This server does not support privileged access.\")\n return\n\n methods = auth_client.getAcceptedAuthMethods()\n # Attempt username-password auth first.\n if 'Username:Password' in str(methods):\n\n # Try to use a previously saved credential from configuration file.\n saved_auth = session.getAuthString(host, port)\n\n if saved_auth:\n LOG.info(\"Logging in using preconfigured credentials...\")\n username = saved_auth.split(\":\")[0]\n pwd = saved_auth.split(\":\")[1]\n else:\n LOG.info(\"Logging in using credentials from command line...\")\n pwd = getpass.getpass(\"Please provide password for user '{0}'\"\n .format(username))\n\n LOG.debug(\"Trying to login as {0} to {1}:{2}\"\n .format(username, host, port))\n try:\n session_token = auth_client.performLogin(\"Username:Password\",\n username + \":\" +\n pwd)\n\n session.saveToken(host, port, session_token)\n LOG.info(\"Server reported successful authentication.\")\n except shared.ttypes.RequestFailed as reqfail:\n LOG.error(\"Authentication failed! 
Please check your credentials.\")\n LOG.error(reqfail.message)\n sys.exit(1)\n else:\n LOG.critical(\"No authentication methods were reported by the server \"\n \"that this client could support.\")\n sys.exit(1)\n\n\ndef setup_client(host, port, uri):\n \"\"\"\n Stup the thrift client and check API version and authentication needs.\n \"\"\"\n manager = session_manager.SessionManager_Client()\n session_token = manager.getToken(host, port)\n\n # Before actually communicating with the server,\n # we need to check authentication first.\n auth_client = authentication_helper.ThriftAuthHelper(host,\n port,\n uri +\n 'Authentication',\n session_token)\n try:\n auth_response = auth_client.getAuthParameters()\n except TApplicationException as tex:\n auth_response = AuthTypes.HandshakeInformation()\n auth_response.requiresAuthentication = False\n\n if auth_response.requiresAuthentication and \\\n not auth_response.sessionStillActive:\n print_err = False\n\n if manager.is_autologin_enabled():\n auto_auth_string = manager.getAuthString(host, port)\n if auto_auth_string:\n # Try to automatically log in with a saved credential\n # if it exists for the server.\n try:\n session_token = auth_client.performLogin(\n \"Username:Password\",\n auto_auth_string)\n manager.saveToken(host, port, session_token)\n LOG.info(\"Authenticated using pre-configured \"\n \"credentials.\")\n except shared.ttypes.RequestFailed:\n print_err = True\n else:\n print_err = True\n else:\n print_err = True\n\n if print_err:\n LOG.error(\"Access denied. This server requires authentication.\")\n LOG.error(\"Please log in onto the server using 'CodeChecker cmd \"\n \"login'.\")\n sys.exit(1)\n\n client = thrift_helper.ThriftClientHelper(host, port, uri, session_token)\n # Test if client can work with the server's API.\n if not check_api_version(client):\n LOG.critical(\"The server uses a newer version of the API which is \"\n \"incompatible with this client. Please update client.\")\n sys.exit(1)\n\n return client\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":30} {"diff_hunk":"@@ -74,9 +74,13 @@ func (poc *PolicySorter) Sorted() *tierInfo {\n \treturn tierInfo\n }\n \n+\/\/ Note: PolKV is really internal to the calc package. It is named with an initial capital so that\n+\/\/ the test package calc_test can also use it.\n type PolKV struct {\n-\tKey model.PolicyKey\n-\tValue *model.Policy\n+\tKey model.PolicyKey\n+\tValue *model.Policy\n+\tIngress *bool\n+\tEgress *bool\n }\n \n func (p PolKV) String() string {","source_code":"\/\/ Copyright (c) 2016-2017 Tigera, Inc. 
All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage calc\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/backend\/api\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/backend\/model\"\n)\n\ntype PolicySorter struct {\n\ttier *tierInfo\n}\n\nfunc NewPolicySorter() *PolicySorter {\n\treturn &PolicySorter{\n\t\ttier: &tierInfo{\n\t\t\tName: \"default\",\n\t\t\tPolicies: make(map[model.PolicyKey]*model.Policy),\n\t\t},\n\t}\n}\n\nfunc (poc *PolicySorter) OnUpdate(update api.Update) (dirty bool) {\n\tswitch key := update.Key.(type) {\n\tcase model.PolicyKey:\n\t\toldPolicy := poc.tier.Policies[key]\n\t\tif update.Value != nil {\n\t\t\tnewPolicy := update.Value.(*model.Policy)\n\t\t\tif oldPolicy == nil ||\n\t\t\t\toldPolicy.Order != newPolicy.Order ||\n\t\t\t\toldPolicy.DoNotTrack != newPolicy.DoNotTrack ||\n\t\t\t\toldPolicy.PreDNAT != newPolicy.PreDNAT {\n\t\t\t\tdirty = true\n\t\t\t}\n\t\t\tpoc.tier.Policies[key] = newPolicy\n\t\t} else {\n\t\t\tif oldPolicy != nil {\n\t\t\t\tdelete(poc.tier.Policies, key)\n\t\t\t\tdirty = true\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (poc *PolicySorter) Sorted() *tierInfo {\n\ttierInfo := poc.tier\n\ttierInfo.OrderedPolicies = make([]PolKV, 0, len(tierInfo.Policies))\n\tfor k, v := range tierInfo.Policies {\n\t\ttierInfo.OrderedPolicies = append(tierInfo.OrderedPolicies, PolKV{Key: k, Value: v})\n\t}\n\t\/\/ Note: using explicit Debugf() here rather than WithFields(); we want the []PolKV slice\n\t\/\/ to be stringified with %v rather than %#v (as used by WithField()).\n\tlog.Debugf(\"Order before sorting: %v\", tierInfo.OrderedPolicies)\n\tsort.Sort(PolicyByOrder(tierInfo.OrderedPolicies))\n\tlog.Debugf(\"Order after sorting: %v\", tierInfo.OrderedPolicies)\n\treturn tierInfo\n}\n\ntype PolKV struct {\n\tKey model.PolicyKey\n\tValue *model.Policy\n}\n\nfunc (p PolKV) String() string {\n\torderStr := \"nil policy\"\n\tif p.Value != nil {\n\t\tif p.Value.Order != nil {\n\t\t\torderStr = fmt.Sprintf(\"%v\", *p.Value.Order)\n\t\t} else {\n\t\t\torderStr = \"default\"\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"%s(%s)\", p.Key.Name, orderStr)\n}\n\ntype PolicyByOrder []PolKV\n\nfunc (a PolicyByOrder) Len() int { return len(a) }\nfunc (a PolicyByOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a PolicyByOrder) Less(i, j int) bool {\n\tbothNil := a[i].Value.Order == nil && a[j].Value.Order == nil\n\tbothSet := a[i].Value.Order != nil && a[j].Value.Order != nil\n\tordersEqual := bothNil || bothSet && (*a[i].Value.Order == *a[j].Value.Order)\n\n\tif ordersEqual {\n\t\t\/\/ Use name as tie-break.\n\t\tresult := a[i].Key.Name < a[j].Key.Name\n\t\treturn result\n\t}\n\n\t\/\/ nil order maps to \"infinity\"\n\tif a[i].Value.Order == nil {\n\t\treturn false\n\t} else if a[j].Value.Order == nil {\n\t\treturn true\n\t}\n\n\t\/\/ Otherwise, use numeric comparison.\n\treturn *a[i].Value.Order < *a[j].Value.Order\n}\n\ntype 
tierInfo struct {\n\tName string\n\tValid bool\n\tOrder *float64\n\tPolicies map[model.PolicyKey]*model.Policy\n\tOrderedPolicies []PolKV\n}\n\nfunc NewTierInfo(name string) *tierInfo {\n\treturn &tierInfo{\n\t\tName: name,\n\t\tPolicies: make(map[model.PolicyKey]*model.Policy),\n\t}\n}\n\nfunc (t tierInfo) String() string {\n\tpolicies := make([]string, len(t.OrderedPolicies))\n\tfor ii, pol := range t.OrderedPolicies {\n\t\tpolType := \"t\"\n\t\tif pol.Value != nil {\n\t\t\tif pol.Value.DoNotTrack {\n\t\t\t\tpolType = \"u\"\n\t\t\t} else if pol.Value.PreDNAT {\n\t\t\t\tpolType = \"p\"\n\t\t\t}\n\t\t}\n\t\tpolicies[ii] = fmt.Sprintf(\"%v(%v)\", pol.Key.Name, polType)\n\t}\n\treturn fmt.Sprintf(\"%v -> %v\", t.Name, policies)\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":31} {"diff_hunk":"@@ -76,19 +76,14 @@ another key. _target_ need not exist.\n `flux_kvs_txn_put_raw()` sets _key_ to a value containing raw data\n referred to by _data_ of length _len_.\n \n+`flux_kvs_txn_put_treeobj()` sets _key_ to an RFC 11 object, encoded\n+as a JSON string.\n+\n \n FLAGS\n -----\n \n-The _flags_ argument may be zero, or a bitmask of one or more of the\n-following flags:\n-\n-FLUX_KVS_TREEOBJ::\n-The specified value is interpreted as an RFC 11 tree object (KVS meta data)\n-rather than the actual value. Currently the only way to obtain a valid\n-tree object is with `flux_kvs_lookup(3)` or `flux_kvs_lookupat(3)`. In\n-the future, other methods may be available. Note: this flag is only\n-valid for `flux_kvs_txn_put()` and `flux_kvs_txn_pack()`.\n+The _flags_ argument is currently unused and must be zero.\n \n \n include::JSON_PACK.adoc[]","source_code":"flux_kvs_txn_create(3)\n======================\n:doctype: manpage\n\n\nNAME\n----\nflux_kvs_txn_create, flux_kvs_txn_destroy, flux_kvs_txn_put, flux_kvs_txn_pack, flux_kvs_txn_vpack, flux_kvs_txn_mkdir, flux_kvs_txn_unlink, flux_kvs_txn_symlink, flux_kvs_txn_put_raw - operate on a KVS transaction object\n\n\nSYNOPSIS\n--------\n #include \n\n flux_kvs_txn_t *flux_kvs_txn_create (void);\n\n void flux_kvs_txn_destroy (flux_kvs_txn_t *txn);\n\n int flux_kvs_txn_put (flux_kvs_txn_t *txn, int flags,\n const char *key, const char *json_str);\n\n int flux_kvs_txn_pack (flux_kvs_txn_t *txn, int flags,\n const char *key, const char *fmt, ...);\n\n int flux_kvs_txn_vpack (flux_kvs_txn_t *txn, int flags,\n const char *key, const char *fmt, va_list ap);\n\n int flux_kvs_txn_mkdir (flux_kvs_txn_t *txn, int flags,\n const char *key);\n\n int flux_kvs_txn_unlink (flux_kvs_txn_t *txn, int flags,\n const char *key);\n\n int flux_kvs_txn_symlink (flux_kvs_txn_t *txn, int flags,\n const char *key, const char *target);\n\n int flux_kvs_txn_put_raw (flux_kvs_txn_t *txn, int flags,\n const char *key, const void *data, int len);\n\n\n\nDESCRIPTION\n-----------\n\n`flux_kvs_txn_create()` creates a KVS transaction object that may be\npassed to `flux_kvs_commit(3)` or `flux_kvs_fence(3)`. The transaction\nconsists of a list of operations that are applied to the KVS together,\nin order. The entire transaction either succeeds or fails. After commit\nor fence, the object must be destroyed with `flux_kvs_txn_destroy()`.\n\n`flux_kvs_txn_put()` sets _key_ to a value represented by _json_str_.\nIf _key_ does not exist it is created. _key_ is hierarchical, with period\n(\".\") used as a path separator. \".\" represents the root of the namespace\nand is optional at the beginning of _key_. Any path components in _key_\nthat do not exist are created. 
Any path components in _key_ that must be\nconverted to directories are overwritten. The value _json_str_ may be\nany bare JSON value (except null), a JSON array, or a JSON object, encoded\nas a string. Alternatively, the FLUX_KVS_TREEOBJ flag may be specified\nindicating that the _json_str_ value is to be interpreted as an RFC 11\ntree object, as described in FLAGS below. A NULL _json_str_ value is\nequivalent to calling `flux_kvs_txn_unlink()` on _key_.\n\n`flux_kvs_txn_pack()` is identical to `flux_kvs_txn_put()`, except\n`json_pack()` style arguments (see below) are used to construct the\nvalue. `flux_kvs_txn_vpack()` is a variant that accepts a _va_list_\nargument.\n\n`flux_kvs_txn_mkdir()` sets _key_ to an empty directory.\n\n`flux_kvs_txn_unlink()` removes _key_. If _key_ is a directory,\nall its contents are removed as well.\n\n`flux_kvs_txn_symlink()` sets _key_ to a symbolic link pointing to _target_,\nanother key. _target_ need not exist.\n\n`flux_kvs_txn_put_raw()` sets _key_ to a value containing raw data\nreferred to by _data_ of length _len_.\n\n\nFLAGS\n-----\n\nThe _flags_ argument may be zero, or a bitmask of one or more of the\nfollowing flags:\n\nFLUX_KVS_TREEOBJ::\nThe specified value is interpreted as an RFC 11 tree object (KVS meta data)\nrather than the actual value. Currently the only way to obtain a valid\ntree object is with `flux_kvs_lookup(3)` or `flux_kvs_lookupat(3)`. In\nthe future, other methods may be available. Note: this flag is only\nvalid for `flux_kvs_txn_put()` and `flux_kvs_txn_pack()`.\n\n\ninclude::JSON_PACK.adoc[]\n\n\nRETURN VALUE\n------------\n\n`flux_kvs_txn_create()` returns a `flux_kvs_txn_t` object on success,\nor NULL on failure with errno set appropriately.\n\n`flux_kvs_txn_put()`, `flux_kvs_txn_pack()`, `flux_kvs_txn_mkdir()`,\n`flux_kvs_txn_unlink()`, `flux_kvs_txn_symlink()`, and `flux_kvs_txn_put_raw()`\nreturn 0 on success, or -1 on failure with errno set appropriately.\n\nERRORS\n------\n\nEINVAL::\nOne of the arguments was invalid.\n\nENOMEM::\nOut of memory.\n\n\nAUTHOR\n------\nThis page is maintained by the Flux community.\n\n\nRESOURCES\n---------\nGithub: \n\n\nCOPYRIGHT\n---------\ninclude::COPYRIGHT.adoc[]\n\n\nSEE ALSO\n---------\nflux_kvs_commit(3)\n\nhttps:\/\/github.com\/flux-framework\/rfc\/blob\/master\/spec_11.adoc[RFC 11: Key Value Store Tree Object Format v1]\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":32} {"diff_hunk":"@@ -16,6 +16,7 @@ from codechecker_lib import analysis_manager\n from codechecker_lib import client\n from codechecker_lib import logger\n from codechecker_lib import skiplist_handler\n+from codechecker_lib import analyzer_env\n from codechecker_lib.analyzers import analyzer_types\n \n LOG = logger.get_new_logger('ANALYZER')","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. 
See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n\"\"\"\nPrepare and start different analysis types\n\"\"\"\nimport copy\nimport json\nimport os\nimport sys\nimport time\n\nfrom codechecker_lib import analysis_manager\nfrom codechecker_lib import client\nfrom codechecker_lib import logger\nfrom codechecker_lib import skiplist_handler\nfrom codechecker_lib.analyzers import analyzer_types\n\nLOG = logger.get_new_logger('ANALYZER')\n\n\ndef prepare_actions(actions, enabled_analyzers):\n \"\"\"\n Set the analyzer type for each buildaction.\n Multiple actions if multiple source analyzers are set.\n \"\"\"\n res = []\n\n for ea in enabled_analyzers:\n for action in actions:\n new_action = copy.deepcopy(action)\n new_action.analyzer_type = ea\n res.append(new_action)\n return res\n\n\ndef run_check(args, actions, context):\n \"\"\"\n Prepare:\n - analyzer config handlers\n - skiplist handling\n - analyzer severity levels\n\n Stores analysis related data to the database and starts the analysis.\n \"\"\"\n\n if args.jobs <= 0:\n args.jobs = 1\n\n LOG.debug_analyzer(\"Checking supported analyzers.\")\n enabled_analyzers = analyzer_types.check_supported_analyzers(\n args.analyzers,\n context)\n\n # Load severity map from config file.\n LOG.debug_analyzer(\"Loading checker severity map.\")\n if os.path.exists(context.checkers_severity_map_file):\n with open(context.checkers_severity_map_file, 'r') as sev_conf_file:\n severity_config = sev_conf_file.read()\n\n context.severity_map = json.loads(severity_config)\n\n actions = prepare_actions(actions, enabled_analyzers)\n\n package_version = context.version['major'] + '.' + context.version['minor']\n\n suppress_file = ''\n try:\n suppress_file = os.path.realpath(args.suppress)\n except AttributeError:\n LOG.debug_analyzer('Suppress file was not set in the command line')\n\n # Create one skip list handler shared between the analysis manager workers.\n skip_handler = None\n try:\n if args.skipfile:\n LOG.debug_analyzer(\"Creating skiplist handler.\")\n skip_handler = skiplist_handler.SkipListHandler(args.skipfile)\n except AttributeError:\n LOG.debug_analyzer('Skip file was not set in the command line')\n\n with client.get_connection() as connection:\n context.run_id = connection.add_checker_run(' '.join(sys.argv),\n args.name,\n package_version,\n args.force)\n\n # Clean previous suppress information.\n client.clean_suppress(connection, context.run_id)\n\n if os.path.exists(suppress_file):\n client.send_suppress(context.run_id, connection, suppress_file)\n\n analyzer_config_map = \\\n analyzer_types.build_config_handlers(args,\n context,\n enabled_analyzers,\n connection)\n if skip_handler:\n connection.add_skip_paths(context.run_id,\n skip_handler.get_skiplist())\n\n LOG.info(\"Static analysis is starting ...\")\n start_time = time.time()\n\n analysis_manager.start_workers(args,\n actions,\n context,\n analyzer_config_map,\n skip_handler)\n\n end_time = time.time()\n\n with client.get_connection() as connection:\n connection.finish_checker_run(context.run_id)\n\n LOG.info(\"Analysis length: \" + str(end_time - start_time) + \" sec.\")\n\n\ndef run_quick_check(args,\n context,\n actions):\n \"\"\"\n This function implements the \"quickcheck\" feature.\n No result is stored to a database.\n \"\"\"\n\n enabled_analyzers = analyzer_types.check_supported_analyzers(args.analyzers,\n context)\n\n actions = prepare_actions(actions, enabled_analyzers)\n\n analyzer_config_map = \\\n 
analyzer_types.build_config_handlers(args,\n context,\n enabled_analyzers)\n\n analysis_manager.start_workers(args, actions, context, analyzer_config_map,\n None, False)\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":33} {"diff_hunk":"@@ -100,11 +100,15 @@ func (epmm *DefaultEPMarkManager) GetEndpointMark(ep string) (uint32, error) {\n \t\treturn 0, errors.New(\"No mark left for endpoint\")\n \t}\n \n-\tmark, err := epmm.markBitsManager.MapNumberToMark(prospect)\n+\treturn epmm.allocateOnePosition(ep, prospect)\n+}\n+\n+func (epmm *DefaultEPMarkManager) allocateOnePosition(ep string, pos int) (uint32, error) {\n+\tmark, err := epmm.markBitsManager.MapNumberToMark(pos)\n \tif err != nil {\n \t\treturn 0, err\n \t}\n-\tepmm.setMark(ep, prospect, mark)\n+\tepmm.setMark(ep, pos, mark)\n \treturn mark, nil\n }\n ","source_code":"\/\/ Copyright (c) 2018 Tigera, Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage rules\n\nimport (\n\t\"errors\"\n\t\"hash\/fnv\"\n\t\"io\"\n\n\t\"github.com\/projectcalico\/felix\/markbits\"\n)\n\n\/\/ Endpoint Mark Mapper (EPM) provides a set of functions to manage allocation\/freeing of an endpoint mark bit\n\/\/ given a mark bit mask. Note: This is not thread safe.\ntype EndpointMarkMapper interface {\n\tGetMask() uint32\n\tGetEndpointMark(ep string) (uint32, error)\n\tReleaseEndpointMark(ep string)\n\tSetEndpointMark(ep string, mark uint32) error\n}\n\ntype DefaultEPMarkManager struct {\n\tmarkBitsManager *markbits.MarkBitsManager\n\tmaxPosition int\n\n\thash32 HashCalculator32\n\n\tactiveEndpointToPosition map[string]int\n\tactiveEndpointToMark map[string]uint32\n\tactivePositionToEndpoint map[int]string\n\tactiveMarkToEndpoint map[uint32]string\n}\n\nfunc NewEndpointMarkMapper(markMask uint32) EndpointMarkMapper {\n\treturn NewEndpointMarkMapperWithShim(markMask, fnv.New32())\n}\n\nfunc NewEndpointMarkMapperWithShim(markMask uint32, hash32 HashCalculator32) EndpointMarkMapper {\n\tmarkBitsManager := markbits.NewMarkBitsManager(markMask, \"endpoint-iptable-mark\")\n\n\treturn &DefaultEPMarkManager{\n\t\tmarkBitsManager: markBitsManager,\n\t\tmaxPosition: markBitsManager.CurrentFreeNumberOfMark(), \/\/ This includes zero\n\t\thash32: hash32,\n\t\tactiveEndpointToPosition: map[string]int{},\n\t\tactiveEndpointToMark: map[string]uint32{},\n\t\tactivePositionToEndpoint: map[int]string{},\n\t\tactiveMarkToEndpoint: map[uint32]string{},\n\t}\n}\n\nfunc (epmm *DefaultEPMarkManager) GetMask() uint32 {\n\treturn epmm.markBitsManager.GetMask()\n}\n\nfunc (epmm *DefaultEPMarkManager) GetEndpointMark(ep string) (uint32, error) {\n\tlength := len(ep)\n\tif length == 0 {\n\t\treturn 0, errors.New(\"Invalid endpoint name\")\n\t}\n\n\t\/\/ Return current mark for Endpoint if it already has one.\n\tif mark, ok := epmm.activeEndpointToMark[ep]; ok {\n\t\treturn mark, nil\n\t}\n\n\t\/\/ Try to allocate a position based on hash from endpoint name.\n\tepmm.hash32.Write([]byte(ep))\n\ttotal := 
int(epmm.hash32.Sum32())\n\tepmm.hash32.Reset()\n\n\tvar prospect int\n\tgotOne := false\n\tfor i := 0; i < epmm.maxPosition; i++ {\n\t\tprospect = (total + i) % epmm.maxPosition\n\t\tif prospect == 0 {\n\t\t\t\/\/ Make sure we get a non-zero position number.\n\t\t\tcontinue\n\t\t}\n\t\t_, alreadyAlloced := epmm.activePositionToEndpoint[prospect]\n\t\tif !alreadyAlloced {\n\t\t\tgotOne = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !gotOne {\n\t\treturn 0, errors.New(\"No mark left for endpoint\")\n\t}\n\n\tmark, err := epmm.markBitsManager.MapNumberToMark(prospect)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tepmm.setMark(ep, prospect, mark)\n\treturn mark, nil\n}\n\nfunc (epmm *DefaultEPMarkManager) ReleaseEndpointMark(ep string) {\n\tif mark, ok := epmm.activeEndpointToMark[ep]; ok {\n\t\tepmm.deleteMark(ep, epmm.activeEndpointToPosition[ep], mark)\n\t}\n}\n\n\/\/ This is used to set a mark for an endpoint from a previously allocated mark.\n\/\/ The endpoint should not have a mark already.\nfunc (epmm *DefaultEPMarkManager) SetEndpointMark(ep string, mark uint32) error {\n\tif currentMark, ok := epmm.activeEndpointToMark[ep]; ok {\n\t\t\/\/ We got an endpoint with a mark already.\n\t\tif currentMark != mark {\n\t\t\treturn errors.New(\"Different mark already exists\")\n\t\t}\n\t\treturn nil\n\t}\n\tif currentEP, ok := epmm.activeMarkToEndpoint[mark]; ok {\n\t\t\/\/ We got a mark with an endpoint already.\n\t\tif currentEP != ep {\n\t\t\treturn errors.New(\"Endpoint with this mark already exists\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tpos, err := epmm.markBitsManager.MapMarkToNumber(mark)\n\tif err != nil {\n\t\treturn err\n\t}\n\tepmm.setMark(ep, pos, mark)\n\treturn nil\n}\n\nfunc (epmm *DefaultEPMarkManager) deleteMark(ep string, pos int, mark uint32) {\n\tdelete(epmm.activePositionToEndpoint, pos)\n\tdelete(epmm.activeMarkToEndpoint, mark)\n\tdelete(epmm.activeEndpointToPosition, ep)\n\tdelete(epmm.activeEndpointToMark, ep)\n}\n\nfunc (epmm *DefaultEPMarkManager) setMark(ep string, pos int, mark uint32) {\n\tepmm.activePositionToEndpoint[pos] = ep\n\tepmm.activeEndpointToPosition[ep] = pos\n\tepmm.activeEndpointToMark[ep] = mark\n\tepmm.activeMarkToEndpoint[mark] = ep\n}\n\n\/\/ This interface has a subset of the functions of the built-in hash32 interface.\ntype HashCalculator32 interface {\n\t\/\/ Write (via the embedded io.Writer interface) adds more data to the running hash.\n\t\/\/ It never returns an error.\n\tio.Writer\n\n\t\/\/ Sum32 returns a hash result of uint32.\n\t\/\/ It does not change the underlying hash state.\n\tSum32() uint32\n\n\t\/\/ Reset resets the Hash to its initial state.\n\tReset()\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":34} {"diff_hunk":"@@ -31,11 +31,11 @@ static struct wlr_input_device *allocate_device(struct wlr_backend_state *state,\n \t\/\/ TODO: any way to retrieve this information?\n \tint vendor = 0;\n \tint product = 0;\n-\tconst char *name = \"unknown;wayland\";\n+\tconst char *name = \"wayland\";\n \tstruct wlr_input_device *wlr_device = wlr_input_device_create(\n \t\ttype, &input_device_impl, devstate,\n \t\tname, vendor, product);\n-\tif(!wlr_device) {\n+\tif (!wlr_device) {\n \t\tfree(devstate);\n \t\treturn NULL;\n \t}","source_code":"#define _XOPEN_SOURCE 500\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"backend\/wayland.h\"\n\nstatic void wlr_wl_device_destroy(struct wlr_input_device_state *state) {\n\tfree(state);\n}\n\nstatic struct wlr_input_device_impl input_device_impl 
= {\n\t.destroy = wlr_wl_device_destroy\n};\n\nstatic struct wlr_input_device *allocate_device(struct wlr_backend_state *state,\n\t\tenum wlr_input_device_type type) {\n\tstruct wlr_input_device_state *devstate =\n\t\tcalloc(1, sizeof(struct wlr_input_device_state));\n\tif(!devstate) {\n\t\twlr_log(L_ERROR, \"Allocation failed: %s\", strerror(errno));\n\t\treturn NULL;\n\t}\n\n\t\/\/ TODO: any way to retrieve this information?\n\tint vendor = 0;\n\tint product = 0;\n\tconst char *name = \"unknown;wayland\";\n\tstruct wlr_input_device *wlr_device = wlr_input_device_create(\n\t\ttype, &input_device_impl, devstate,\n\t\tname, vendor, product);\n\tif(!wlr_device) {\n\t\tfree(devstate);\n\t\treturn NULL;\n\t}\n\n\tlist_add(state->devices, wlr_device);\n\treturn wlr_device;\n}\n\nstatic void seat_handle_capabilities(void *data, struct wl_seat *wl_seat,\n\t\tenum wl_seat_capability caps) {\n\tstruct wlr_backend_state *state = data;\n\tassert(state->seat == wl_seat);\n\n\t\/\/ TODO: add listeners and receive input\n\tif ((caps & WL_SEAT_CAPABILITY_POINTER)) {\n\t\twlr_log(L_DEBUG, \"seat %p offered pointer\", wl_seat);\n\t\tstruct wl_pointer *wl_pointer = wl_seat_get_pointer(wl_seat);\n\n\t\tstruct wlr_input_device *wlr_device = allocate_device(state,\n\t\t\tWLR_INPUT_DEVICE_POINTER);\n\t\tif(!wlr_device) {\n\t\t\twl_pointer_destroy(wl_pointer);\n\t\t\twlr_log(L_ERROR, \"Unable to allocate wl_pointer device\");\n\t\t\treturn;\n\t\t}\n\n\t\twlr_device->pointer = wlr_pointer_create(NULL, NULL);\n\t\tlist_add(state->devices, wlr_device);\n\t\twl_signal_emit(&state->backend->events.input_add, wlr_device);\n\t}\n\tif ((caps & WL_SEAT_CAPABILITY_KEYBOARD)) {\n\t\twlr_log(L_DEBUG, \"seat %p offered keyboard\", wl_seat);\n\t\tstruct wl_keyboard *wl_keyboard = wl_seat_get_keyboard(wl_seat);\n\t\tstruct wlr_input_device *wlr_device = allocate_device(state,\n\t\t\tWLR_INPUT_DEVICE_KEYBOARD);\n\t\tif(!wlr_device) {\n\t\t\twl_keyboard_release(wl_keyboard);\n\t\t\twlr_log(L_ERROR, \"Unable to allocate wl_keyboard device\");\n\t\t\treturn;\n\t\t}\n\n\t\twlr_device->keyboard = wlr_keyboard_create(NULL, NULL);\n\t\tlist_add(state->devices, wlr_device);\n\t\twl_signal_emit(&state->backend->events.input_add, wlr_device);\n\t}\n\n\t\/\/ TODO: touch\n}\n\nstatic void seat_handle_name(void *data, struct wl_seat *wl_seat, const char *name) {\n\tstruct wlr_backend_state *state = data;\n\tassert(state->seat == wl_seat);\n\tstate->seatName = strdup(name);\n}\n\nconst struct wl_seat_listener seat_listener = {\n\t.capabilities = seat_handle_capabilities,\n\t.name = seat_handle_name,\n};\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":35} {"diff_hunk":"@@ -166,7 +166,7 @@ class PlistToDB(ResultHandler):\n connection.finish_build_action(analysis_id, msg)\n return 1\n \n- self.__store_bugs(files, bugs, connection, analysis_id)\n+ self.__store_bugs(files, reports, connection, analysis_id)\n ","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. 
See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n\nfrom abc import ABCMeta\nimport ntpath\nimport os\nimport zlib\n\nimport shared\n\nfrom libcodechecker import client\nfrom libcodechecker import logger\nfrom libcodechecker import suppress_handler\nfrom libcodechecker.analyze import plist_parser\nfrom libcodechecker.analyze.analyzers.result_handler_base import ResultHandler\nfrom libcodechecker.logger import LoggerFactory\n\nLOG = LoggerFactory.get_new_logger('PLIST TO DB')\n\n\nclass PlistToDB(ResultHandler):\n    \"\"\"\n    Result handler that processes a plist file with the\n    analysis results and stores them to the database.\n    \"\"\"\n\n    __metaclass__ = ABCMeta\n\n    def __init__(self, buildaction, workspace, run_id):\n        super(PlistToDB, self).__init__(buildaction, workspace)\n        self.__run_id = run_id\n\n    def __store_bugs(self, files, bugs, connection, analysis_id):\n        file_ids = {}\n        # Send content of file to the server if needed.\n        for file_name in files:\n            file_descriptor = connection.need_file_content(self.__run_id,\n                                                           file_name)\n            file_ids[file_name] = file_descriptor.fileId\n\n            # Sometimes the file doesn't exist, e.g. when the input of the\n            # analysis is pure plist files.\n            if not os.path.isfile(file_name):\n                LOG.debug(file_name + ' not found, and will not be stored.')\n                continue\n\n            if file_descriptor.needed:\n                with open(file_name, 'r') as source_file:\n                    file_content = source_file.read()\n                compressed_file = zlib.compress(file_content,\n                                                zlib.Z_BEST_COMPRESSION)\n                # TODO: we may not use the file content in the end\n                # depending on skippaths.\n                LOG.debug('storing file content to the database')\n                connection.add_file_content(file_descriptor.fileId,\n                                            compressed_file)\n\n        # Skipping bugs in header files handled here.\n        report_ids = []\n        for bug in bugs:\n            events = bug.events()\n\n            # Skip list handler can be None if no config file is set.\n            if self.skiplist_handler:\n                if events and self.skiplist_handler.should_skip(\n                        events[-1].start_pos.file_path):\n                    # Issue #20: this bug is in a file which should be skipped\n                    LOG.debug(bug.hash_value + ' is skipped (in ' +\n                              events[-1].start_pos.file_path + \")\")\n                    continue\n\n            # Create remaining data for bugs and send them to the server.\n            bug_paths = []\n            for path in bug.paths():\n                bug_paths.append(\n                    shared.ttypes.BugPathPos(path.start_pos.line,\n                                             path.start_pos.col,\n                                             path.end_pos.line,\n                                             path.end_pos.col,\n                                             file_ids[\n                                                 path.start_pos.file_path]))\n\n            bug_events = []\n            for event in bug.events():\n                bug_events.append(shared.ttypes.BugPathEvent(\n                    event.start_pos.line,\n                    event.start_pos.col,\n                    event.end_pos.line,\n                    event.end_pos.col,\n                    event.msg,\n                    file_ids[event.start_pos.file_path]))\n\n            bug_hash = bug.hash_value\n\n            severity_name = self.severity_map.get(bug.checker_name,\n                                                  'UNSPECIFIED')\n            severity = shared.ttypes.Severity._NAMES_TO_VALUES[severity_name]\n\n            sp_handler = suppress_handler.SourceSuppressHandler(bug)\n\n            # Check for suppress comment.\n            supp = sp_handler.get_suppressed()\n            if supp:\n                connection.add_suppress_bug(self.__run_id, [supp])\n\n            LOG.debug('Storing check results to the database.')\n\n            report_id = connection.add_report(analysis_id,\n                                              file_ids[bug.file_path],\n                                              bug_hash,\n                                              bug.msg,\n                                              bug_paths,\n                                              bug_events,\n                                              bug.checker_name,\n                                              bug.category,\n                                              bug.type,\n                                              severity,\n                                              supp is not None)\n\n            report_ids.append(report_id)\n\n    def handle_results(self):\n        \"\"\"\n        Send the plist content to the database.\n        Server API calls should be used in one connection.\n         - addBuildAction\n         - 
addReport\n - needFileContent\n - addFileContent\n - finishBuildAction\n \"\"\"\n\n with client.get_connection() as connection:\n\n LOG.debug('Storing original build and analyzer command '\n 'to the database.')\n\n _, source_file_name = ntpath.split(self.analyzed_source_file)\n\n if LoggerFactory.get_log_level() == logger.DEBUG:\n analyzer_cmd = ' '.join(self.analyzer_cmd)\n else:\n analyzer_cmd = ''\n\n build_cmd_hash = self.buildaction.original_command_hash\n analysis_id = \\\n connection.add_build_action(self.__run_id,\n build_cmd_hash,\n analyzer_cmd,\n self.buildaction.analyzer_type,\n source_file_name)\n\n assert self.analyzer_returncode == 0\n\n plist_file = self.analyzer_result_file\n\n try:\n files, bugs = plist_parser.parse_plist(plist_file)\n except Exception as ex:\n LOG.debug(str(ex))\n msg = 'Parsing the generated result file failed.'\n LOG.error(msg + ' ' + plist_file)\n connection.finish_build_action(analysis_id, msg)\n return 1\n\n self.__store_bugs(files, bugs, connection, analysis_id)\n\n connection.finish_build_action(analysis_id, self.analyzer_stderr)\n\n def postprocess_result(self):\n \"\"\"\n No postprocessing required for plists.\n \"\"\"\n pass\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":36} {"diff_hunk":"@@ -90,9 +90,7 @@ int main()\n int* p = 0;\n \n i = *p + 42;\n-}\"\"\"\n- elif version == 4:\n- source = \"\"\"\n+}\"\"\", \"\"\"\n \n \n int main()","source_code":"#\n# -----------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. See LICENSE.TXT for details.\n# -----------------------------------------------------------------------------\n\n\"\"\" detection_status function test. 
\"\"\"\nimport json\nimport os\nimport unittest\n\nimport shared\n\nfrom libtest import codechecker\nfrom libtest import env\n\n\nclass TestDetectionStatus(unittest.TestCase):\n\n def setUp(self):\n # TEST_WORKSPACE is automatically set by test package __init__.py .\n self.test_workspace = os.environ['TEST_WORKSPACE']\n\n test_class = self.__class__.__name__\n print('Running ' + test_class + ' tests in ' + self.test_workspace)\n\n self._codechecker_cfg = env.import_codechecker_cfg(self.test_workspace)\n\n # Get the CodeChecker cmd if needed for the tests.\n self._codechecker_cmd = env.codechecker_cmd()\n self._test_dir = os.path.join(self.test_workspace, 'test_files')\n\n try:\n os.makedirs(self._test_dir)\n except os.error:\n # Directory already exists.\n pass\n\n # Setup a viewer client to test viewer API calls.\n self._cc_client = env.setup_viewer_client(self.test_workspace)\n self.assertIsNotNone(self._cc_client)\n\n # Change working dir to testfile dir so CodeChecker can be run easily.\n self.__old_pwd = os.getcwd()\n os.chdir(self._test_dir)\n\n self._source_file = \"main.cpp\"\n\n # Init project dir.\n makefile = \"all:\\n\\t$(CXX) -c main.cpp -o \/dev\/null\\n\"\n project_info = {\n \"name\": \"hello\",\n \"clean_cmd\": \"\",\n \"build_cmd\": \"make\"\n }\n\n with open(os.path.join(self._test_dir, 'Makefile'), 'w') as f:\n f.write(makefile)\n with open(os.path.join(self._test_dir, 'project_info.json'), 'w') as f:\n json.dump(project_info, f)\n\n def tearDown(self):\n \"\"\"Restore environment after tests have ran.\"\"\"\n os.chdir(self.__old_pwd)\n\n def _create_source_file(self, version):\n if version == 1:\n source = \"\"\"\nint main()\n{\n int i = 1 \/ 0;\n}\"\"\"\n elif version == 2:\n source = \"\"\"\nint main()\n{\n int i = 1 \/ 0;\n\n int* p = 0;\n\n i = *p + 42;\n}\"\"\"\n elif version == 3:\n source = \"\"\"\nint main()\n{\n int i = 1 \/ 2;\n\n int* p = 0;\n\n i = *p + 42;\n}\"\"\"\n elif version == 4:\n source = \"\"\"\n\n\nint main()\n{\n int i = 1 \/ 0;\n\n int* p = 0;\n\n i = *p + 42;\n}\"\"\"\n\n with open(os.path.join(self._test_dir, self._source_file), 'w') as f:\n f.write(source)\n\n codechecker.check(self._codechecker_cfg,\n 'hello',\n self._test_dir)\n\n def test_same_file_change(self):\n \"\"\"\n This tests the change of the detection status of bugs when the file\n content changes.\n \"\"\"\n\n # Check the first file version\n self._create_source_file(1)\n\n runs = self._cc_client.getRunData(None)\n run_id = max(map(lambda run: run.runId, runs))\n\n reports = self._cc_client.getRunResults([run_id], 100, 0, [], [])\n print(reports)\n self.assertEqual(len(reports), 2)\n self.assertTrue(all(map(\n lambda r: r.detectionStatus == shared.ttypes.DetectionStatus.NEW,\n reports)))\n\n # Check the second file version\n self._create_source_file(2)\n reports = self._cc_client.getRunResults([run_id], 100, 0, [], [])\n for report in reports:\n if report.detectionStatus == \\\n shared.ttypes.DetectionStatus.UNRESOLVED:\n self.assertIn(report.bugHash,\n ['209be2f6905590d99853ce01d52a78e0',\n 'e8f47588c8095f02a53e338984ce52ba'])\n elif report.detectionStatus == \\\n shared.ttypes.DetectionStatus.NEW:\n self.assertIn(report.bugHash,\n ['cbd629ba2ee25c41cdbf5e2e336b1b1c'])\n else:\n self.assertTrue(False)\n\n # Check the third file version\n self._create_source_file(3)\n reports = self._cc_client.getRunResults([run_id], 100, 0, [], [])\n for report in reports:\n if report.detectionStatus == \\\n shared.ttypes.DetectionStatus.RESOLVED:\n self.assertIn(report.bugHash,\n 
['209be2f6905590d99853ce01d52a78e0',\n 'e8f47588c8095f02a53e338984ce52ba'])\n elif report.detectionStatus == \\\n shared.ttypes.DetectionStatus.NEW:\n self.assertIn(report.bugHash,\n ['ac147b31a745d91be093bd70bbc5567c'])\n elif report.detectionStatus == \\\n shared.ttypes.DetectionStatus.UNRESOLVED:\n self.assertIn(report.bugHash,\n ['cbd629ba2ee25c41cdbf5e2e336b1b1c'])\n else:\n self.assertTrue(False)\n\n # Check the second file version again\n self._create_source_file(2)\n reports = self._cc_client.getRunResults([run_id], 100, 0, [], [])\n for report in reports:\n if report.detectionStatus == \\\n shared.ttypes.DetectionStatus.UNRESOLVED:\n self.assertIn(report.bugHash,\n ['cbd629ba2ee25c41cdbf5e2e336b1b1c'])\n elif report.detectionStatus == \\\n shared.ttypes.DetectionStatus.REOPENED:\n self.assertIn(report.bugHash,\n ['209be2f6905590d99853ce01d52a78e0',\n 'e8f47588c8095f02a53e338984ce52ba'])\n elif report.detectionStatus == \\\n shared.ttypes.DetectionStatus.RESOLVED:\n self.assertIn(report.bugHash,\n ['ac147b31a745d91be093bd70bbc5567c'])\n\n # Check the fourth file version\n self._create_source_file(4)\n reports = self._cc_client.getRunResults([run_id], 100, 0, [], [])\n for report in reports:\n if report.detectionStatus == \\\n shared.ttypes.DetectionStatus.UNRESOLVED:\n self.assertIn(report.bugHash,\n ['209be2f6905590d99853ce01d52a78e0',\n 'e8f47588c8095f02a53e338984ce52ba',\n 'cbd629ba2ee25c41cdbf5e2e336b1b1c'])\n elif report.detectionStatus == \\\n shared.ttypes.DetectionStatus.RESOLVED:\n self.assertIn(report.bugHash,\n ['ac147b31a745d91be093bd70bbc5567c'])\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":37} {"diff_hunk":"@@ -44,9 +44,8 @@\n \n \/* global list of tokens we've seen *\/\n static struct _fpga_feature_token *ftoken_root;\n-\/** Mutex to protect feature tokens *\/\n-pthread_mutex_t ftoken_lock = PTHREAD_MUTEX_INITIALIZER;\n \n+extern pthread_mutex_t global_lock;\n \/**\n * @brief Add entry to linked list for feature tokens\n *\tWill allocate memory (which is freed by feature_token_cleanup())","source_code":"\/\/ Copyright(c) 2017-2018, Intel Corporation\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are met:\n\/\/\n\/\/ * Redistributions of source code must retain the above copyright notice,\n\/\/ this list of conditions and the following disclaimer.\n\/\/ * Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation\n\/\/ and\/or other materials provided with the distribution.\n\/\/ * Neither the name of Intel Corporation nor the names of its contributors\n\/\/ may be used to endorse or promote products derived from this software\n\/\/ without specific prior written permission.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n\/\/ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n\/\/ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n\/\/ ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n\/\/ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n\/\/ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n\/\/ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n\/\/ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n\/\/ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n\/\/ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n\/\/ POSSIBILITY OF SUCH DAMAGE.\n\n#ifdef HAVE_CONFIG_H\n#include \n#endif \/\/ HAVE_CONFIG_H\n#include \n#ifndef _GNU_SOURCE\n#define _GNU_SOURCE\n#endif\n#include \n#include \n#include \n#include \n#include \n#undef _GNU_SOURCE\n\n#include \"safe_string\/safe_string.h\"\n#include \"types_int.h\"\n#include \"feature_token_list_int.h\"\n\n\/* global list of tokens we've seen *\/\nstatic struct _fpga_feature_token *ftoken_root;\n\/** Mutex to protect feature tokens *\/\npthread_mutex_t ftoken_lock = PTHREAD_MUTEX_INITIALIZER;\n\n\/**\n * @brief Add entry to linked list for feature tokens\n *\tWill allocate memory (which is freed by feature_token_cleanup())\n *\n * @param type\n * @param guid\n * @param handle\n *\n * @return\n *\/\nstruct _fpga_feature_token *feature_token_add(uint32_t type, uint32_t mmio_num, fpga_guid guid,\n\t\t\t\t\t uint64_t offset, fpga_handle handle)\n{\n\tstruct _fpga_feature_token *tmp;\n\terrno_t e;\n\tint err = 0;\n\n\tif (pthread_mutex_lock(&ftoken_lock)) {\n\t\tFPGA_ERR(\"Failed to lock feature token mutex\");\n\t\treturn NULL;\n\t}\n\n\t\/* Prevent duplicate entries. *\/\n\tfor (tmp = ftoken_root; NULL != tmp; tmp = tmp->next) {\n\t\tif ((uuid_compare(guid, tmp->feature_guid)) == 0) {\n\t\t\terr = pthread_mutex_unlock(&ftoken_lock);\n\t\t\tif (err) {\n\t\t\t\tFPGA_ERR(\"pthread_mutex_unlock() failed: %S\",\n\t\t\t\t\t strerror(err));\n\t\t\t}\n\t\t\treturn tmp;\n\t\t}\n\t}\n\n\ttmp = (struct _fpga_feature_token *)malloc(\n\t\tsizeof(struct _fpga_feature_token));\n\tif (NULL == tmp) {\n\t\tFPGA_ERR(\"Failed to allocate memory for fhandle\");\n\t\treturn NULL;\n\t}\n\n\tuuid_clear(tmp->feature_guid);\n\ttmp->magic = FPGA_FEATURE_TOKEN_MAGIC;\n\ttmp->feature_type = type;\n\ttmp->mmio_num = mmio_num;\n\ttmp->csr_offset = offset;\n\ttmp->handle = handle;\n\ttmp->next = NULL;\n\n\te = memcpy_s(tmp->feature_guid, sizeof(fpga_guid), guid,\n\t\t sizeof(fpga_guid));\n\n\tif (EOK != e) {\n\t\tFPGA_ERR(\"memcpy_s failed\");\n\t\tgoto out_free;\n\t}\n\n\ttmp->next = ftoken_root;\n\tftoken_root = tmp;\n\n\terr = pthread_mutex_unlock(&ftoken_lock);\n\tif (err) {\n\t\tFPGA_ERR(\"pthread_mutex_unlock() failed: %S\", strerror(err));\n\t\tgoto out_free;\n\t}\n\n\treturn tmp;\n\nout_free:\n\tfree(tmp);\n\terr = pthread_mutex_unlock(&ftoken_lock);\n\tif (err) {\n\t\tFPGA_ERR(\"pthread_mutex_unlock() failed: %S\", strerror(err));\n\t}\n\treturn NULL;\n}\n\n\/*\n * Clean up remaining entries in linked list\n * Will delete all remaining entries\n *\/\nvoid feature_token_cleanup(void)\n{\n\tint err = 0;\n\tstruct _fpga_feature_token *current = ftoken_root;\n\terr = pthread_mutex_lock(&ftoken_lock);\n\tif (err) {\n\t\tFPGA_ERR(\"pthread_mutex_lock() failed: %s\", strerror(err));\n\t\treturn;\n\t}\n\n\tif (!ftoken_root)\n\t\tgoto out_unlock;\n\n\twhile (current) {\n\t\tstruct _fpga_feature_token *tmp = current;\n\t\tcurrent = current->next;\n\n\t\t\/\/ invalidate magic (just in case)\n\t\ttmp->magic = FPGA_INVALID_MAGIC;\n\t\tfree(tmp);\n\t\ttmp = 
NULL;\n\t}\n\n\tftoken_root = NULL;\n\nout_unlock:\n\terr = pthread_mutex_unlock(&ftoken_lock);\n\tif (err) {\n\t\tFPGA_ERR(\"pthread_mutex_unlock() failed: %s\", strerror(err));\n\t}\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":38} {"diff_hunk":"@@ -176,11 +176,13 @@ int main() {\n \t\tfree(keymap);\n \t\tbreak;\n \t}\n+\tstate.xwayland = wlr_xwayland_create(compositor.display, state.wlr_compositor);\n \n \tcompositor.keyboard_key_cb = handle_keyboard_key;\n \n \twl_display_run(compositor.display);\n \n+\twlr_xwayland_destroy(state.xwayland);\n \tclose(state.keymap_fd);\n \twlr_seat_destroy(state.wl_seat);\n \twlr_data_device_manager_destroy(state.data_device_manager);","source_code":"#define _POSIX_C_SOURCE 199309L\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"wlr\/types\/wlr_compositor.h\"\n#include \n#include \n#include \"shared.h\"\n\n\/\/ TODO: move to common header?\nint os_create_anonymous_file(off_t size);\n\nstruct sample_state {\n\tstruct wlr_renderer *renderer;\n\tstruct wlr_compositor *wlr_compositor;\n\tstruct wlr_wl_shell *wl_shell;\n\tstruct wlr_seat *wl_seat;\n\tstruct wlr_xdg_shell_v6 *xdg_shell;\n\tstruct wlr_data_device_manager *data_device_manager;\n\tstruct wl_resource *focus;\n\tstruct wl_listener keyboard_bound;\n\tint keymap_fd;\n\tsize_t keymap_size;\n\tuint32_t serial;\n};\n\n\/*\n * Convert timespec to milliseconds\n *\/\nstatic inline int64_t timespec_to_msec(const struct timespec *a) {\n\treturn (int64_t)a->tv_sec * 1000 + a->tv_nsec \/ 1000000;\n}\n\nstatic void output_frame_handle_surface(struct sample_state *sample,\n\t\tstruct wlr_output *wlr_output, struct timespec *ts,\n\t\tstruct wl_resource *_res) {\n\tstruct wlr_surface *surface = wl_resource_get_user_data(_res);\n\tfloat matrix[16];\n\tfloat transform[16];\n\twlr_surface_flush_damage(surface);\n\tif (surface->texture->valid) {\n\t\twlr_matrix_translate(&transform, 200, 200, 0);\n\t\twlr_surface_get_matrix(surface, &matrix,\n\t\t\t&wlr_output->transform_matrix, &transform);\n\t\twlr_render_with_matrix(sample->renderer, surface->texture, &matrix);\n\n\t\tstruct wlr_frame_callback *cb, *cnext;\n\t\twl_list_for_each_safe(cb, cnext, &surface->frame_callback_list, link) {\n\t\t\twl_callback_send_done(cb->resource, timespec_to_msec(ts));\n\t\t\twl_resource_destroy(cb->resource);\n\t\t}\n\t}\n}\nstatic void handle_output_frame(struct output_state *output, struct timespec *ts) {\n\tstruct compositor_state *state = output->compositor;\n\tstruct sample_state *sample = state->data;\n\tstruct wlr_output *wlr_output = output->output;\n\n\twlr_output_make_current(wlr_output);\n\twlr_renderer_begin(sample->renderer, wlr_output);\n\n\tstruct wlr_wl_shell_surface *wl_shell_surface;\n\twl_list_for_each(wl_shell_surface, &sample->wl_shell->surfaces, link) {\n\t\toutput_frame_handle_surface(sample, wlr_output, ts, wl_shell_surface->surface);\n\t}\n\tstruct wlr_xdg_surface_v6 *xdg_surface;\n\twl_list_for_each(xdg_surface, &sample->xdg_shell->surfaces, link) {\n\t\toutput_frame_handle_surface(sample, wlr_output, ts, xdg_surface->surface);\n\t}\n\n\twlr_renderer_end(sample->renderer);\n\twlr_output_swap_buffers(wlr_output);\n}\n\nstatic void handle_keyboard_key(struct keyboard_state *keyboard, uint32_t keycode,\n\t \txkb_keysym_t sym, enum wlr_key_state key_state) {\n\tstruct compositor_state *state = keyboard->compositor;\n\tstruct 
sample_state *sample = state->data;\n\n\tstruct wl_resource *res = NULL;\n\tstruct wlr_seat_handle *seat_handle = NULL;\n\twl_list_for_each(res, &sample->wlr_compositor->surfaces, link) {\n\t\tbreak;\n\t}\n\n\tif (res) {\n\t\tseat_handle = wlr_seat_handle_for_client(sample->wl_seat,\n\t\t\twl_resource_get_client(res));\n\t}\n\n\tif (res != sample->focus && seat_handle && seat_handle->keyboard) {\n\t\tstruct wl_array keys;\n\t\twl_array_init(&keys);\n\t\twl_keyboard_send_enter(seat_handle->keyboard, ++sample->serial, res, &keys);\n\t\tsample->focus = res;\n\t}\n\n\tif (seat_handle && seat_handle->keyboard) {\n\t\tuint32_t depressed = xkb_state_serialize_mods(keyboard->xkb_state,\n\t\t\tXKB_STATE_MODS_DEPRESSED);\n\t\tuint32_t latched = xkb_state_serialize_mods(keyboard->xkb_state,\n\t\t\tXKB_STATE_MODS_LATCHED);\n\t\tuint32_t locked = xkb_state_serialize_mods(keyboard->xkb_state,\n\t\t\tXKB_STATE_MODS_LOCKED);\n\t\tuint32_t group = xkb_state_serialize_layout(keyboard->xkb_state,\n\t\t\tXKB_STATE_LAYOUT_EFFECTIVE);\n\t\twl_keyboard_send_modifiers(seat_handle->keyboard, ++sample->serial, depressed,\n\t\t\tlatched, locked, group);\n\t\twl_keyboard_send_key(seat_handle->keyboard, ++sample->serial, 0, keycode, key_state);\n\t}\n}\n\nstatic void handle_keyboard_bound(struct wl_listener *listener, void *data) {\n\tstruct wlr_seat_handle *handle = data;\n\tstruct sample_state *state = wl_container_of(listener, state, keyboard_bound);\n\n\twl_keyboard_send_keymap(handle->keyboard, WL_KEYBOARD_KEYMAP_FORMAT_XKB_V1,\n\t\tstate->keymap_fd, state->keymap_size);\n\n\tif (wl_resource_get_version(handle->keyboard) >= 2) {\n\t\twl_keyboard_send_repeat_info(handle->keyboard, 25, 600);\n\t}\n}\n\nint main() {\n\tstruct sample_state state = { 0 };\n\tstruct compositor_state compositor = { 0,\n\t\t.data = &state,\n\t\t.output_frame_cb = handle_output_frame,\n\t};\n\tcompositor_init(&compositor);\n\n\tstate.renderer = wlr_gles2_renderer_create(compositor.backend);\n\tif (!state.renderer) {\n\t\twlr_log(L_ERROR, \"Could not start compositor, OOM\");\n\t\texit(EXIT_FAILURE);\n\t}\n\twl_display_init_shm(compositor.display);\n\tstate.wlr_compositor = wlr_compositor_create(compositor.display, state.renderer);\n\tstate.wl_shell = wlr_wl_shell_create(compositor.display);\n\tstate.xdg_shell = wlr_xdg_shell_v6_create(compositor.display);\n\tstate.data_device_manager = wlr_data_device_manager_create(compositor.display);\n\n\tstate.wl_seat = wlr_seat_create(compositor.display, \"seat0\");\n\tstate.keyboard_bound.notify = handle_keyboard_bound;\n\twl_signal_add(&state.wl_seat->events.keyboard_bound, &state.keyboard_bound);\n\twlr_seat_set_capabilities(state.wl_seat, WL_SEAT_CAPABILITY_KEYBOARD\n\t\t| WL_SEAT_CAPABILITY_POINTER | WL_SEAT_CAPABILITY_TOUCH);\n\n\tstruct keyboard_state *kbstate;\n\twl_list_for_each(kbstate, &compositor.keyboards, link) {\n\t\tchar *keymap = xkb_keymap_get_as_string(kbstate->keymap,\n\t\t\tXKB_KEYMAP_FORMAT_TEXT_V1);\n\t\tstate.keymap_size = strlen(keymap);\n\t\tstate.keymap_fd = os_create_anonymous_file(state.keymap_size);\n\t\tvoid *ptr = mmap(NULL, state.keymap_size,\n\t\t\t\t PROT_READ | PROT_WRITE,\n\t\t\t\t MAP_SHARED, state.keymap_fd, 0);\n\t\tstrcpy(ptr, keymap);\n\t\tfree(keymap);\n\t\tbreak;\n\t}\n\n\tcompositor.keyboard_key_cb = 
handle_keyboard_key;\n\n\twl_display_run(compositor.display);\n\n\tclose(state.keymap_fd);\n\twlr_seat_destroy(state.wl_seat);\n\twlr_data_device_manager_destroy(state.data_device_manager);\n\twlr_xdg_shell_v6_destroy(state.xdg_shell);\n\twlr_wl_shell_destroy(state.wl_shell);\n\twlr_compositor_destroy(state.wlr_compositor);\n\twlr_renderer_destroy(state.renderer);\n\tcompositor_fini(&compositor);\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":39} {"diff_hunk":"@@ -51,12 +51,12 @@ static void send_chunk(h2o_ostream_t *_self, h2o_req_t *req, h2o_iovec_t *inbufs\n outbufcnt += inbufcnt;\n if (state != H2O_SEND_STATE_ERROR) {\n outbufs[outbufcnt].base = \"\\r\\n0\\r\\n\\r\\n\";\n- outbufs[outbufcnt].len = state == H2O_SEND_STATE_FINAL ? (req->send_server_timing ? 5 : 7) : 2;\n+ outbufs[outbufcnt].len = state == H2O_SEND_STATE_FINAL ? (req->send_server_timing_trailer ? 5 : 7) : 2;\n outbufcnt++;\n }\n } else if (state == H2O_SEND_STATE_FINAL) {\n outbufs[outbufcnt].base = \"0\\r\\n\\r\\n\";\n- outbufs[outbufcnt].len = req->send_server_timing ? 3 : 5;\n+ outbufs[outbufcnt].len = req->send_server_timing_trailer ? 3 : 5;\n outbufcnt++;\n }\n ","source_code":"\/*\n * Copyright (c) 2014 DeNA Co., Ltd.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and\/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n *\/\n#include \n#include \n#include \n#include \"h2o.h\"\n\ntypedef struct st_chunked_encoder_t {\n h2o_ostream_t super;\n char buf[64];\n} chunked_encoder_t;\n\nstatic void send_chunk(h2o_ostream_t *_self, h2o_req_t *req, h2o_iovec_t *inbufs, size_t inbufcnt, h2o_send_state_t state)\n{\n chunked_encoder_t *self = (void *)_self;\n h2o_iovec_t *outbufs = alloca(sizeof(h2o_iovec_t) * (inbufcnt + 2));\n size_t chunk_size, outbufcnt = 0, i;\n\n \/* calc chunk size *\/\n chunk_size = 0;\n for (i = 0; i != inbufcnt; ++i)\n chunk_size += inbufs[i].len;\n req->bytes_sent += chunk_size;\n\n \/* create chunk header and output data *\/\n if (chunk_size != 0) {\n outbufs[outbufcnt].base = self->buf;\n outbufs[outbufcnt].len = sprintf(self->buf, \"%zx\\r\\n\", chunk_size);\n assert(outbufs[outbufcnt].len < sizeof(self->buf));\n outbufcnt++;\n memcpy(outbufs + outbufcnt, inbufs, sizeof(h2o_iovec_t) * inbufcnt);\n outbufcnt += inbufcnt;\n if (state != H2O_SEND_STATE_ERROR) {\n outbufs[outbufcnt].base = \"\\r\\n0\\r\\n\\r\\n\";\n outbufs[outbufcnt].len = state == H2O_SEND_STATE_FINAL ? (req->send_server_timing ? 
5 : 7) : 2;\n outbufcnt++;\n }\n } else if (state == H2O_SEND_STATE_FINAL) {\n outbufs[outbufcnt].base = \"0\\r\\n\\r\\n\";\n outbufs[outbufcnt].len = req->send_server_timing ? 3 : 5;\n outbufcnt++;\n }\n\n \/* if state is error, send a broken chunk to pass the error down to the browser *\/\n if (state == H2O_SEND_STATE_ERROR) {\n outbufs[outbufcnt].base = \"\\r\\n1\\r\\n\";\n outbufs[outbufcnt].len = 5;\n outbufcnt++;\n }\n\n h2o_ostream_send_next(&self->super, req, outbufs, outbufcnt, state);\n}\n\nstatic void on_setup_ostream(h2o_filter_t *self, h2o_req_t *req, h2o_ostream_t **slot)\n{\n chunked_encoder_t *encoder;\n\n \/* TODO: make chunked filter a submodule of lib\/http1.c so that we could eliminate this flag, protocol version checks, etc. *\/\n if (req->is_subrequest)\n goto Next;\n\n \/* do nothing if not HTTP\/1.1 or content-length is known *\/\n if (req->res.content_length != SIZE_MAX || req->version != 0x101)\n goto Next;\n \/* RFC 2616 4.4 states that the following status codes (and response to a HEAD method) should not include message body *\/\n if ((100 <= req->res.status && req->res.status <= 199) || req->res.status == 204 || req->res.status == 304)\n goto Next;\n else if (h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT(\"HEAD\")))\n goto Next;\n \/* we cannot handle certain responses (like 101 switching protocols) *\/\n if (req->res.status != 200) {\n req->http1_is_persistent = 0;\n goto Next;\n }\n \/* skip if content-encoding header is being set *\/\n if (h2o_find_header(&req->res.headers, H2O_TOKEN_TRANSFER_ENCODING, -1) != -1)\n goto Next;\n\n \/* set content-encoding header *\/\n h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_TRANSFER_ENCODING, NULL, H2O_STRLIT(\"chunked\"));\n\n \/* set the flag that tells finalostream that req->bytes_sent is already counted *\/\n req->bytes_counted_by_ostream = 1;\n\n \/* setup filter *\/\n encoder = (void *)h2o_add_ostream(req, H2O_ALIGNOF(*encoder), sizeof(*encoder), slot);\n encoder->super.do_send = send_chunk;\n slot = &encoder->super.next;\n\nNext:\n h2o_setup_next_ostream(req, slot);\n}\n\nvoid h2o_chunked_register(h2o_pathconf_t *pathconf)\n{\n h2o_filter_t *self = h2o_create_filter(pathconf, sizeof(*self));\n self->on_setup_ostream = on_setup_ostream;\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":40} {"diff_hunk":"@@ -139,22 +139,30 @@ class TestSuppress(unittest.TestCase):\n 'status': rw_status}\n \n run_results = get_all_run_results(self._cc_client, runid)\n+ logging.debug(\"Run results:\")\n+ [logging.debug(x) for x in run_results]\n self.assertIsNotNone(run_results)\n self.assertNotEqual(len(run_results), 0)\n \n for bug_hash in hash_to_suppress_msgs:\n- expected = hash_to_suppress_msgs[bug_hash]\n- report = [x for x in run_results if x.bugHash == bug_hash][0]\n+ expected_data = hash_to_suppress_msgs[bug_hash]\n+ report_data_of_bug = [\n+ report_data for report_data in run_results\n+ if report_data.bugHash == bug_hash]\n+ self.assertEqual(len(report_data_of_bug), 1)\n+ report_data = report_data_of_bug[0]\n \n # Check the stored suppress comment\n- self.assertEqual(report.reviewData.comment, expected['message'])\n- self.assertEqual(report.reviewData.status, expected['status'])\n+ self.assertEqual(report_data.reviewData.comment,\n+ expected_data['message'])\n+ self.assertEqual(report_data.reviewData.status,\n+ expected_data['status'])\n \n # Change review status to confirmed bug.\n review_comment = \"This is really a bug\"\n status = ReviewStatus.CONFIRMED\n success = 
self._cc_client.changeReviewStatus(\n- report.reportId, status, review_comment)\n+ report_data.reportId, status, review_comment)\n \n self.assertTrue(success)\n logging.debug(\"Bug review status changed successfully\")","source_code":"# -------------------------------------------------------------------------\n#\n# Part of the CodeChecker project, under the Apache License v2.0 with\n# LLVM Exceptions. See LICENSE for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n#\n# -------------------------------------------------------------------------\n\"\"\"\nTest source-code level suppression data writing to suppress file.\n\"\"\"\n\n\nimport logging\nimport os\nimport shlex\nimport sys\nimport subprocess\nfrom subprocess import CalledProcessError\nimport unittest\n\nfrom codechecker_api.codeCheckerDBAccess_v6.ttypes import ReviewStatus\n\nfrom libtest import env\nfrom libtest import codechecker\nfrom libtest.thrift_client_to_db import get_all_run_results\n\n\ndef call_cmd(command, cwd, env):\n try:\n print(' '.join(command))\n proc = subprocess.Popen(\n shlex.split(' '.join(command)),\n cwd=cwd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=env, encoding=\"utf-8\", errors=\"ignore\")\n out, err = proc.communicate()\n print(out)\n print(err)\n return proc.returncode\n except CalledProcessError as cerr:\n print(\"Failed to call:\\n\" + ' '.join(cerr.cmd))\n return cerr.returncode\n\n\nclass TestSuppress(unittest.TestCase):\n \"\"\"\n Test source-code level suppression data writing to suppress file.\n \"\"\"\n\n def setUp(self):\n self._test_workspace = os.environ['TEST_WORKSPACE']\n\n self._testproject_data = env.setup_test_proj_cfg(self._test_workspace)\n self.assertIsNotNone(self._testproject_data)\n\n self._test_project_path = self._testproject_data['project_path']\n\n self._cc_client = env.setup_viewer_client(self._test_workspace)\n self.assertIsNotNone(self._cc_client)\n\n # Get the run names which belong to this test\n run_names = env.get_run_names(self._test_workspace)\n\n runs = self._cc_client.getRunData(None, None, 0, None)\n\n test_runs = [run for run in runs if run.name in run_names]\n\n self.assertEqual(len(test_runs), 1,\n 'There should be only one run for this test.')\n self._runid = test_runs[0].runId\n self._run_name = test_runs[0].name\n\n def test_suppress_import(self):\n \"\"\"\n Test the suppress file importing.\n \"\"\"\n\n generated_file = os.path.join(self._test_workspace,\n \"generated.suppress\")\n\n extract_cmd = ['CodeChecker', 'parse',\n os.path.join(self._test_workspace, \"reports\"),\n \"--suppress\", generated_file,\n \"--export-source-suppress\"\n ]\n\n ret = call_cmd(extract_cmd,\n self._test_project_path,\n env.test_env(self._test_workspace))\n self.assertEqual(ret, 2, \"Failed to generate suppress file.\")\n\n codechecker_cfg = env.import_test_cfg(\n self._test_workspace)['codechecker_cfg']\n\n product_url = env.parts_to_url(codechecker_cfg)\n import_cmd = ['CodeChecker', 'cmd', 'suppress', '-i', generated_file,\n '--url', product_url, self._run_name]\n\n print(import_cmd)\n ret = call_cmd(import_cmd,\n self._test_project_path,\n env.test_env(self._test_workspace))\n self.assertEqual(ret, 0, \"Failed to import suppress file.\")\n\n def test_suppress_comment_in_db(self):\n \"\"\"\n Exported source suppress comment stored as a review status in the db.\n \"\"\"\n runid = self._runid\n logging.debug(\"Get all run results from the db for runid: \" +\n str(runid))\n\n hash_to_suppress_msgs = {}\n with 
open(os.path.join(self._test_project_path, \"suppress.expected\"),\n 'r', encoding=\"utf-8\", errors=\"ignore\") as expected:\n for line in expected:\n src_code_info = line.strip().split('||')\n\n status = None\n if len(src_code_info) == 4:\n # Newest source code comment format where status is given.\n bug_hash, _, msg, status = src_code_info\n elif len(src_code_info) == 3:\n # Old format where review status is not given.\n bug_hash, _, msg = src_code_info\n else:\n # Oldest source code comment format where status and file\n # name are not given.\n bug_hash, msg = src_code_info\n\n rw_status = ReviewStatus.FALSE_POSITIVE\n if status == 'confirmed':\n rw_status = ReviewStatus.CONFIRMED\n elif status == 'intentional':\n rw_status = ReviewStatus.INTENTIONAL\n\n hash_to_suppress_msgs[bug_hash] = {'message': msg,\n 'status': rw_status}\n\n run_results = get_all_run_results(self._cc_client, runid)\n self.assertIsNotNone(run_results)\n self.assertNotEqual(len(run_results), 0)\n\n for bug_hash in hash_to_suppress_msgs:\n expected = hash_to_suppress_msgs[bug_hash]\n report = [x for x in run_results if x.bugHash == bug_hash][0]\n\n # Check the stored suppress comment\n self.assertEqual(report.reviewData.comment, expected['message'])\n self.assertEqual(report.reviewData.status, expected['status'])\n\n # Change review status to confirmed bug.\n review_comment = \"This is really a bug\"\n status = ReviewStatus.CONFIRMED\n success = self._cc_client.changeReviewStatus(\n report.reportId, status, review_comment)\n\n self.assertTrue(success)\n logging.debug(\"Bug review status changed successfully\")\n\n # Get the results to compare.\n updated_results = get_all_run_results(self._cc_client, self._runid)\n self.assertIsNotNone(updated_results)\n self.assertNotEqual(len(updated_results), 0)\n\n for bug_hash in hash_to_suppress_msgs:\n report = [x for x in updated_results if x.bugHash == bug_hash][0]\n\n # Check the stored suppress comment\n self.assertEqual(report.reviewData.comment, \"This is really a bug\")\n self.assertEqual(report.reviewData.status, ReviewStatus.CONFIRMED)\n\n # Check the same project again.\n codechecker_cfg = env.import_test_cfg(\n self._test_workspace)['codechecker_cfg']\n\n initial_test_project_name = self._run_name\n\n ret = codechecker.check_and_store(codechecker_cfg,\n initial_test_project_name,\n self._test_project_path)\n if ret:\n sys.exit(1)\n\n # Get the results to compare.\n updated_results = get_all_run_results(self._cc_client, self._runid)\n self.assertIsNotNone(updated_results)\n self.assertNotEqual(len(updated_results), 0)\n\n for bug_hash in hash_to_suppress_msgs:\n expected = hash_to_suppress_msgs[bug_hash]\n report = [x for x in updated_results if x.bugHash == bug_hash][0]\n\n # Check that source code comments in the database are changed back\n # after storage.\n self.assertEqual(report.reviewData.comment, expected['message'])\n self.assertEqual(report.reviewData.status, expected['status'])\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":41} {"diff_hunk":"@@ -43,6 +43,9 @@ const (\n \n \tIPSetIDAllHostIPs = \"all-hosts\"\n \n+\tChainFipDnat = ChainNamePrefix + \"-fip-dnat\"\n+\tChainFipSnat = ChainNamePrefix + \"-fip-snat\"\n+\n \tPolicyInboundPfx = ChainNamePrefix + \"pi-\"\n \tPolicyOutboundPfx = ChainNamePrefix + \"po-\"\n ","source_code":"\/\/ Copyright (c) 2016-2017 Tigera, Inc. 
All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage rules\n\nimport (\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/ipsets\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/iptables\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/proto\"\n\t\"net\"\n\t\"strings\"\n)\n\nconst (\n\tChainNamePrefix = \"cali\"\n\tIPSetNamePrefix = \"cali\"\n\n\tChainFilterInput = ChainNamePrefix + \"-INPUT\"\n\tChainFilterForward = ChainNamePrefix + \"-FORWARD\"\n\tChainFilterOutput = ChainNamePrefix + \"-OUTPUT\"\n\n\tChainFailsafeIn = ChainNamePrefix + \"-failsafe-in\"\n\tChainFailsafeOut = ChainNamePrefix + \"-failsafe-out\"\n\n\tChainNATPrerouting = ChainNamePrefix + \"-PREROUTING\"\n\tChainNATPostrouting = ChainNamePrefix + \"-POSTROUTING\"\n\tChainNATOutgoing = ChainNamePrefix + \"-nat-outgoing\"\n\n\tIPSetIDNATOutgoingAllPools = \"all-ipam-pools\"\n\tIPSetIDNATOutgoingMasqPools = \"masq-ipam-pools\"\n\n\tIPSetIDAllHostIPs = \"all-hosts\"\n\n\tPolicyInboundPfx = ChainNamePrefix + \"pi-\"\n\tPolicyOutboundPfx = ChainNamePrefix + \"po-\"\n\n\tChainWorkloadToHost = ChainNamePrefix + \"-wl-to-host\"\n\tChainFromWorkloadDispatch = ChainNamePrefix + \"-from-wl-dispatch\"\n\tChainToWorkloadDispatch = ChainNamePrefix + \"-to-wl-dispatch\"\n\n\tChainDispatchToHostEndpoint = ChainNamePrefix + \"-to-host-endpoint\"\n\tChainDispatchFromHostEndpoint = ChainNamePrefix + \"-from-host-endpoint\"\n\n\tWorkloadToEndpointPfx = ChainNamePrefix + \"tw-\"\n\tWorkloadFromEndpointPfx = ChainNamePrefix + \"fw-\"\n\n\tHostToEndpointPfx = ChainNamePrefix + \"th-\"\n\tHostFromEndpointPfx = ChainNamePrefix + \"fh-\"\n\n\tRuleHashPrefix = \"cali:\"\n\n\t\/\/ HistoricNATRuleInsertRegex is a regex pattern to match to match\n\t\/\/ special-case rules inserted by old versions of felix. Specifically,\n\t\/\/ Python felix used to insert a masquerade rule directly into the\n\t\/\/ POSTROUTING chain.\n\t\/\/\n\t\/\/ Note: this regex depends on the output format of iptables-save so,\n\t\/\/ where possible, it's best to match only on part of the rule that\n\t\/\/ we're sure can't change (such as the ipset name in the masquerade\n\t\/\/ rule).\n\tHistoricInsertedNATRuleRegex = `-A POSTROUTING .* felix-masq-ipam-pools .*|` +\n\t\t`-A POSTROUTING -o tunl0 -m addrtype ! --src-type LOCAL --limit-iface-out -m addrtype --src-type LOCAL -j MASQUERADE`\n)\n\nvar (\n\t\/\/ AllHistoricChainNamePrefixes lists all the prefixes that we've used for chains. 
Keeping\n\t\/\/ track of the old names lets us clean them up.\n\tAllHistoricChainNamePrefixes = []string{\"felix-\", \"cali\"}\n\t\/\/ AllHistoricIPSetNamePrefixes, similarly contains all the prefixes we've ever used for IP\n\t\/\/ sets.\n\tAllHistoricIPSetNamePrefixes = []string{\"felix-\", \"cali\"}\n\t\/\/ LegacyV4IPSetNames contains some extra IP set names that were used in older versions of\n\t\/\/ Felix and don't fit our versioned pattern.\n\tLegacyV4IPSetNames = []string{\"felix-masq-ipam-pools\", \"felix-all-ipam-pools\"}\n)\n\ntype RuleRenderer interface {\n\tStaticFilterTableChains(ipVersion uint8) []*iptables.Chain\n\tStaticNATTableChains(ipVersion uint8) []*iptables.Chain\n\n\tWorkloadDispatchChains(map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint) []*iptables.Chain\n\tWorkloadEndpointToIptablesChains(epID *proto.WorkloadEndpointID, endpoint *proto.WorkloadEndpoint) []*iptables.Chain\n\n\tHostDispatchChains(map[string]proto.HostEndpointID) []*iptables.Chain\n\tHostEndpointToIptablesChains(ifaceName string, endpoint *proto.HostEndpoint) []*iptables.Chain\n\n\tPolicyToIptablesChains(policyID *proto.PolicyID, policy *proto.Policy, ipVersion uint8) []*iptables.Chain\n\tProfileToIptablesChains(profileID *proto.ProfileID, policy *proto.Profile, ipVersion uint8) []*iptables.Chain\n\tProtoRuleToIptablesRules(pRule *proto.Rule, ipVersion uint8) []iptables.Rule\n\n\tNATOutgoingChain(active bool, ipVersion uint8) *iptables.Chain\n}\n\ntype DefaultRuleRenderer struct {\n\tConfig\n\n\tdropActions []iptables.Action\n\tinputAcceptActions []iptables.Action\n}\n\nfunc (r *DefaultRuleRenderer) ipSetConfig(ipVersion uint8) *ipsets.IPVersionConfig {\n\tif ipVersion == 4 {\n\t\treturn r.IPSetConfigV4\n\t} else if ipVersion == 6 {\n\t\treturn r.IPSetConfigV6\n\t} else {\n\t\tlog.WithField(\"version\", ipVersion).Panic(\"Unknown IP version\")\n\t\treturn nil\n\t}\n}\n\ntype Config struct {\n\tIPSetConfigV4 *ipsets.IPVersionConfig\n\tIPSetConfigV6 *ipsets.IPVersionConfig\n\n\tWorkloadIfacePrefixes []string\n\n\tIptablesMarkAccept uint32\n\tIptablesMarkNextTier uint32\n\n\tOpenStackMetadataIP net.IP\n\tOpenStackMetadataPort uint16\n\tOpenStackSpecialCasesEnabled bool\n\n\tIPIPEnabled bool\n\tIPIPTunnelAddress net.IP\n\n\tDropLogPrefix string\n\tActionOnDrop string\n\tEndpointToHostAction string\n\n\tFailsafeInboundHostPorts []uint16\n\tFailsafeOutboundHostPorts []uint16\n}\n\nfunc NewRenderer(config Config) RuleRenderer {\n\tlog.WithField(\"config\", config).Info(\"Creating rule renderer.\")\n\t\/\/ Convert configured actions to rule slices. First, what should we actually do when we'd\n\t\/\/ normally drop a packet? For sandbox mode, we support allowing the packet instead, or\n\t\/\/ logging it.\n\tvar dropActions []iptables.Action\n\tif strings.HasPrefix(config.ActionOnDrop, \"LOG-\") {\n\t\tlog.Warn(\"Action on drop includes LOG. All dropped packets will be logged.\")\n\t\tlogPrefix := \"calico-drop\"\n\t\tif config.DropLogPrefix != \"\" {\n\t\t\tlogPrefix = config.DropLogPrefix\n\t\t}\n\t\tdropActions = append(dropActions, iptables.LogAction{Prefix: logPrefix})\n\t}\n\tif strings.HasSuffix(config.ActionOnDrop, \"ACCEPT\") {\n\t\tlog.Warn(\"Action on drop set to ACCEPT. 
Calico security is disabled!\")\n\t\tdropActions = append(dropActions, iptables.AcceptAction{})\n\t} else {\n\t\tdropActions = append(dropActions, iptables.DropAction{})\n\t}\n\n\t\/\/ Second, what should we do with packets that come from workloads to the host itself.\n\tvar inputAcceptActions []iptables.Action\n\tswitch config.EndpointToHostAction {\n\tcase \"DROP\":\n\t\tlog.Info(\"Workload to host packets will be dropped.\")\n\t\tinputAcceptActions = dropActions\n\tcase \"ACCEPT\":\n\t\tlog.Info(\"Workload to host packets will be accepted.\")\n\t\tinputAcceptActions = []iptables.Action{iptables.AcceptAction{}}\n\tdefault:\n\t\tlog.Info(\"Workload to host packets will be returned to INPUT chain.\")\n\t\tinputAcceptActions = []iptables.Action{iptables.ReturnAction{}}\n\t}\n\n\treturn &DefaultRuleRenderer{\n\t\tConfig: config,\n\t\tdropActions: dropActions,\n\t\tinputAcceptActions: inputAcceptActions,\n\t}\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":42} {"diff_hunk":"@@ -176,7 +176,7 @@ class PlistToDB(ResultHandler):\n \n assert self.analyzer_returncode == 0\n \n- plist_file = self.get_analyzer_result_file()\n+ plist_file = self.analyzer_result_file\n \n try:\n files, bugs = plist_parser.parse_plist(plist_file)","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n\nimport ntpath\nimport os\nimport zlib\nfrom abc import ABCMeta\n\nimport shared\n\nfrom codechecker_lib import client\nfrom codechecker_lib import plist_parser\nfrom codechecker_lib import suppress_handler\nfrom codechecker_lib.logger import LoggerFactory\nfrom codechecker_lib.analyzers.result_handler_base import ResultHandler\n\nLOG = LoggerFactory.get_new_logger('PLIST TO DB')\n\n\nclass PlistToDB(ResultHandler):\n \"\"\"\n Result handler for processing a plist file with the\n analysis results and stores them to the database.\n \"\"\"\n\n __metaclass__ = ABCMeta\n\n def __init__(self, buildaction, workspace, run_id):\n super(PlistToDB, self).__init__(buildaction, workspace)\n self.__run_id = run_id\n\n def __store_bugs(self, files, bugs, connection, analisys_id):\n file_ids = {}\n # Send content of file to the server if needed.\n for file_name in files:\n file_descriptor = connection.need_file_content(self.__run_id,\n file_name)\n file_ids[file_name] = file_descriptor.fileId\n\n # Sometimes the file doesn't exist, e.g. 
when the input of the\n # analysis is pure plist files.\n if not os.path.isfile(file_name):\n LOG.debug(file_name + ' not found, and will not be stored.')\n continue\n\n if file_descriptor.needed:\n with open(file_name, 'r') as source_file:\n file_content = source_file.read()\n compressed_file = zlib.compress(file_content,\n zlib.Z_BEST_COMPRESSION)\n # TODO: we may not use the file content in the end\n # depending on skippaths.\n LOG.debug('storing file content to the database')\n connection.add_file_content(file_descriptor.fileId,\n compressed_file)\n\n # Skipping bugs in header files handled here.\n report_ids = []\n for bug in bugs:\n events = bug.events()\n\n # Skip list handler can be None if no config file is set.\n if self.skiplist_handler:\n if events and self.skiplist_handler.should_skip(\n events[-1].start_pos.file_path):\n # Issue #20: this bug is in a file which should be skipped\n LOG.debug(bug.hash_value + ' is skipped (in ' +\n events[-1].start_pos.file_path + \")\")\n continue\n\n # Create remaining data for bugs and send them to the server.\n bug_paths = []\n for path in bug.paths():\n bug_paths.append(\n shared.ttypes.BugPathPos(path.start_pos.line,\n path.start_pos.col,\n path.end_pos.line,\n path.end_pos.col,\n file_ids[\n path.start_pos.file_path]))\n\n bug_events = []\n for event in bug.events():\n bug_events.append(shared.ttypes.BugPathEvent(\n event.start_pos.line,\n event.start_pos.col,\n event.end_pos.line,\n event.end_pos.col,\n event.msg,\n file_ids[event.start_pos.file_path]))\n\n bug_hash = bug.hash_value\n\n severity_name = self.severity_map.get(bug.checker_name,\n 'UNSPECIFIED')\n severity = shared.ttypes.Severity._NAMES_TO_VALUES[severity_name]\n\n sp_handler = suppress_handler.SourceSuppressHandler(bug)\n\n # Check for suppress comment.\n supp = sp_handler.get_suppressed()\n if supp:\n connection.add_suppress_bug(self.__run_id, [supp])\n\n LOG.debug('Storing check results to the database.')\n\n report_id = connection.add_report(analisys_id,\n file_ids[bug.file_path],\n bug_hash,\n bug.msg,\n bug_paths,\n bug_events,\n bug.checker_name,\n bug.category,\n bug.type,\n severity,\n supp is not None)\n\n report_ids.append(report_id)\n\n def handle_plist(self, plist):\n with client.get_connection() as connection:\n # TODO: When the analyzer name can be read from PList, then it\n # should be passed too.\n # TODO: File name should be read from the PList and passed.\n analysis_id = connection. 
\\\n add_build_action(self.__run_id,\n plist,\n 'Build action from plist',\n '',\n '')\n\n try:\n files, bugs = plist_parser.parse_plist(plist)\n except Exception as ex:\n msg = 'Parsing the generated result file failed.'\n LOG.error(msg + ' ' + plist)\n LOG.error(str(ex))\n connection.finish_build_action(analysis_id, msg)\n return 1\n\n self.__store_bugs(files, bugs, connection, analysis_id)\n\n connection.finish_build_action(analysis_id, self.analyzer_stderr)\n\n def handle_results(self):\n \"\"\"\n Send the plist content to the database.\n Server API calls should be used in one connection.\n - addBuildAction\n - addReport\n - needFileContent\n - addFileContent\n - finishBuildAction\n \"\"\"\n\n with client.get_connection() as connection:\n\n LOG.debug('Storing original build and analyzer command '\n 'to the database.')\n\n _, source_file_name = ntpath.split(self.analyzed_source_file)\n\n analysis_id = \\\n connection.add_build_action(self.__run_id,\n self.buildaction.original_command,\n ' '.join(\n self.analyzer_cmd),\n self.buildaction.analyzer_type,\n source_file_name)\n\n # Store buildaction and analyzer command to the database.\n\n assert self.analyzer_returncode == 0\n\n plist_file = self.get_analyzer_result_file()\n\n try:\n files, bugs = plist_parser.parse_plist(plist_file)\n except Exception as ex:\n LOG.debug(str(ex))\n msg = 'Parsing the generated result file failed.'\n LOG.error(msg + ' ' + plist_file)\n connection.finish_build_action(analysis_id, msg)\n return 1\n\n self.__store_bugs(files, bugs, connection, analysis_id)\n\n connection.finish_build_action(analysis_id, self.analyzer_stderr)\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":43} {"diff_hunk":"@@ -54,25 +54,30 @@ class PlistToHtmlTest(unittest.TestCase):\n plist_file.truncate()\n plist_file.write(new_content)\n \n- def __test_html_builder(self, proj: str):\n+ def __test_html_builder(self, proj: str) -> str:\n \"\"\"\n Test building html file from the given proj's plist file.\n \"\"\"\n- proj_dir = os.path.join(self.test_workspace, 'test_files', proj)\n- plist_file = os.path.join(proj_dir, f\"{proj}.plist\")\n-\n- reports = report_file.get_reports(plist_file)\n+ html_builder = report_to_html.HtmlBuilder(self.layout_dir)\n \n+ proj_dir = os.path.join(self.test_workspace, 'test_files', proj)\n output_dir = os.path.join(proj_dir, 'html')\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n \n- output_path = os.path.join(output_dir, f\"{proj}.plist.html\")\n+ processed_path_hashes = set()\n+ for file_path in glob.glob(os.path.join(proj_dir, f\"*.plist\")):\n+ file_name = os.path.basename(file_path)\n+ output_path = os.path.join(output_dir, f\"{file_name}.html\")\n \n- html_builder = report_to_html.HtmlBuilder(self.layout_dir)\n- report_to_html.convert(plist_file, reports, output_dir, html_builder)\n+ reports = report_file.get_reports(file_path)\n+ reports = reports_helper.skip(\n+ reports, processed_path_hashes)\n+\n+ report_to_html.convert(\n+ file_path, reports, output_dir, html_builder)\n \n- self.assertTrue(os.path.exists(output_path))\n+ self.assertTrue(os.path.exists(output_path))\n \n html_builder.create_index_html(output_dir)\n html_builder.create_statistics_html(output_dir)","source_code":"# -------------------------------------------------------------------------\n#\n# Part of the CodeChecker project, under the Apache License v2.0 with\n# LLVM Exceptions. 
See LICENSE for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n#\n# -------------------------------------------------------------------------\n\n\nimport os\nimport shutil\nimport unittest\n\nfrom typing import ClassVar\n\nfrom libtest import env\n\nfrom codechecker_report_converter.report.output.html import \\\n html as report_to_html\nfrom codechecker_report_converter.report import report_file\n\n\ndef get_project_path(test_project) -> str:\n \"\"\" Return project path for the given project. \"\"\"\n return os.path.join(env.test_proj_root(), test_project)\n\n\nclass PlistToHtmlTest(unittest.TestCase):\n test_workspace: ClassVar[str]\n layout_dir: ClassVar[str]\n\n @classmethod\n def setUpClass(self):\n \"\"\" Initialize test files. \"\"\"\n self.test_workspace = os.environ['TEST_WORKSPACE']\n self.layout_dir = os.environ['LAYOUT_DIR']\n\n test_file_dir_path = os.path.join(self.test_workspace, \"test_files\")\n\n test_projects = ['notes', 'macros', 'simple']\n for test_project in test_projects:\n test_project_path = os.path.join(test_file_dir_path, test_project)\n shutil.copytree(get_project_path(test_project), test_project_path)\n\n for test_file in os.listdir(test_project_path):\n if test_file.endswith(\".plist\"):\n test_file_path = os.path.join(test_project_path, test_file)\n with open(test_file_path, 'r+',\n encoding='utf-8', errors='ignore') as plist_file:\n content = plist_file.read()\n new_content = content.replace(\"$FILE_PATH$\",\n test_project_path)\n plist_file.seek(0)\n plist_file.truncate()\n plist_file.write(new_content)\n\n def __test_html_builder(self, proj: str):\n \"\"\"\n Test building html file from the given proj's plist file.\n \"\"\"\n proj_dir = os.path.join(self.test_workspace, 'test_files', proj)\n plist_file = os.path.join(proj_dir, f\"{proj}.plist\")\n\n reports = report_file.get_reports(plist_file)\n\n output_dir = os.path.join(proj_dir, 'html')\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n output_path = os.path.join(output_dir, f\"{proj}.plist.html\")\n\n html_builder = report_to_html.HtmlBuilder(self.layout_dir)\n report_to_html.convert(plist_file, reports, output_dir, html_builder)\n\n self.assertTrue(os.path.exists(output_path))\n\n html_builder.create_index_html(output_dir)\n html_builder.create_statistics_html(output_dir)\n\n index_html = os.path.join(output_dir, 'index.html')\n self.assertTrue(os.path.exists(index_html))\n\n def test_get_report_data_notes(self):\n \"\"\" Get report data for plist which contains notes. \"\"\"\n proj_notes = os.path.join(self.test_workspace, 'test_files', 'notes')\n plist_file = os.path.join(proj_notes, 'notes.plist')\n\n reports = report_file.get_reports(plist_file)\n\n html_builder = report_to_html.HtmlBuilder(self.layout_dir)\n html_builder._add_html_reports(reports)\n\n self.assertEqual(len(html_builder.files), 1)\n\n html_reports = html_builder.html_reports\n self.assertEqual(len(html_reports), 1)\n\n report = html_reports[0]\n self.assertEqual(len(report['notes']), 1)\n self.assertEqual(len(report['macros']), 0)\n self.assertGreaterEqual(len(report['events']), 1)\n self.assertEqual(report['checkerName'], 'alpha.clone.CloneChecker')\n\n def test_get_report_data_macros(self):\n \"\"\" Get report data for plist which contains macro expansion. 
\"\"\"\n proj_macros = os.path.join(self.test_workspace, 'test_files', 'macros')\n plist_file = os.path.join(proj_macros, 'macros.plist')\n\n reports = report_file.get_reports(plist_file)\n\n html_builder = report_to_html.HtmlBuilder(self.layout_dir)\n html_builder._add_html_reports(reports)\n\n self.assertEqual(len(html_builder.files), 1)\n\n html_reports = html_builder.html_reports\n self.assertEqual(len(html_reports), 1)\n\n report = html_reports[0]\n self.assertEqual(len(reports), 1)\n\n report = html_reports[0]\n self.assertEqual(len(report['notes']), 0)\n self.assertEqual(len(report['macros']), 1)\n self.assertGreaterEqual(len(report['events']), 1)\n self.assertEqual(report['checkerName'], 'core.NullDereference')\n\n def test_get_report_data_simple(self):\n \"\"\" Get report data for plist which contains simple reports. \"\"\"\n proj_simple = os.path.join(self.test_workspace, 'test_files', 'simple')\n plist_file = os.path.join(proj_simple, 'simple.plist')\n\n reports = report_file.get_reports(plist_file)\n\n html_builder = report_to_html.HtmlBuilder(self.layout_dir)\n html_builder._add_html_reports(reports)\n\n self.assertEqual(len(html_builder.files), 1)\n\n html_reports = html_builder.html_reports\n self.assertEqual(len(html_reports), 2)\n\n dead_stores = [r for r in html_reports if\n r['checkerName'] == 'deadcode.DeadStores'][0]\n self.assertEqual(len(dead_stores['notes']), 0)\n self.assertEqual(len(dead_stores['macros']), 0)\n self.assertGreaterEqual(len(dead_stores['events']), 1)\n\n divide_zero = [r for r in html_reports if\n r['checkerName'] == 'core.DivideZero'][0]\n self.assertEqual(len(divide_zero['notes']), 0)\n self.assertEqual(len(divide_zero['macros']), 0)\n self.assertGreaterEqual(len(divide_zero['events']), 1)\n\n def test_html_builder(self):\n \"\"\" Test building html files from plist files on multiple projects. \"\"\"\n self.__test_html_builder('notes')\n self.__test_html_builder('macros')\n self.__test_html_builder('simple')\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":44} {"diff_hunk":"@@ -31,6 +31,7 @@ type Set interface {\n \tIter(func(item interface{}) error)\n \tCopy() Set\n \tEquals(Set) bool\n+\tContainsAll(Set) bool\n }\n \n type empty struct{}","source_code":"\/\/ Copyright (c) 2016-2017 Tigera, Inc. 
All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage set\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\ntype Set interface {\n\tLen() int\n\tAdd(interface{})\n\tAddAll(itemArray interface{})\n\tDiscard(interface{})\n\tClear()\n\tContains(interface{}) bool\n\tIter(func(item interface{}) error)\n\tCopy() Set\n\tEquals(Set) bool\n}\n\ntype empty struct{}\n\nvar emptyValue = empty{}\n\nvar (\n\tStopIteration = errors.New(\"Stop iteration\")\n\tRemoveItem = errors.New(\"Remove item\")\n)\n\nfunc New() Set {\n\treturn make(mapSet)\n}\n\nfunc From(members ...interface{}) Set {\n\ts := New()\n\ts.AddAll(members)\n\treturn s\n}\n\nfunc FromArray(membersArray interface{}) Set {\n\ts := New()\n\ts.AddAll(membersArray)\n\treturn s\n}\n\nfunc Empty() Set {\n\treturn mapSet(nil)\n}\n\ntype mapSet map[interface{}]empty\n\nfunc (set mapSet) Len() int {\n\treturn len(set)\n}\n\nfunc (set mapSet) Add(item interface{}) {\n\tset[item] = emptyValue\n}\n\nfunc (set mapSet) AddAll(itemArray interface{}) {\n\n\tarrVal := reflect.ValueOf(itemArray)\n\tfor i := 0; i < arrVal.Len(); i++ {\n\t\tset.Add(arrVal.Index(i).Interface())\n\t}\n}\n\nfunc (set mapSet) Discard(item interface{}) {\n\tdelete(set, item)\n}\n\nfunc (set mapSet) Clear() {\n\tfor item := range set {\n\t\tdelete(set, item)\n\t}\n}\n\nfunc (set mapSet) Contains(item interface{}) bool {\n\t_, present := set[item]\n\treturn present\n}\n\nfunc (set mapSet) Iter(visitor func(item interface{}) error) {\nloop:\n\tfor item := range set {\n\t\terr := visitor(item)\n\t\tswitch err {\n\t\tcase StopIteration:\n\t\t\tbreak loop\n\t\tcase RemoveItem:\n\t\t\tdelete(set, item)\n\t\tcase nil:\n\t\t\tbreak\n\t\tdefault:\n\t\t\tlog.WithError(err).Panic(\"Unexpected iteration error\")\n\t\t}\n\t}\n}\n\nfunc (set mapSet) Copy() Set {\n\tcpy := New()\n\tfor item := range set {\n\t\tcpy.Add(item)\n\t}\n\treturn cpy\n}\n\nfunc (set mapSet) Equals(other Set) bool {\n\tif set.Len() != other.Len() {\n\t\treturn false\n\t}\n\tfor item := range set {\n\t\tif !other.Contains(item) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":45} {"diff_hunk":"@@ -14,7 +14,7 @@ from thrift.protocol import TJSONProtocol\n from thrift.protocol.TProtocol import TProtocolException\n \n import shared\n-from Authentication import codeCheckerAuthentication\n+from Authentication_v6 import codeCheckerAuthentication\n \n from libcodechecker import session_manager\n ","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. 
See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n\nimport os\nimport sys\n# import datetime\nimport socket\n\nfrom thrift.transport import THttpClient\nfrom thrift.protocol import TJSONProtocol\nfrom thrift.protocol.TProtocol import TProtocolException\n\nimport shared\nfrom Authentication import codeCheckerAuthentication\n\nfrom libcodechecker import session_manager\n\n\nclass ThriftAuthHelper():\n def __init__(self, host, port, uri, session_token=None):\n self.__host = host\n self.__port = port\n self.transport = THttpClient.THttpClient(self.__host, self.__port, uri)\n self.protocol = TJSONProtocol.TJSONProtocol(self.transport)\n self.client = codeCheckerAuthentication.Client(self.protocol)\n\n if session_token:\n headers = {'Cookie': session_manager.SESSION_COOKIE_NAME +\n \"=\" + session_token}\n self.transport.setCustomHeaders(headers)\n\n # ------------------------------------------------------------\n\n def ThriftClientCall(function):\n # print type(function)\n funcName = function.__name__\n\n def wrapper(self, *args, **kwargs):\n # print('['+host+':'+str(port)+'] >>>>> ['+funcName+']')\n # before = datetime.datetime.now()\n self.transport.open()\n func = getattr(self.client, funcName)\n try:\n res = func(*args, **kwargs)\n\n except shared.ttypes.RequestFailed as reqfailure:\n if reqfailure.errorCode == shared.ttypes.ErrorCode.DATABASE:\n print('Database error on server')\n print(str(reqfailure.message))\n elif reqfailure.errorCode ==\\\n shared.ttypes.ErrorCode.AUTH_DENIED:\n print('Authentication denied.')\n raise reqfailure\n elif reqfailure.errorCode ==\\\n shared.ttypes.ErrorCode.UNAUTHORIZED:\n print('Unauthorised.')\n raise reqfailure\n else:\n print('Other error')\n print(str(reqfailure))\n\n sys.exit(1)\n except TProtocolException as ex:\n print(\"Connection failed to {0}:{1}\"\n .format(self.__host, self.__port))\n sys.exit(1)\n except socket.error as serr:\n errCause = os.strerror(serr.errno)\n print(errCause)\n print(str(serr))\n sys.exit(1)\n\n # after = datetime.datetime.now()\n # timediff = after - before\n # diff = timediff.microseconds\/1000\n # print('['+str(diff)+'ms] <<<<< ['+host+':'+str(port)+']')\n # print res\n self.transport.close()\n return res\n\n return wrapper\n\n # ============= Authentication and session handling =============\n @ThriftClientCall\n def getAuthParameters(self):\n pass\n\n @ThriftClientCall\n def getAcceptedAuthMethods(self):\n pass\n\n @ThriftClientCall\n def performLogin(self, auth_method, auth_string):\n pass\n\n @ThriftClientCall\n def destroySession(self):\n pass\n\n # ============= Authorization, permission management =============\n @ThriftClientCall\n def getPermissions(self, scope):\n pass\n\n @ThriftClientCall\n def getPermissionsForUser(self, scope, extra_params, filter):\n pass\n\n @ThriftClientCall\n def getAuthorisedNames(self, permission, extra_params):\n pass\n\n @ThriftClientCall\n def addPermission(self, permission, auth_name, is_group, extra_params):\n pass\n\n @ThriftClientCall\n def removePermission(self, permission, auth_name, is_group, extra_params):\n pass\n\n @ThriftClientCall\n def hasPermission(self, permission, extra_params):\n pass\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":46} {"diff_hunk":"@@ -1,5 +1,5 @@\n \/* **********************************************************\n- * Copyright (c) 2010-2019 Google, Inc. All rights reserved.\n+ * Copyright (c) 2010-2021 Google, Inc. All rights reserved.\n * Copyright (c) 2003-2010 VMware, Inc. 
All rights reserved.\n * **********************************************************\/\n ","source_code":"\/* **********************************************************\n * Copyright (c) 2010-2019 Google, Inc. All rights reserved.\n * Copyright (c) 2003-2010 VMware, Inc. All rights reserved.\n * **********************************************************\/\n\n\/*\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n *\n * * Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and\/or other materials provided with the distribution.\n *\n * * Neither the name of VMware, Inc. nor the names of its contributors may be\n * used to endorse or promote products derived from this software without\n * specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n * DAMAGE.\n *\/\n\n\/* Copyright (c) 2003-2007 Determina Corp. *\/\n\n\/*\n * ntdll_shared.c\n * Routines for calling Windows system calls via the ntdll.dll wrappers,\n * meant for sharing beyond the core DR library.\n * Xref i#1409 on splitting files up and eliminating the NOT_DYNAMORIO_CORE\n * ifdefs that today select portions of files.\n *\/\n#include \"configure.h\"\n#ifdef NOT_DYNAMORIO_CORE\n# include \"globals_shared.h\"\n# define ASSERT(x)\n# define ASSERT_CURIOSITY(x)\n# define ASSERT_NOT_REACHED()\n# define ASSERT_NOT_IMPLEMENTED(x)\n# define DODEBUG(x)\n# define DOCHECK(n, x)\n# define DEBUG_DECLARE(x)\n# pragma warning(disable : 4210) \/\/ nonstd extension: function given file scope\n# pragma warning(disable : 4204) \/\/ nonstd extension: non-constant aggregate\n \/\/ initializer\n# define INVALID_FILE INVALID_HANDLE_VALUE\n# define STATUS_NOT_IMPLEMENTED ((NTSTATUS)0xC0000002L)\n#else\n\/* we include globals.h mainly for ASSERT, even though we're\n * used by preinject.\n * preinject just defines its own d_r_internal_error!\n *\/\n# include \"..\/globals.h\"\n# include \"..\/module_shared.h\"\n#endif\n\n\/* We have to hack away things we use here that won't work for non-core *\/\n#if defined(NOT_DYNAMORIO_CORE_PROPER) || defined(NOT_DYNAMORIO_CORE)\n# undef ASSERT_OWN_NO_LOCKS\n# define ASSERT_OWN_NO_LOCKS() \/* who cares if not the core *\/\n#endif\n\n#include \"ntdll_shared.h\"\n\n#ifndef X64\nNTSTATUS\nnt_wow64_read_virtual_memory64(HANDLE process, uint64 base, void *buffer,\n size_t buffer_length, size_t *bytes_read)\n{\n \/* This syscall was added in 2003 so we can't statically link. 
*\/\n typedef NTSTATUS(NTAPI * NtWow64ReadVirtualMemory64_t)(\n HANDLE ProcessHandle, IN PVOID64 BaseAddress, OUT PVOID Buffer,\n IN ULONGLONG BufferSize, OUT PULONGLONG NumberOfBytesRead);\n static NtWow64ReadVirtualMemory64_t ntcall;\n NTSTATUS res;\n if (ntcall == NULL) {\n# if !defined(NOT_DYNAMORIO_CORE) && !defined(NOT_DYNAMORIO_CORE_PROPER)\n \/* The first call may not be during init so we have to unprot *\/\n if (dynamo_initialized)\n SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);\n# endif\n ntcall = (NtWow64ReadVirtualMemory64_t)\n# ifdef NOT_DYNAMORIO_CORE\n GetProcAddress(GetModuleHandle(\"ntdll.dll\"), \"NtWow64ReadVirtualMemory64\");\n# else\n d_r_get_proc_address(get_ntdll_base(), \"NtWow64ReadVirtualMemory64\");\n# endif\n# if !defined(NOT_DYNAMORIO_CORE) && !defined(NOT_DYNAMORIO_CORE_PROPER)\n if (dynamo_initialized)\n SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);\n# endif\n }\n if (ntcall == NULL) {\n \/* We do not need to fall back to NtReadVirtualMemory, b\/c\n * NtWow64ReadVirtualMemory64 was added in xp64==2003 and so should\n * always exist if we are in a WOW64 process: and we should only be\n * called from a WOW64 process.\n *\/\n ASSERT_NOT_REACHED();\n res = STATUS_NOT_IMPLEMENTED;\n } else {\n uint64 len;\n res = ntcall(process, (PVOID64)base, buffer, (ULONGLONG)buffer_length, &len);\n if (bytes_read != NULL)\n *bytes_read = (size_t)len;\n }\n return res;\n}\n#endif\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":47} {"diff_hunk":"@@ -126,7 +126,7 @@ class AnalyzerConfigHandler(object):\n else:\n # Turn default checkers on.\n for checker in checker_config['default']:\n- self.set_checker_enabled(checker, True)\n+ self.set_checker_enabled(checker)\n \n # If enable_all is given, almost all checkers should be enabled.\n if enable_all:","source_code":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. 
See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n\"\"\"\nStatic analyzer configuration handler.\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nfrom abc import ABCMeta\nimport collections\nimport os\nimport platform\nimport sys\n\nfrom codechecker_common.logger import get_logger\n\nLOG = get_logger('system')\n\n\nclass AnalyzerConfigHandler(object):\n \"\"\"\n Handle the checker configurations and enabled disabled checkers lists.\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self):\n\n self.analyzer_binary = None\n self.analyzer_plugins_dir = None\n self.compiler_resource_dir = ''\n self.analyzer_extra_arguments = []\n self.checker_config = ''\n self.report_hash = None\n\n # The key is the checker name, the value is a tuple.\n # False if disabled (should be by default).\n # True if checker is enabled.\n # (False\/True, 'checker_description')\n self.__available_checkers = collections.OrderedDict()\n\n @property\n def analyzer_plugins(self):\n \"\"\"\n Full path of the analyzer plugins.\n \"\"\"\n plugin_dir = self.analyzer_plugins_dir\n if not plugin_dir or not os.path.exists(plugin_dir):\n return []\n\n analyzer_plugins = [os.path.join(plugin_dir, f)\n for f in os.listdir(plugin_dir)\n if os.path.isfile(os.path.join(plugin_dir, f))\n and f.endswith(\".so\")]\n return analyzer_plugins\n\n def add_checker(self, checker_name, enabled, description):\n \"\"\"\n Add additional checker.\n Tuple of (checker_name, True or False).\n \"\"\"\n self.__available_checkers[checker_name] = (enabled, description)\n\n def set_checker_enabled(self, checker_name, enabled=True):\n \"\"\"\n Enable checker, keep description if already set.\n \"\"\"\n for ch_name, values in self.__available_checkers.items():\n if ch_name.startswith(checker_name) or \\\n ch_name.endswith(checker_name):\n _, description = values\n self.__available_checkers[ch_name] = (enabled, description)\n\n def checks(self):\n \"\"\"\n Return the checkers.\n \"\"\"\n return self.__available_checkers\n\n def __gen_name_variations(self):\n \"\"\"\n Generate all applicable name variations from the given checker list.\n \"\"\"\n checker_names = (name for name in self.__available_checkers)\n reserved_names = []\n\n for name in checker_names:\n delim = '.' if '.' 
in name else '-'\n parts = name.split(delim)\n # Creates a list of variations from a checker name, e.g.\n # ['security', 'security.insecureAPI', 'security.insecureAPI.gets']\n # from 'security.insecureAPI.gets' or\n # ['misc', 'misc-dangling', 'misc-dangling-handle']\n # from 'misc-dangling-handle'.\n v = [delim.join(parts[:(i + 1)]) for i in range(len(parts))]\n reserved_names += v\n\n return reserved_names\n\n def initialize_checkers(self,\n available_profiles,\n package_root,\n checkers,\n checker_config=None,\n cmdline_checkers=None,\n enable_all=False):\n \"\"\"\n Initializes the checker list for the specified config handler based on\n given checker profiles, commandline arguments and the\n analyzer-retrieved checker list.\n \"\"\"\n\n # By default disable all checkers.\n for checker_name, description in checkers:\n self.add_checker(checker_name, False, description)\n\n # Set default enabled or disabled checkers, based on the config file.\n if checker_config:\n # Check whether a default profile exists.\n if 'default' not in checker_config:\n LOG.warning(\"No default profile found!\")\n else:\n # Turn default checkers on.\n for checker in checker_config['default']:\n self.set_checker_enabled(checker, True)\n\n # If enable_all is given, almost all checkers should be enabled.\n if enable_all:\n for checker_name, enabled in checkers:\n if not checker_name.startswith(\"alpha.\") and \\\n not checker_name.startswith(\"debug.\") and \\\n not checker_name.startswith(\"osx.\"):\n # There are a few exceptions, though, which still need to\n # be manually enabled by the user: alpha and debug.\n self.set_checker_enabled(checker_name)\n\n if checker_name.startswith(\"osx.\") and \\\n platform.system() == 'Darwin':\n # OSX checkers are only enable-all'd if we are on OSX.\n self.set_checker_enabled(checker_name)\n\n # Set user defined enabled or disabled checkers from the command line.\n if cmdline_checkers:\n\n # Construct a list of reserved checker names.\n # (It is used to check if a profile name is valid.)\n reserved_names = self.__gen_name_variations()\n\n for identifier, enabled in cmdline_checkers:\n\n # The identifier is a profile name.\n if identifier in available_profiles:\n profile_name = identifier\n\n if profile_name == \"list\":\n LOG.error(\"'list' is a reserved profile keyword. \")\n LOG.error(\"Please choose another profile name in \"\n \"'%s'\/config\/config.json and rebuild.\",\n package_root)\n sys.exit(1)\n\n if profile_name in reserved_names:\n LOG.warning(\"Profile name '%s' conflicts with a \"\n \"checker(-group) name.\", profile_name)\n\n # Enable or disable all checkers belonging to the profile.\n for checker in checker_config[profile_name]:\n self.set_checker_enabled(checker, enabled)\n\n # The identifier is a checker(-group) name.\n else:\n checker_name = identifier\n self.set_checker_enabled(checker_name, enabled)\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":48} {"diff_hunk":"@@ -24,7 +24,7 @@ class ClangSAConfigHandler(config_handler.AnalyzerConfigHandler):\n Configuration handler for the clang static analyzer.\n \"\"\"\n \n- def __init__(self, environ):\n+ def __init__(self, environ, analyzer_binary):\n super(ClangSAConfigHandler, self).__init__()\n self.ctu_dir = ''\n self.ctu_on_demand = False","source_code":"# -------------------------------------------------------------------------\n#\n# Part of the CodeChecker project, under the Apache License v2.0 with\n# LLVM Exceptions. 
See LICENSE for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n#\n# -------------------------------------------------------------------------\n\"\"\"\nClang Static analyzer configuration handler.\n\"\"\"\nimport os\n\nfrom codechecker_analyzer import env\nfrom codechecker_common.logger import get_logger\nfrom .ctu_autodetection import CTUAutodetection\n\nfrom .. import config_handler\n\nLOG = get_logger('analyzer.clangsa')\n\n\nclass ClangSAConfigHandler(config_handler.AnalyzerConfigHandler):\n \"\"\"\n Configuration handler for the clang static analyzer.\n \"\"\"\n\n def __init__(self, environ):\n super(ClangSAConfigHandler, self).__init__()\n self.ctu_dir = ''\n self.ctu_on_demand = False\n self.log_file = ''\n self.path_env_extra = ''\n self.ld_lib_path_extra = ''\n self.enable_z3 = False\n self.enable_z3_refutation = False\n self.environ = environ\n self.version_info = None\n\n @property\n def analyzer_plugins(self):\n \"\"\" Full path of the analyzer plugins. \"\"\"\n plugin_dir = self.analyzer_plugins_dir\n\n clangsa_plugin_dir = env.get_clangsa_plugin_dir()\n is_analyzer_from_path = env.is_analyzer_from_path()\n if is_analyzer_from_path:\n if not clangsa_plugin_dir:\n return []\n\n # If the CC_ANALYZERS_FROM_PATH and CC_CLANGSA_PLUGIN_DIR\n # environment variables are set we will use this value as the\n # plugin directory.\n plugin_dir = clangsa_plugin_dir\n\n if not plugin_dir or not os.path.exists(plugin_dir):\n return []\n\n return [os.path.join(plugin_dir, f)\n for f in os.listdir(plugin_dir)\n if os.path.isfile(os.path.join(plugin_dir, f))\n and f.endswith(\".so\")]\n\n @property\n def ctu_capability(self):\n return CTUAutodetection(self.analyzer_binary, self.environ)\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":49} {"diff_hunk":"@@ -69,7 +69,10 @@ char *gpu_name_get()\n \t}\n \n \tchar *gpu_name = get_line(pipe);\n-\tfclose(pipe);\n+\n+\tstring_chomp(gpu_name);\n+\n+\tpclose(pipe);\n \n \treturn gpu_name;\n }","source_code":"\/*\nCopyright (C) 2013- The University of Notre Dame\nThis software is distributed under the GNU General Public License.\nSee the file COPYING for details.\n*\/\n\n#include \"gpu_info.h\"\n#include \"stringtools.h\"\n#include \"get_line.h\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n\n\n#define GPU_AUTODETECT \"cctools_gpu_autodetect\"\n\nint gpu_info_get()\n{\n\tint pipefd[2];\n\tpipe(pipefd);\n\n\tpid_t pid = fork();\n\n\tif(pid<0) {\n\t\treturn 0;\n\t} else if(pid==0) {\n\t\tclose(pipefd[0]);\n\t\tdup2(pipefd[1], fileno(stdout));\n\t\tchar *args[] = {GPU_AUTODETECT, NULL};\n\t\tif(!access(GPU_AUTODETECT, R_OK|X_OK)){\n\t\t\texecv(GPU_AUTODETECT, args);\n\t\t} else {\n\t\t\texecvp(GPU_AUTODETECT, args);\n\t\t}\n\t\t_exit(0);\n\t} else {\n\t\tclose(pipefd[1]);\n\t\tint status = 0;\n\t\tint gpu_count = 0;\n\t\tchar buffer[10]; \/* Enough characters to hold a decimal representation of a 32 bit int. 
*\/\n\t\tif(read(pipefd[0], buffer, 10)){\n\t\t\twaitpid(pid, &status, 0);\n\t\t\tgpu_count = atoi(buffer);\n\t\t}\n\n\t\tclose(pipefd[0]);\n\t\treturn gpu_count;\n\t}\n}\n\nchar *gpu_name_get()\n{\n\tchar *nvidia_cmd = \"\/bin\/nvidia-smi\";\n\n\tif(access(nvidia_cmd, X_OK) != 0) {\n\t\treturn NULL;\n\t}\n\n\tFILE *pipe = popen(\"\/bin\/nvidia-smi --query-gpu=gpu_name --format=csv,noheader\", \"r\");\n\tif(!pipe) {\n\t\treturn NULL;\n\t}\n\n\tchar *gpu_name = get_line(pipe);\n\tfclose(pipe);\n\n\treturn gpu_name;\n}\n\n\/* vim: set noexpandtab tabstop=4: *\/\n","lang_cluster":"C","diff_tag":0,"review_comment":"","id":50} {"diff_hunk":"@@ -1,5 +1,4 @@\n using System;\n-using System.Reflection;\n using System.Threading;\n using System.Threading.Tasks;\n using Datadog.Trace.ClrProfiler.Emit;","source_code":"using System;\nusing System.Reflection;\nusing System.Threading;\nusing System.Threading.Tasks;\nusing Datadog.Trace.ClrProfiler.Emit;\nusing Datadog.Trace.ClrProfiler.Helpers;\nusing Datadog.Trace.Logging;\n\nnamespace Datadog.Trace.ClrProfiler.Integrations\n{\n \/\/\/ \n \/\/\/ Traces an Elasticsearch pipeline\n \/\/\/ <\/summary>\n public static class ElasticsearchNet5Integration\n {\n private const string IntegrationName = \"ElasticsearchNet5\";\n private const string Version5 = \"5\";\n private const string ElasticsearchAssembly = \"Elasticsearch.Net\";\n private const string RequestPipelineInterface = \"Elasticsearch.Net.IRequestPipeline\";\n\n private static readonly ILog Log = LogProvider.GetLogger(typeof(ElasticsearchNet5Integration));\n private static readonly Type ElasticsearchResponseType = Type.GetType(\"Elasticsearch.Net.ElasticsearchResponse`1, Elasticsearch.Net\", throwOnError: false);\n\n \/\/\/ \n \/\/\/ Traces a synchronous call to Elasticsearch.\n \/\/\/ <\/summary>\n \/\/\/ The type of the response<\/typeparam>\n \/\/\/ The pipeline for the original method<\/param>\n \/\/\/ The request data<\/param>\n \/\/\/ The OpCode used in the original method call.<\/param>\n \/\/\/ The mdToken of the original method call.<\/param>\n \/\/\/ A pointer to the module version GUID.<\/param>\n \/\/\/ The original result<\/returns>\n [InterceptMethod(\n CallerAssembly = ElasticsearchAssembly,\n TargetAssembly = ElasticsearchAssembly,\n TargetType = RequestPipelineInterface,\n TargetSignatureTypes = new[] { \"Elasticsearch.Net.ElasticsearchResponse`1\", \"Elasticsearch.Net.RequestData\" },\n TargetMinimumVersion = Version5,\n TargetMaximumVersion = Version5)]\n public static object CallElasticsearch(\n object pipeline,\n object requestData,\n int opCode,\n int mdToken,\n long moduleVersionPtr)\n {\n const string methodName = nameof(CallElasticsearch);\n Func callElasticSearch;\n var pipelineType = pipeline.GetType();\n var genericArgument = typeof(TResponse);\n\n try\n {\n callElasticSearch =\n MethodBuilder>\n .Start(moduleVersionPtr, mdToken, opCode, methodName)\n .WithConcreteType(pipelineType)\n .WithMethodGenerics(genericArgument)\n .WithParameters(requestData)\n .Build();\n }\n catch (Exception ex)\n {\n \/\/ profiled app will not continue working as expected without this method\n Log.ErrorException($\"Error retrieving {pipelineType.Name}.{methodName}(RequestData requestData)\", ex);\n throw;\n }\n\n using (var scope = ElasticsearchNetCommon.CreateScope(Tracer.Instance, IntegrationName, pipeline, requestData))\n {\n try\n {\n return callElasticSearch(pipeline, requestData);\n }\n catch (Exception ex) when (scope?.Span.SetExceptionForFilter(ex) ?? 
false)\n {\n \/\/ unreachable code\n throw;\n }\n }\n }\n\n \/\/\/ \n \/\/\/ Traces an asynchronous call to Elasticsearch.\n \/\/\/ <\/summary>\n \/\/\/ Type type of the response<\/typeparam>\n \/\/\/ The pipeline for the original method<\/param>\n \/\/\/ The request data<\/param>\n \/\/\/ A cancellation token<\/param>\n \/\/\/ The OpCode used in the original method call.<\/param>\n \/\/\/ The mdToken of the original method call.<\/param>\n \/\/\/ A pointer to the module version GUID.<\/param>\n \/\/\/ The original result<\/returns>\n [InterceptMethod(\n CallerAssembly = ElasticsearchAssembly,\n TargetAssembly = ElasticsearchAssembly,\n TargetType = RequestPipelineInterface,\n TargetSignatureTypes = new[] { \"System.Threading.Tasks.Task`1>\", \"Elasticsearch.Net.RequestData\", ClrNames.CancellationToken },\n TargetMinimumVersion = Version5,\n TargetMaximumVersion = Version5)]\n public static object CallElasticsearchAsync(\n object pipeline,\n object requestData,\n object cancellationTokenSource,\n int opCode,\n int mdToken,\n long moduleVersionPtr)\n {\n var tokenSource = cancellationTokenSource as CancellationTokenSource;\n var cancellationToken = tokenSource?.Token ?? CancellationToken.None;\n\n var genericArgument = typeof(TResponse);\n var genericResponseType = ElasticsearchResponseType.MakeGenericType(genericArgument);\n\n Func instrumentedMethod;\n\n try\n {\n instrumentedMethod =\n MethodBuilder>\n .Start(moduleVersionPtr, mdToken, opCode, nameof(CallElasticsearchAsync))\n .WithConcreteType(pipeline.GetType())\n .WithMethodGenerics(genericArgument)\n .WithParameters(requestData, cancellationToken)\n .ForceMethodDefinitionResolution()\n .Build();\n }\n catch (Exception ex)\n {\n Log.ErrorException($\"Error resolving Elasticsearch.Net.IRequestPipeline.{nameof(CallElasticsearchAsync)}(...)\", ex);\n throw;\n }\n\n return AsyncHelper.InvokeGenericTaskDelegate(\n owningType: ElasticsearchNetCommon.RequestPipelineType,\n taskResultType: genericResponseType,\n nameOfIntegrationMethod: nameof(CallElasticsearchAsyncInternal),\n integrationType: typeof(ElasticsearchNet5Integration),\n pipeline,\n requestData,\n cancellationToken,\n instrumentedMethod);\n }\n\n \/\/\/ \n \/\/\/ Traces an asynchronous call to Elasticsearch.\n \/\/\/ <\/summary>\n \/\/\/ Type type of the Task<\/typeparam>\n \/\/\/ The pipeline for the original method<\/param>\n \/\/\/ The request data<\/param>\n \/\/\/ A cancellation token<\/param>\n \/\/\/ A delegate for the method we are instrumenting<\/param>\n \/\/\/ The original result<\/returns>\n private static async Task CallElasticsearchAsyncInternal(\n object pipeline,\n object requestData,\n CancellationToken cancellationToken,\n Func originalMethod)\n {\n using (var scope = ElasticsearchNetCommon.CreateScope(Tracer.Instance, IntegrationName, pipeline, requestData))\n {\n try\n {\n var task = (Task)originalMethod(pipeline, requestData, cancellationToken);\n return await task.ConfigureAwait(false);\n }\n catch (Exception ex) when (scope?.Span.SetExceptionForFilter(ex) ?? 
false)\n {\n \/\/ unreachable code\n throw;\n }\n }\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":51} {"diff_hunk":"@@ -31,13 +31,15 @@ namespace Nethermind.Blockchain.Find\n private readonly IReceiptsRecovery _receiptsRecovery;\n private readonly int _maxBlockDepth;\n private readonly IBlockFinder _blockFinder;\n+ private readonly ILogger _logger;\n \n- public LogFinder(IBlockFinder blockFinder, IReceiptStorage receiptStorage, IBloomStorage bloomStorage, IReceiptsRecovery receiptsRecovery, int maxBlockDepth = 1000)\n+ public LogFinder(IBlockFinder blockFinder, IReceiptStorage receiptStorage, IBloomStorage bloomStorage, IReceiptsRecovery receiptsRecovery, ILogManager logManager, int maxBlockDepth = 1000)\n {\n _blockFinder = blockFinder ?? throw new ArgumentNullException(nameof(blockFinder));\n _receiptStorage = receiptStorage ?? throw new ArgumentNullException(nameof(receiptStorage));\n _bloomStorage = bloomStorage ?? throw new ArgumentNullException(nameof(bloomStorage));\n _receiptsRecovery = receiptsRecovery ?? throw new ArgumentNullException(nameof(receiptsRecovery));\n+ _logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager));\n _maxBlockDepth = maxBlockDepth;\n }\n ","source_code":"\/\/ Copyright (c) 2018 Demerzel Solutions Limited\n\/\/ This file is part of the Nethermind library.\n\/\/ \n\/\/ The Nethermind library is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/ \n\/\/ The Nethermind library is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/ \n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with the Nethermind. If not, see .\n\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing Nethermind.Blockchain.Filters;\nusing Nethermind.Blockchain.Receipts;\nusing Nethermind.Core;\nusing Nethermind.Store.Bloom;\n\nnamespace Nethermind.Blockchain.Find\n{\n public class LogFinder : ILogFinder\n {\n private readonly IReceiptStorage _receiptStorage;\n private readonly IBloomStorage _bloomStorage;\n private readonly IReceiptsRecovery _receiptsRecovery;\n private readonly int _maxBlockDepth;\n private readonly IBlockFinder _blockFinder;\n\n public LogFinder(IBlockFinder blockFinder, IReceiptStorage receiptStorage, IBloomStorage bloomStorage, IReceiptsRecovery receiptsRecovery, int maxBlockDepth = 1000)\n {\n _blockFinder = blockFinder ?? throw new ArgumentNullException(nameof(blockFinder));\n _receiptStorage = receiptStorage ?? throw new ArgumentNullException(nameof(receiptStorage));\n _bloomStorage = bloomStorage ?? throw new ArgumentNullException(nameof(bloomStorage));\n _receiptsRecovery = receiptsRecovery ?? throw new ArgumentNullException(nameof(receiptsRecovery));\n _maxBlockDepth = maxBlockDepth;\n }\n\n public IEnumerable FindLogs(LogFilter filter)\n {\n BlockHeader FindHeader(BlockParameter blockParameter, string name) => _blockFinder.FindHeader(blockParameter) ?? 
throw new ArgumentException(ILogFinder.NotFoundError, name);\n\n var toBlock = FindHeader(filter.ToBlock, nameof(filter.ToBlock));\n var fromBlock = FindHeader(filter.FromBlock, nameof(filter.FromBlock));\n\n if (fromBlock.Number > toBlock.Number && toBlock.Number != 0)\n {\n throw new ArgumentException(\"'From' block is later than 'to' block.\");\n }\n\n return ShouldUseBloomDatabase(fromBlock, toBlock) && CanUseBloomDatabase(toBlock, fromBlock)\n ? FilterLogsWithBloomsIndex(filter, fromBlock, toBlock) \n : FilterLogsIteratively(filter, fromBlock, toBlock);\n }\n\n private bool ShouldUseBloomDatabase(BlockHeader fromBlock, BlockHeader toBlock)\n {\n var blocksToSearch = toBlock.Number - fromBlock.Number + 1;\n return blocksToSearch > 1; \/\/ if we are searching only in 1 block skip bloom index altogether, this can be tweaked\n }\n\n private IEnumerable FilterLogsWithBloomsIndex(LogFilter filter, BlockHeader fromBlock, BlockHeader toBlock)\n {\n var enumeration = _bloomStorage.GetBlooms(fromBlock.Number, toBlock.Number);\n foreach (var bloom in enumeration)\n {\n if (filter.Matches(bloom) && enumeration.TryGetBlockNumber(out var blockNumber))\n {\n foreach (var filterLog in FindLogsInBlock(filter, _blockFinder.FindBlock(blockNumber)))\n {\n yield return filterLog;\n }\n }\n }\n }\n\n private bool CanUseBloomDatabase(BlockHeader toBlock, BlockHeader fromBlock) => _bloomStorage.ContainsRange(fromBlock.Number, toBlock.Number) && _blockFinder.IsMainChain(toBlock) && _blockFinder.IsMainChain(fromBlock);\n\n private IEnumerable FilterLogsIteratively(LogFilter filter, BlockHeader fromBlock, BlockHeader toBlock)\n {\n int count = 0;\n\n while (count < _maxBlockDepth && toBlock.Number >= (fromBlock?.Number ?? long.MaxValue))\n {\n foreach (var filterLog in FindLogsInBlock(filter, toBlock))\n {\n yield return filterLog;\n }\n\n if (!TryGetParentBlock(toBlock, out toBlock))\n {\n break;\n }\n\n count++;\n }\n }\n\n private IEnumerable FindLogsInBlock(LogFilter filter, BlockHeader block) =>\n filter.Matches(block.Bloom)\n ? 
FindLogsInBlock(filter, _blockFinder.FindBlock(block.Hash))\n : Enumerable.Empty();\n\n private IEnumerable FindLogsInBlock(LogFilter filter, Block block)\n {\n var receipts = _receiptStorage.FindForBlock(block, _receiptsRecovery);\n long logIndexInBlock = 0;\n foreach (var receipt in receipts)\n {\n if (receipt == null)\n {\n continue;\n }\n\n if (filter.Matches(receipt.Bloom))\n {\n for (var index = 0; index < receipt.Logs.Length; index++)\n {\n var log = receipt.Logs[index];\n if (filter.Accepts(log))\n {\n yield return new FilterLog(logIndexInBlock, index, receipt, log);\n }\n\n logIndexInBlock++;\n }\n }\n else\n {\n logIndexInBlock += receipt.Logs.Length;\n }\n }\n }\n\n private bool TryGetParentBlock(BlockHeader currentBlock, out BlockHeader parentHeader)\n {\n if (currentBlock.IsGenesis)\n {\n parentHeader = null;\n return false;\n }\n\n parentHeader = _blockFinder.FindParentHeader(currentBlock, BlockTreeLookupOptions.TotalDifficultyNotNeeded);\n return true;\n }\n }\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":52} {"diff_hunk":"@@ -81,9 +81,13 @@ namespace Datadog.Trace.DuckTyping\n public static T DuckAs(this object instance)\n where T : class\n {\n- if (DuckType.CanCreate(instance))\n+ if (instance is not null)\n {\n- return DuckType.Create(instance);\n+ DuckType.CreateTypeResult proxyResult = DuckType.CreateCache.GetProxy(instance.GetType());\n+ if (proxyResult.Success)\n+ {\n+ return proxyResult.CreateInstance(instance);\n+ }\n }\n \n return null;","source_code":"using System;\nusing System.Runtime.CompilerServices;\nusing Datadog.Trace.Logging;\n\nnamespace Datadog.Trace.DuckTyping\n{\n \/\/\/ \n \/\/\/ Duck type extensions\n \/\/\/ <\/summary>\n public static class DuckTypeExtensions\n {\n private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor(typeof(DuckType));\n\n \/\/\/ \n \/\/\/ Gets the duck type instance for the object implementing a base class or interface T\n \/\/\/ <\/summary>\n \/\/\/ Object instance<\/param>\n \/\/\/ Target type<\/typeparam>\n \/\/\/ DuckType instance<\/returns>\n [MethodImpl(MethodImplOptions.AggressiveInlining)]\n public static T DuckCast(this object instance)\n => DuckType.Create(instance);\n\n \/\/\/ \n \/\/\/ Gets the duck type instance for the object implementing a base class or interface T\n \/\/\/ <\/summary>\n \/\/\/ Object instance<\/param>\n \/\/\/ Target type<\/param>\n \/\/\/ DuckType instance<\/returns>\n [MethodImpl(MethodImplOptions.AggressiveInlining)]\n public static object DuckCast(this object instance, Type targetType)\n => DuckType.Create(targetType, instance);\n\n \/\/\/ \n \/\/\/ Tries to ducktype the object implementing a base class or interface T\n \/\/\/ <\/summary>\n \/\/\/ Target type<\/typeparam>\n \/\/\/ Object instance<\/param>\n \/\/\/ Ducktype instance<\/param>\n \/\/\/ true if the object instance was ducktyped; otherwise, false.<\/returns>\n [MethodImpl(MethodImplOptions.AggressiveInlining)]\n public static bool TryDuckCast(this object instance, out T value)\n {\n if (DuckType.CanCreate(instance))\n {\n value = DuckType.Create(instance);\n return true;\n }\n\n value = default;\n return false;\n }\n\n \/\/\/ \n \/\/\/ Tries to ducktype the object implementing a base class or interface T\n \/\/\/ <\/summary>\n \/\/\/ Object instance<\/param>\n \/\/\/ Target type<\/param>\n \/\/\/ Ducktype instance<\/param>\n \/\/\/ true if the object instance was ducktyped; otherwise, false.<\/returns>\n [MethodImpl(MethodImplOptions.AggressiveInlining)]\n public static bool TryDuckCast(this object 
instance, Type targetType, out object value)\n {\n if (DuckType.CanCreate(targetType, instance))\n {\n value = DuckType.Create(targetType, instance);\n return true;\n }\n\n value = default;\n return false;\n }\n\n \/\/\/ \n \/\/\/ Gets the duck type instance for the object implementing a base class or interface T\n \/\/\/ <\/summary>\n \/\/\/ Object instance<\/param>\n \/\/\/ Target type<\/typeparam>\n \/\/\/ DuckType instance<\/returns>\n [MethodImpl(MethodImplOptions.AggressiveInlining)]\n public static T DuckAs(this object instance)\n where T : class\n {\n if (DuckType.CanCreate(instance))\n {\n return DuckType.Create(instance);\n }\n\n return null;\n }\n\n \/\/\/ \n \/\/\/ Gets the duck type instance for the object implementing a base class or interface T\n \/\/\/ <\/summary>\n \/\/\/ Object instance<\/param>\n \/\/\/ Target type<\/param>\n \/\/\/ DuckType instance<\/returns>\n [MethodImpl(MethodImplOptions.AggressiveInlining)]\n public static object DuckAs(this object instance, Type targetType)\n {\n if (DuckType.CanCreate(targetType, instance))\n {\n return DuckType.Create(targetType, instance);\n }\n\n return null;\n }\n\n \/\/\/ \n \/\/\/ Gets if a proxy can be created\n \/\/\/ <\/summary>\n \/\/\/ Instance object<\/param>\n \/\/\/ Duck type<\/typeparam>\n \/\/\/ true if the proxy can be created; otherwise, false<\/returns>\n [MethodImpl(MethodImplOptions.AggressiveInlining)]\n public static bool DuckIs(this object instance)\n => DuckType.CanCreate(instance);\n\n \/\/\/ \n \/\/\/ Gets if a proxy can be created\n \/\/\/ <\/summary>\n \/\/\/ Instance object<\/param>\n \/\/\/ Duck type<\/param>\n \/\/\/ true if the proxy can be created; otherwise, false<\/returns>\n [MethodImpl(MethodImplOptions.AggressiveInlining)]\n public static bool DuckIs(this object instance, Type targetType)\n => DuckType.CanCreate(targetType, instance);\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":53} {"diff_hunk":"@@ -60,6 +60,20 @@ namespace Datadog.Trace.PlatformHelpers\n \n public string ResourceId { get; }\n \n+ public AzureContext AzureContext { get; private set; } = AzureContext.AzureAppService;\n+\n+ public string FunctionsExtensionVersion { get; }\n+\n+ public string FunctionsWorkerRuntime { get; }\n+\n+ public string InstanceName { get; }\n+\n+ public string InstanceId { get; }\n+\n+ public string OperatingSystem { get; }\n+\n+ public string Runtime { get; }\n+\n private string CompileResourceId()\n {\n string resourceId = null;","source_code":"using System;\nusing System.Collections;\nusing Datadog.Trace.ExtensionMethods;\nusing Datadog.Trace.Logging;\nusing Datadog.Trace.Util;\n\nnamespace Datadog.Trace.PlatformHelpers\n{\n internal class AzureAppServices\n {\n \/\/\/ \n \/\/\/ Configuration key which is used as a flag to tell us whether we are running in the context of Azure App Services.\n \/\/\/ <\/summary>\n internal static readonly string AzureAppServicesContextKey = \"DD_AZURE_APP_SERVICES\";\n\n \/\/\/ \n \/\/\/ Example: 8c56d827-5f07-45ce-8f2b-6c5001db5c6f+apm-dotnet-EastUSwebspace\n \/\/\/ Format: {subscriptionId}+{planResourceGroup}-{hostedInRegion}\n \/\/\/ <\/summary>\n internal static readonly string WebsiteOwnerNameKey = \"WEBSITE_OWNER_NAME\";\n\n \/\/\/ \n \/\/\/ This is the name of the resource group the site instance is assigned to.\n \/\/\/ <\/summary>\n internal static readonly string ResourceGroupKey = \"WEBSITE_RESOURCE_GROUP\";\n\n \/\/\/ \n \/\/\/ This is the unique name of the website instance within azure app services.\n \/\/\/ <\/summary>\n internal 
static readonly string SiteNameKey = \"WEBSITE_DEPLOYMENT_ID\";\n\n private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.GetLogger(typeof(AzureAppServices));\n\n static AzureAppServices()\n {\n Metadata = new AzureAppServices(EnvironmentHelpers.GetEnvironmentVariables());\n }\n\n public AzureAppServices(IDictionary environmentVariables)\n {\n IsRelevant = GetVariableIfExists(AzureAppServicesContextKey, environmentVariables)?.ToBoolean() ?? false;\n if (IsRelevant)\n {\n SubscriptionId = GetSubscriptionId(environmentVariables);\n ResourceGroup = GetVariableIfExists(ResourceGroupKey, environmentVariables);\n SiteName = GetVariableIfExists(SiteNameKey, environmentVariables);\n ResourceId = CompileResourceId();\n }\n }\n\n public static AzureAppServices Metadata { get; set; }\n\n public bool IsRelevant { get; }\n\n public string SubscriptionId { get; }\n\n public string ResourceGroup { get; }\n\n public string SiteName { get; }\n\n public string ResourceId { get; }\n\n private string CompileResourceId()\n {\n string resourceId = null;\n\n try\n {\n var success = true;\n if (SubscriptionId == null)\n {\n success = false;\n Log.Warning(\"Could not successfully retrieve the subscription ID from variable: {0}\", WebsiteOwnerNameKey);\n }\n\n if (SiteName == null)\n {\n success = false;\n Log.Warning(\"Could not successfully retrieve the deployment ID from variable: {0}\", SiteNameKey);\n }\n\n if (ResourceGroup == null)\n {\n success = false;\n Log.Warning(\"Could not successfully retrieve the resource group name from variable: {0}\", ResourceGroupKey);\n }\n\n if (success)\n {\n resourceId = $\"\/subscriptions\/{SubscriptionId}\/resourcegroups\/{ResourceGroup}\/providers\/microsoft.web\/sites\/{SiteName}\".ToLowerInvariant();\n }\n }\n catch (Exception ex)\n {\n Log.SafeLogError(ex, \"Could not successfully setup the resource id for azure app services.\");\n }\n\n return resourceId;\n }\n\n private string GetSubscriptionId(IDictionary environmentVariables)\n {\n try\n {\n var websiteOwner = GetVariableIfExists(WebsiteOwnerNameKey, environmentVariables);\n if (!string.IsNullOrWhiteSpace(websiteOwner))\n {\n var plusSplit = websiteOwner.Split('+');\n if (plusSplit.Length > 0 && !string.IsNullOrWhiteSpace(plusSplit[0]))\n {\n return plusSplit[0];\n }\n }\n }\n catch (Exception ex)\n {\n Log.SafeLogError(ex, \"Could not successfully retrieve the subscription id for azure app services.\");\n }\n\n return null;\n }\n\n private string GetVariableIfExists(string key, IDictionary environmentVariables)\n {\n if (environmentVariables.Contains(key))\n {\n return environmentVariables[key]?.ToString();\n }\n\n return null;\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":54} {"diff_hunk":"@@ -65,16 +65,19 @@ namespace Microsoft.AspNet.Server.Kestrel.Http\n return tcs.Task;\n }\n \n- private void OnConnection(UvStreamHandle listenSocket, int status)\n- {\n- var acceptSocket = new UvTcpHandle();\n- acceptSocket.Init(Thread.Loop, Thread.QueueCloseHandle);\n- listenSocket.Accept(acceptSocket);\n+ \/\/\/ \n+ \/\/\/ Creates the socket used to listen for incoming connections\n+ \/\/\/ <\/summary>\n+ protected abstract T CreateListenSocket(string host, int port);\n \n- DispatchConnection(acceptSocket);\n- }\n+ \/\/\/ \n+ \/\/\/ Handles an incoming connection\n+ \/\/\/ <\/summary>\n+ \/\/\/ Socket being used to listen on<\/param>\n+ \/\/\/ Connection status<\/param>\n+ protected abstract void OnConnection(T listenSocket, int status);\n \n- protected virtual void 
DispatchConnection(UvTcpHandle socket)\n+ protected virtual void DispatchConnection(T socket)\n {\n var connection = new Connection(this, socket);\n connection.Start();","source_code":"\/\/ Copyright (c) .NET Foundation. All rights reserved.\n\/\/ Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.\n\nusing Microsoft.AspNet.Server.Kestrel.Infrastructure;\nusing Microsoft.AspNet.Server.Kestrel.Networking;\nusing System;\nusing System.Diagnostics;\nusing System.Net;\nusing System.Threading.Tasks;\n\nnamespace Microsoft.AspNet.Server.Kestrel.Http\n{\n \/\/\/ \n \/\/\/ Summary description for Accept\n \/\/\/ <\/summary>\n public class Listener : ListenerContext, IDisposable\n {\n private static readonly Action _connectionCallback = ConnectionCallback;\n\n UvTcpHandle ListenSocket { get; set; }\n\n private static void ConnectionCallback(UvStreamHandle stream, int status, Exception error, object state)\n {\n if (error != null)\n {\n Trace.WriteLine(\"Listener.ConnectionCallback \" + error.ToString());\n }\n else\n {\n ((Listener)state).OnConnection(stream, status);\n }\n }\n\n public Listener(IMemoryPool memory)\n {\n Memory = memory;\n }\n\n public Task StartAsync(\n string scheme,\n string host,\n int port,\n KestrelThread thread,\n Func application)\n {\n Thread = thread;\n Application = application;\n\n var tcs = new TaskCompletionSource();\n Thread.Post(_ =>\n {\n try\n {\n ListenSocket = new UvTcpHandle();\n ListenSocket.Init(Thread.Loop, Thread.QueueCloseHandle);\n ListenSocket.Bind(new IPEndPoint(IPAddress.Any, port));\n ListenSocket.Listen(Constants.ListenBacklog, _connectionCallback, this);\n tcs.SetResult(0);\n }\n catch (Exception ex)\n {\n tcs.SetException(ex);\n }\n }, null);\n return tcs.Task;\n }\n\n private void OnConnection(UvStreamHandle listenSocket, int status)\n {\n var acceptSocket = new UvTcpHandle();\n acceptSocket.Init(Thread.Loop, Thread.QueueCloseHandle);\n listenSocket.Accept(acceptSocket);\n\n DispatchConnection(acceptSocket);\n }\n\n protected virtual void DispatchConnection(UvTcpHandle socket)\n {\n var connection = new Connection(this, socket);\n connection.Start();\n }\n\n public void Dispose()\n {\n \/\/ Ensure the event loop is still running.\n \/\/ If the event loop isn't running and we try to wait on this Post\n \/\/ to complete, then KestrelEngine will never be disposed and\n \/\/ the exception that stopped the event loop will never be surfaced.\n if (Thread.FatalError == null)\n {\n var tcs = new TaskCompletionSource();\n Thread.Post(\n _ =>\n {\n try\n {\n ListenSocket.Dispose();\n tcs.SetResult(0);\n }\n catch (Exception ex)\n {\n tcs.SetException(ex);\n }\n },\n null);\n\n \/\/ REVIEW: Should we add a timeout here to be safe?\n tcs.Task.Wait();\n }\n\n ListenSocket = null;\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":55} {"diff_hunk":"@@ -78,11 +78,6 @@ namespace OpenTelemetry.Context\n \/\/\/ The value of the context entry.<\/param>\n public DistributedContext(string key, string value)\n {\n- if (key is null || value is null)\n- {\n- throw new ArgumentNullException(key is null ? nameof(key) : nameof(value));\n- }\n-\n this.entries = carrier is NoopDistributedContextCarrier ? 
emptyList : new List(1) { new DistributedContextEntry(key, value) };\n }\n ","source_code":"\ufeff\/\/ \n\/\/ Copyright 2018, OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ <\/copyright>\n\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Runtime.Serialization.Formatters;\n\nnamespace OpenTelemetry.Context\n{\n \/\/\/ \n \/\/\/ Distributed context.\n \/\/\/ <\/summary>\n public readonly struct DistributedContext : IEquatable\n {\n private static DistributedContextCarrier carrier = NoopDistributedContextCarrier.Instance;\n private static List emptyList = new List();\n private readonly IEnumerable entries;\n\n \/\/\/ \n \/\/\/ Initializes a new instance of the struct.\n \/\/\/ <\/summary>\n \/\/\/ Entries for distributed context.<\/param>\n public DistributedContext(IEnumerable entries)\n {\n if (carrier is NoopDistributedContextCarrier || entries is null || entries.Count() == 0)\n {\n this.entries = emptyList;\n }\n else\n {\n \/\/ Filter the default and duplicate entries.\n List list = new List(entries.Count());\n for (int i = 0; i < entries.Count(); i++)\n {\n DistributedContextEntry entry = entries.ElementAt(i);\n if (entry == default)\n {\n continue;\n }\n\n int j;\n for (j = entries.Count() - 1; j > i; j--)\n {\n if (entry.Key == entries.ElementAt(j).Key)\n {\n break;\n }\n }\n\n if (j <= i)\n {\n list.Add(entry);\n }\n }\n\n this.entries = list;\n }\n }\n\n \/\/\/ \n \/\/\/ Initializes a new instance of the struct.\n \/\/\/ <\/summary>\n \/\/\/ The key of the context entry.<\/param>\n \/\/\/ The value of the context entry.<\/param>\n public DistributedContext(string key, string value)\n {\n if (key is null || value is null)\n {\n throw new ArgumentNullException(key is null ? nameof(key) : nameof(value));\n }\n\n this.entries = carrier is NoopDistributedContextCarrier ? emptyList : new List(1) { new DistributedContextEntry(key, value) };\n }\n\n \/\/\/ \n \/\/\/ Initializes a new instance of the struct.\n \/\/\/ <\/summary>\n \/\/\/ The distributed context entry.<\/param>\n public DistributedContext(DistributedContextEntry entry)\n {\n this.entries = carrier is NoopDistributedContextCarrier || entry == default ? 
emptyList : new List(1) { entry };\n }\n\n \/\/\/ \n \/\/\/ Gets empty object of struct.\n \/\/\/ <\/summary>\n public static DistributedContext Empty { get; } = new DistributedContext(emptyList);\n\n \/\/\/ \n \/\/\/ Gets the current .\n \/\/\/ <\/summary>\n public static DistributedContext Current => carrier.Current;\n\n \/\/\/ \n \/\/\/ Gets or sets the default carrier instance of the class.\n \/\/\/ SDK will need to override the value to AsyncLocalDistributedContextCarrier.Instance.\n \/\/\/ <\/summary>\n public static DistributedContextCarrier Carrier\n {\n get => carrier;\n set\n {\n if (value is null)\n {\n throw new ArgumentNullException(nameof(value));\n }\n\n carrier = value;\n }\n }\n\n \/\/\/ \n \/\/\/ Gets all the in this .\n \/\/\/ <\/summary>\n public IEnumerable Entries => this.entries;\n\n \/\/\/ \n \/\/\/ Sets the current .\n \/\/\/ <\/summary>\n \/\/\/ Context to set as current.<\/param>\n \/\/\/ Scope object. On disposal - original context will be restored.<\/returns>\n public static IDisposable SetCurrent(in DistributedContext context) => carrier.SetCurrent(context);\n\n \/\/\/ \n \/\/\/ Gets the with the specified name.\n \/\/\/ <\/summary>\n \/\/\/ Name of the to get.<\/param>\n \/\/\/ The with the specified name. If not found - null.<\/returns>\n public string GetEntryValue(string key) => this.entries.LastOrDefault(x => x.Key == key).Value;\n\n \/\/\/ \n public bool Equals(DistributedContext other)\n {\n if (this.entries.Count() != other.entries.Count())\n {\n return false;\n }\n\n foreach (DistributedContextEntry entry in this.entries)\n {\n if (other.GetEntryValue(entry.Key) != entry.Value)\n {\n return false;\n }\n }\n\n return true;\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":56} {"diff_hunk":"@@ -3,6 +3,7 @@\n \n using System;\n using System.IO;\n+using System.Net;\n using Microsoft.AspNetCore.Builder;\n using Microsoft.AspNetCore.Hosting;\n using Microsoft.AspNetCore.Http;","source_code":"\/\/ Copyright (c) .NET Foundation. All rights reserved.\n\/\/ Licensed under the Apache License, Version 2.0. 
See License.txt in the project root for license information.\n\nusing System;\nusing System.IO;\nusing Microsoft.AspNetCore.Builder;\nusing Microsoft.AspNetCore.Hosting;\nusing Microsoft.AspNetCore.Http;\nusing Microsoft.Extensions.Logging;\n\nnamespace SampleApp\n{\n public class Startup\n {\n public void Configure(IApplicationBuilder app, ILoggerFactory loggerFactory)\n {\n loggerFactory.AddConsole(LogLevel.Trace);\n var logger = loggerFactory.CreateLogger(\"Default\");\n\n app.Run(async context =>\n {\n var connectionFeature = context.Connection;\n logger.LogDebug($\"Peer: {connectionFeature.RemoteIpAddress?.ToString()}:{connectionFeature.RemotePort}\"\n + $\"{Environment.NewLine}\"\n + $\"Sock: {connectionFeature.LocalIpAddress?.ToString()}:{connectionFeature.LocalPort}\");\n\n var response = $\"hello, world{Environment.NewLine}\";\n context.Response.ContentLength = response.Length;\n context.Response.ContentType = \"text\/plain\";\n await context.Response.WriteAsync(response);\n });\n }\n\n public static void Main(string[] args)\n {\n var host = new WebHostBuilder()\n .UseKestrel(options =>\n {\n \/\/ options.ThreadCount = 4;\n options.NoDelay = true;\n options.UseHttps(\"testCert.pfx\", \"testPassword\");\n options.UseConnectionLogging();\n })\n .UseUrls(\"http:\/\/localhost:5000\", \"https:\/\/localhost:5001\")\n .UseContentRoot(Directory.GetCurrentDirectory())\n .UseStartup<Startup>()\n .Build();\n\n \/\/ The following section should be used to demo sockets\n \/\/var addresses = application.GetAddresses();\n \/\/addresses.Clear();\n \/\/addresses.Add(\"http:\/\/unix:\/tmp\/kestrel-test.sock\");\n\n host.Run();\n }\n }\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":57} {"diff_hunk":"@@ -16,6 +16,7 @@\n \/\/ \n \n using System.Collections.Generic;\n+using Nethermind.Abi;\n using Nethermind.Consensus.Transactions;\n using Nethermind.Core;\n ","source_code":"\ufeff\/\/ Copyright (c) 2018 Demerzel Solutions Limited\n\/\/ This file is part of the Nethermind library.\n\/\/ \n\/\/ The Nethermind library is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/ \n\/\/ The Nethermind library is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/ \n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with the Nethermind. 
If not, see <https:\/\/www.gnu.org\/licenses\/>.\n\/\/ \n\nusing System.Collections.Generic;\nusing Nethermind.Consensus.Transactions;\nusing Nethermind.Core;\n\nnamespace Nethermind.Consensus.AuRa.Validators\n{\n public partial class ContractBasedValidator : ITxSource\n {\n private readonly long _posdaoTransition;\n \n public IEnumerable<Transaction> GetTransactions(BlockHeader parent, long gasLimit)\n {\n if (ForSealing)\n {\n var newBlockNumber = parent.Number + 1;\n if (newBlockNumber < _posdaoTransition)\n {\n if (_logger.IsTrace) _logger.Trace(\"Skipping a call to emitInitiateChange\");\n }\n else\n {\n bool emitInitChangeCallable = false;\n\n try\n {\n emitInitChangeCallable = ValidatorContract.EmitInitiateChangeCallable(parent);\n }\n catch (AuRaException e)\n {\n if (_logger.IsError) _logger.Error($\"Call to {nameof(ValidatorContract.EmitInitiateChangeCallable)} failed.\", e);\n }\n\n if (emitInitChangeCallable)\n {\n if (_logger.IsTrace) _logger.Trace($\"New block #{newBlockNumber} issued \u2015 calling emitInitiateChange()\");\n Metrics.EmitInitiateChange++;\n yield return ValidatorContract.EmitInitiateChange();\n }\n else\n {\n if (_logger.IsTrace) _logger.Trace($\"New block #{newBlockNumber} issued \u2015 no need to call emitInitiateChange()\");\n }\n }\n }\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":58} {"diff_hunk":"@@ -100,15 +100,15 @@ namespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.DataCollection\n bool isRunStartingNow,\n ITestMessageEventHandler runEventsHandler)\n {\n- bool areTestCaseLevelEventsRequired = false;\n- bool isDataCollectionStarted = false;\n+ var areTestCaseLevelEventsRequired = false;\n+ var isDataCollectionStarted = false;\n IDictionary environmentVariables = null;\n \n var dataCollectionEventsPort = 0;\n this.InvokeDataCollectionServiceAction(\n () =>\n {\n- var result = this.dataCollectionRequestSender.SendBeforeTestRunStartAndGetResult(settingsXml);\n+ var result = this.dataCollectionRequestSender.SendBeforeTestRunStartAndGetResult(this.settingsXml, runEventsHandler);\n areTestCaseLevelEventsRequired = result.AreTestCaseLevelEventsRequired;\n environmentVariables = result.EnvironmentVariables;\n dataCollectionEventsPort = result.DataCollectionEventsPort;","source_code":"\/\/ Copyright (c) Microsoft Corporation. All rights reserved.\n\/\/ Licensed under the MIT license. 
See LICENSE file in the project root for full license information.\n\nnamespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.DataCollection\n{\n using System;\n using System.Collections.Generic;\n using System.Collections.ObjectModel;\n\n using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.DataCollection.Interfaces;\n using Microsoft.VisualStudio.TestPlatform.ObjectModel;\n using Microsoft.VisualStudio.TestPlatform.ObjectModel.Client;\n using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities.DataCollection.Interfaces;\n using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities.DataCollection;\n\n \/\/\/ \n \/\/\/ The test data collection client.\n \/\/\/ <\/summary>\n internal class ProxyDataCollectionManager : IProxyDataCollectionManager\n {\n private const string PortOption = \"--port\";\n\n private IDataCollectionRequestSender dataCollectionRequestSender;\n private IDataCollectionLauncher dataCollectionLauncher;\n private string settingsXml;\n\n \/\/\/ \n \/\/\/ Initializes a new instance of the class.\n \/\/\/ <\/summary>\n \/\/\/ \n \/\/\/ The arch.\n \/\/\/ <\/param>\n \/\/\/ \n \/\/\/ The settings Xml.\n \/\/\/ <\/param>\n public ProxyDataCollectionManager(Architecture arch, string settingsXml)\n : this(arch, settingsXml, new DataCollectionRequestSender(), new DataCollectionLauncher())\n {\n }\n\n \/\/\/ \n \/\/\/ Initializes a new instance of the class.\n \/\/\/ <\/summary>\n \/\/\/ \n \/\/\/ The data collection request sender.\n \/\/\/ <\/param>\n \/\/\/ \n \/\/\/ The data collection launcher.\n \/\/\/ <\/param>\n internal ProxyDataCollectionManager(Architecture arch, string settingsXml, IDataCollectionRequestSender dataCollectionRequestSender, IDataCollectionLauncher dataCollectionLauncher)\n {\n this.settingsXml = settingsXml;\n this.dataCollectionRequestSender = dataCollectionRequestSender;\n this.dataCollectionLauncher = dataCollectionLauncher;\n this.InitializeSocketCommunication(arch);\n }\n\n\n \/\/\/ \n \/\/\/ Invoked after ending of test run\n \/\/\/ <\/summary>\n \/\/\/ \n \/\/\/ The is Canceled.\n \/\/\/ <\/param>\n \/\/\/ \n \/\/\/ The run Events Handler.\n \/\/\/ <\/param>\n \/\/\/ \n \/\/\/ The .\n \/\/\/ <\/returns>\n public Collection AfterTestRunEnd(bool isCanceled, ITestMessageEventHandler runEventsHandler)\n {\n Collection attachmentSet = null;\n this.InvokeDataCollectionServiceAction(\n () =>\n {\n attachmentSet = this.dataCollectionRequestSender.SendAfterTestRunStartAndGetResult();\n },\n runEventsHandler);\n return attachmentSet;\n }\n\n \/\/\/ \n \/\/\/ Invoked before starting of test run\n \/\/\/ <\/summary>\n \/\/\/ \n \/\/\/ The reset Data Collectors.\n \/\/\/ <\/param>\n \/\/\/ \n \/\/\/ The is Run Starting Now.\n \/\/\/ <\/param>\n \/\/\/ \n \/\/\/ The run Events Handler.\n \/\/\/ <\/param>\n \/\/\/ \n \/\/\/ BeforeTestRunStartResult object\n \/\/\/ <\/returns>\n public DataCollectionParameters BeforeTestRunStart(\n bool resetDataCollectors,\n bool isRunStartingNow,\n ITestMessageEventHandler runEventsHandler)\n {\n bool areTestCaseLevelEventsRequired = false;\n bool isDataCollectionStarted = false;\n IDictionary environmentVariables = null;\n\n var dataCollectionEventsPort = 0;\n this.InvokeDataCollectionServiceAction(\n () =>\n {\n var result = this.dataCollectionRequestSender.SendBeforeTestRunStartAndGetResult(settingsXml);\n areTestCaseLevelEventsRequired = result.AreTestCaseLevelEventsRequired;\n environmentVariables = result.EnvironmentVariables;\n dataCollectionEventsPort = result.DataCollectionEventsPort;\n },\n 
runEventsHandler);\n return new DataCollectionParameters(\n isDataCollectionStarted,\n areTestCaseLevelEventsRequired,\n environmentVariables,\n dataCollectionEventsPort);\n }\n\n \/\/\/ \n \/\/\/ The dispose.\n \/\/\/ <\/summary>\n public void Dispose()\n {\n this.dataCollectionRequestSender.Close();\n }\n\n \/\/\/ \n \/\/\/ The initialize socket communication.\n \/\/\/ <\/summary>\n \/\/\/ \n \/\/\/ The arch.\n \/\/\/ <\/param>\n internal void InitializeSocketCommunication(Architecture arch)\n {\n var port = this.dataCollectionRequestSender.InitializeCommunication();\n\n this.dataCollectionLauncher.Initialize(arch);\n this.dataCollectionLauncher.LaunchDataCollector(null, this.GetCommandLineArguments(port));\n this.dataCollectionRequestSender.WaitForRequestHandlerConnection(connectionTimeout: 5000);\n }\n\n private void InvokeDataCollectionServiceAction(Action action, ITestMessageEventHandler runEventsHandler)\n {\n try\n {\n if (EqtTrace.IsVerboseEnabled)\n {\n EqtTrace.Verbose(\"ProxyDataCollectionManager.InvokeDataCollectionServiceAction: Starting.\");\n }\n\n action();\n if (EqtTrace.IsInfoEnabled)\n {\n EqtTrace.Info(\"ProxyDataCollectionManager.InvokeDataCollectionServiceAction: Completed.\");\n }\n }\n catch (Exception ex)\n {\n if (EqtTrace.IsWarningEnabled)\n {\n EqtTrace.Warning(\"ProxyDataCollectionManager.InvokeDataCollectionServiceAction: TestPlatformException = {0}.\", ex);\n }\n\n this.HandleExceptionMessage(runEventsHandler, ex);\n }\n }\n\n private void HandleExceptionMessage(ITestMessageEventHandler runEventsHandler, Exception exception)\n {\n if (EqtTrace.IsErrorEnabled)\n {\n EqtTrace.Error(exception);\n }\n\n runEventsHandler.HandleLogMessage(ObjectModel.Logging.TestMessageLevel.Error, exception.Message);\n }\n\n private IList GetCommandLineArguments(int portNumber)\n {\n var commandlineArguments = new List();\n\n commandlineArguments.Add(PortOption);\n commandlineArguments.Add(portNumber.ToString());\n\n return commandlineArguments;\n }\n }\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":59} {"diff_hunk":"@@ -42,7 +42,7 @@ namespace OpenTelemetry.Context.Propagation\n void Inject(PropagationContext context, T carrier, Action setter);\n \n \/\/\/ \n- \/\/\/ Extracts activity context from textual representation.\n+ \/\/\/ Extracts the context from a carrier.\n \/\/\/ <\/summary>\n \/\/\/ Type of object to extract context from. Typically HttpRequest or similar.<\/typeparam>\n \/\/\/ The default context to be used if Extract fails.<\/param>","source_code":"\/\/ \n\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ <\/copyright>\n\nusing System;\nusing System.Collections.Generic;\n\nnamespace OpenTelemetry.Context.Propagation\n{\n \/\/\/ \n \/\/\/ Text format wire context propagator. 
Helps to extract and inject context from textual\n \/\/\/ representation (typically http headers or metadata collection).\n \/\/\/ <\/summary>\n public interface IPropagator\n {\n \/\/\/ \n \/\/\/ Gets the list of headers used by propagator. The use cases of this are:\n \/\/\/ * allow pre-allocation of fields, especially in systems like gRPC Metadata\n \/\/\/ * allow a single-pass over an iterator (ex OpenTracing has no getter in TextMap).\n \/\/\/ <\/summary>\n ISet Fields { get; }\n\n \/\/\/ \n \/\/\/ Injects textual representation of activity context to transmit over the wire.\n \/\/\/ <\/summary>\n \/\/\/ Type of an object to set context on. Typically HttpRequest or similar.<\/typeparam>\n \/\/\/ The default context to transmit over the wire.<\/param>\n \/\/\/ Object to set context on. Instance of this object will be passed to setter.<\/param>\n \/\/\/ Action that will set name and value pair on the object.<\/param>\n void Inject(PropagationContext context, T carrier, Action setter);\n\n \/\/\/ \n \/\/\/ Extracts activity context from textual representation.\n \/\/\/ <\/summary>\n \/\/\/ Type of object to extract context from. Typically HttpRequest or similar.<\/typeparam>\n \/\/\/ The default context to be used if Extract fails.<\/param>\n \/\/\/ Object to extract context from. Instance of this object will be passed to the getter.<\/param>\n \/\/\/ Function that will return string value of a key with the specified name.<\/param>\n \/\/\/ Context from it's text representation.<\/returns>\n PropagationContext Extract(PropagationContext context, T carrier, Func> getter);\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":60} {"diff_hunk":"@@ -31,9 +31,7 @@ namespace Nethermind.Db\n public IDb ChtDb { get; } = new MemDb();\n public IDb BeamStateDb { get; } = new MemDb();\n \n- public IDb BaselineTreeDb { get; } = new MemDb();\n-\n- public IDb BaselineTreeMetadataDb { get; } = new MemDb();\n+ public IEnumerable OtherDbs => _otherDbs;\n \n public void Dispose()\n {","source_code":"\/\/ Copyright (c) 2018 Demerzel Solutions Limited\n\/\/ This file is part of the Nethermind library.\n\/\/ \n\/\/ The Nethermind library is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/ \n\/\/ The Nethermind library is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/ \n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with the Nethermind. 
If not, see <https:\/\/www.gnu.org\/licenses\/>.\n\nnamespace Nethermind.Db\n{\n public class MemDbProvider : IDbProvider\n {\n public ISnapshotableDb StateDb { get; } = new StateDb();\n public ISnapshotableDb CodeDb { get; } = new StateDb();\n public IColumnsDb<ReceiptsColumns> ReceiptsDb { get; } = new MemColumnsDb<ReceiptsColumns>();\n public IDb BlocksDb { get; } = new MemDb();\n public IDb HeadersDb { get; } = new MemDb();\n public IDb BlockInfosDb { get; } = new MemDb();\n public IDb PendingTxsDb { get; } = new MemDb();\n public IDb ConfigsDb { get; } = new MemDb();\n public IDb EthRequestsDb { get; } = new MemDb();\n public IDb BloomDb { get; } = new MemDb();\n public IDb ChtDb { get; } = new MemDb();\n public IDb BeamStateDb { get; } = new MemDb();\n\n public IDb BaselineTreeDb { get; } = new MemDb();\n\n public IDb BaselineTreeMetadataDb { get; } = new MemDb();\n\n public void Dispose()\n {\n StateDb?.Dispose();\n CodeDb?.Dispose();\n ReceiptsDb?.Dispose();\n BlocksDb?.Dispose();\n BlockInfosDb?.Dispose();\n PendingTxsDb?.Dispose();\n ConfigsDb?.Dispose();\n EthRequestsDb?.Dispose();\n BloomDb?.Dispose();\n ChtDb?.Dispose();\n BaselineTreeDb?.Dispose();\n BaselineTreeMetadataDb?.Dispose();\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":61} {"diff_hunk":"@@ -20,7 +20,7 @@ namespace Datadog.Trace.ClrProfiler.IntegrationTests.AdoNet\n {\n foreach (object[] item in PackageVersions.MySqlData)\n {\n- if ((string)item[0] == string.Empty || !((string)item[0]).StartsWith(\"8\"))\n+ if (!((string)item[0]).StartsWith(\"8\"))\n {\n continue;\n }","source_code":"using System.Collections.Generic;\nusing System.Linq;\nusing Datadog.Core.Tools;\nusing Datadog.Trace.Configuration;\nusing Datadog.Trace.TestHelpers;\nusing Xunit;\nusing Xunit.Abstractions;\n\nnamespace Datadog.Trace.ClrProfiler.IntegrationTests.AdoNet\n{\n public class MySqlCommandTests : TestHelper\n {\n public MySqlCommandTests(ITestOutputHelper output)\n : base(\"MySql\", output)\n {\n SetServiceVersion(\"1.0.0\");\n }\n\n public static IEnumerable<object[]> GetMySql8Data()\n {\n foreach (object[] item in PackageVersions.MySqlData)\n {\n if ((string)item[0] == string.Empty || !((string)item[0]).StartsWith(\"8\"))\n {\n continue;\n }\n\n yield return item.Concat(new object[] { false, false, }).ToArray();\n yield return item.Concat(new object[] { true, false, }).ToArray();\n yield return item.Concat(new object[] { true, true, }).ToArray();\n }\n }\n\n public static IEnumerable<object[]> GetOldMySqlData()\n {\n foreach (object[] item in PackageVersions.MySqlData)\n {\n if ((string)item[0] == string.Empty || ((string)item[0]).StartsWith(\"8\"))\n {\n continue;\n }\n\n yield return item.Concat(new object[] { false, false, }).ToArray();\n yield return item.Concat(new object[] { true, false, }).ToArray();\n yield return item.Concat(new object[] { true, true, }).ToArray();\n }\n }\n\n [Theory]\n [MemberData(nameof(GetMySql8Data))]\n [Trait(\"Category\", \"EndToEnd\")]\n public void SubmitsTracesWithNetStandardInMySql8(string packageVersion, bool enableCallTarget, bool enableInlining)\n {\n SubmitsTracesWithNetStandard(packageVersion, enableCallTarget, enableInlining);\n }\n\n [Theory]\n [MemberData(nameof(GetOldMySqlData))]\n [Trait(\"Category\", \"EndToEnd\")]\n [Trait(\"Category\", \"ArmUnsupported\")]\n public void SubmitsTracesWithNetStandardInOldMySql(string packageVersion, bool enableCallTarget, bool enableInlining)\n {\n SubmitsTracesWithNetStandard(packageVersion, enableCallTarget, enableInlining);\n }\n\n [Theory]\n [InlineData(false, false)]\n [InlineData(true, false)]\n [InlineData(true, 
true)]\n [Trait(\"Category\", \"EndToEnd\")]\n public void SpansDisabledByAdoNetExcludedTypes(bool enableCallTarget, bool enableInlining)\n {\n SetCallTargetSettings(enableCallTarget, enableInlining);\n\n var totalSpanCount = 21;\n\n const string dbType = \"mysql\";\n const string expectedOperationName = dbType + \".query\";\n\n SetEnvironmentVariable(ConfigurationKeys.AdoNetExcludedTypes, \"System.Data.SqlClient.SqlCommand;Microsoft.Data.SqlClient.SqlCommand;MySql.Data.MySqlClient.MySqlCommand;Npgsql.NpgsqlCommand\");\n\n int agentPort = TcpPortProvider.GetOpenPort();\n\n using (var agent = new MockTracerAgent(agentPort))\n using (ProcessResult processResult = RunSampleAndWaitForExit(agent.Port))\n {\n Assert.True(processResult.ExitCode >= 0, $\"Process exited with code {processResult.ExitCode}\");\n\n var spans = agent.WaitForSpans(totalSpanCount, returnAllOperations: true);\n Assert.NotEmpty(spans);\n Assert.Empty(spans.Where(s => s.Name.Equals(expectedOperationName)));\n }\n }\n\n private void SubmitsTracesWithNetStandard(string packageVersion, bool enableCallTarget, bool enableInlining)\n {\n SetCallTargetSettings(enableCallTarget, enableInlining);\n\n \/\/ Note: The automatic instrumentation currently bails out on the generic wrappers.\n \/\/ Once this is implemented, this will add another 1 group for the direct assembly reference\n \/\/ and another 1 group for the netstandard assembly reference\n#if NET452\n var expectedSpanCount = 50; \/\/ 7 queries * 7 groups + 1 internal query\n#else\n var expectedSpanCount = 78; \/\/ 7 queries * 11 groups + 1 internal query\n if (packageVersion == \"6.8.8\")\n {\n expectedSpanCount = 76; \/\/ For this version the callsite instrumentation returns 2 spans less.\n }\n#endif\n\n if (enableCallTarget)\n {\n#if NET452\n expectedSpanCount = 62;\n#else\n expectedSpanCount = 97;\n#endif\n }\n\n const string dbType = \"mysql\";\n const string expectedOperationName = dbType + \".query\";\n const string expectedServiceName = \"Samples.MySql-\" + dbType;\n\n \/\/ NOTE: opt into the additional instrumentation of calls into netstandard.dll\n SetEnvironmentVariable(\"DD_TRACE_NETSTANDARD_ENABLED\", \"true\");\n\n int agentPort = TcpPortProvider.GetOpenPort();\n\n using (var agent = new MockTracerAgent(agentPort))\n using (ProcessResult processResult = RunSampleAndWaitForExit(agent.Port, packageVersion: packageVersion))\n {\n Assert.True(processResult.ExitCode >= 0, $\"Process exited with code {processResult.ExitCode}\");\n\n var spans = agent.WaitForSpans(expectedSpanCount, operationName: expectedOperationName);\n Assert.Equal(expectedSpanCount, spans.Count);\n\n foreach (var span in spans)\n {\n Assert.Equal(expectedOperationName, span.Name);\n Assert.Equal(expectedServiceName, span.Service);\n Assert.Equal(SpanTypes.Sql, span.Type);\n Assert.Equal(dbType, span.Tags[Tags.DbType]);\n Assert.False(span.Tags?.ContainsKey(Tags.Version), \"External service span should not have service version tag.\");\n }\n }\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":62} {"diff_hunk":"@@ -14,6 +14,8 @@\n \/\/ You should have received a copy of the GNU Lesser General Public License\n \/\/ along with the Nethermind. 
If not, see <http:\/\/www.gnu.org\/licenses\/>.\n \n+using Nethermind.Core;\n+\n namespace Nethermind.KeyStore.Config\n {\n public class KeyStoreConfig : IKeyStoreConfig","source_code":"\ufeff\/\/ Copyright (c) 2018 Demerzel Solutions Limited\n\/\/ This file is part of the Nethermind library.\n\/\/ \n\/\/ The Nethermind library is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/ \n\/\/ The Nethermind library is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/ \n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with the Nethermind. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\nnamespace Nethermind.KeyStore.Config\n{\n public class KeyStoreConfig : IKeyStoreConfig\n {\n public string KeyStoreDirectory { get; set; } = \"keystore\";\n public string KeyStoreEncoding { get; set; } = \"UTF-8\";\n public string Kdf { get; set; } = \"scrypt\";\n public string Cipher { get; set; } = \"aes-128-ctr\";\n public int KdfparamsDklen { get; set; } = 32;\n public int KdfparamsN { get; set; } = 262144;\n public int KdfparamsP { get; set; } = 1;\n public int KdfparamsR { get; set; } = 8;\n \n public int KdfparamsSaltLen { get; set; } = 32;\n public int SymmetricEncrypterBlockSize { get; set; } = 128;\n public int SymmetricEncrypterKeySize { get; set; } = 128;\n public int IVSize { get; set; } = 16;\n public string TestNodeKey { get; set; }\n }\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":63} {"diff_hunk":"@@ -112,6 +112,13 @@ namespace Datadog.Trace.Tests.Logging\n Assert.DoesNotContain(CorrelationIdentifier.TraceIdKey, logEvent.Properties.GetKeys());\n Assert.DoesNotContain(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n \n+ \/\/ Scope: N\/A\n+ \/\/ Custom property: N\/A\n+ logEvent = filteredLogs[logIndex++];\n+ Assert.DoesNotContain(CorrelationIdentifier.SpanIdKey, logEvent.Properties.GetKeys());\n+ Assert.DoesNotContain(CorrelationIdentifier.TraceIdKey, logEvent.Properties.GetKeys());\n+ Assert.DoesNotContain(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n+\n \/\/ Scope: N\/A\n \/\/ Custom property: SET\n logEvent = filteredLogs[logIndex++];","source_code":"using System.Collections.Generic;\nusing System.IO;\nusing System.Reflection;\nusing Datadog.Trace.Logging;\nusing Datadog.Trace.Logging.LogProviders;\nusing log4net.Appender;\nusing log4net.Config;\nusing log4net.Core;\nusing log4net.Layout;\nusing Newtonsoft.Json;\nusing Xunit;\n\nnamespace Datadog.Trace.Tests.Logging\n{\n [Collection(nameof(Datadog.Trace.Tests.Logging))]\n public class Log4NetLogProviderTests\n {\n private readonly ILogProvider _logProvider;\n private readonly ILog _logger;\n private readonly MemoryAppender _memoryAppender;\n\n public Log4NetLogProviderTests()\n {\n _memoryAppender = new MemoryAppender();\n var repository = log4net.LogManager.GetRepository(Assembly.GetAssembly(typeof(log4net.LogManager)));\n BasicConfigurator.Configure(repository, _memoryAppender);\n\n _logProvider = new Log4NetLogProvider();\n LogProvider.SetCurrentLogProvider(_logProvider);\n _logger = new LoggerExecutionWrapper(_logProvider.GetLogger(\"Test\"));\n }\n\n [Fact]\n public void 
EnabledLibLogSubscriberAddsTraceData()\n {\n \/\/ Assert that the Log4Net log provider is correctly being used\n Assert.IsType(LogProvider.CurrentLogProvider);\n\n \/\/ Instantiate a tracer for this test with default settings and set LogsInjectionEnabled to TRUE\n var tracer = LoggingProviderTestHelpers.InitializeTracer(enableLogsInjection: true);\n LoggingProviderTestHelpers.PerformParentChildScopeSequence(tracer, _logger, _logProvider.OpenMappedContext, out var parentScope, out var childScope);\n\n \/\/ Filter the logs\n List filteredLogs = new List(_memoryAppender.GetEvents());\n filteredLogs.RemoveAll(log => !log.MessageObject.ToString().Contains(LoggingProviderTestHelpers.LogPrefix));\n\n int logIndex = 0;\n LoggingEvent logEvent;\n\n \/\/ Scope: Parent scope\n \/\/ Custom property: N\/A\n logEvent = filteredLogs[logIndex++];\n logEvent.Contains(parentScope);\n Assert.DoesNotContain(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n\n \/\/ Scope: Parent scope\n \/\/ Custom property: SET\n logEvent = filteredLogs[logIndex++];\n logEvent.Contains(parentScope);\n Assert.Contains(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n Assert.Equal(LoggingProviderTestHelpers.CustomPropertyValue, int.Parse(logEvent.Properties[LoggingProviderTestHelpers.CustomPropertyName].ToString()));\n\n \/\/ Scope: Child scope\n \/\/ Custom property: SET\n logEvent = filteredLogs[logIndex++];\n logEvent.Contains(childScope);\n Assert.Contains(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n Assert.Equal(LoggingProviderTestHelpers.CustomPropertyValue, int.Parse(logEvent.Properties[LoggingProviderTestHelpers.CustomPropertyName].ToString()));\n\n \/\/ Scope: Parent scope\n \/\/ Custom property: SET\n logEvent = filteredLogs[logIndex++];\n logEvent.Contains(parentScope);\n Assert.Contains(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n Assert.Equal(LoggingProviderTestHelpers.CustomPropertyValue, int.Parse(logEvent.Properties[LoggingProviderTestHelpers.CustomPropertyName].ToString()));\n\n \/\/ EXISTING: Verify the log event is decorated with the parent scope properties\n \/\/ Scope: Parent scope\n \/\/ Custom property: N\/A\n logEvent = filteredLogs[logIndex++];\n logEvent.Contains(parentScope);\n Assert.DoesNotContain(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n\n \/\/ Scope: Default values of TraceId=0,SpanId=0\n \/\/ Custom property: N\/A\n logEvent = filteredLogs[logIndex++];\n logEvent.Contains(traceId: 0, spanId: 0);\n Assert.DoesNotContain(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n }\n\n [Fact]\n public void DisabledLibLogSubscriberDoesNotAddTraceData()\n {\n \/\/ Assert that the Log4Net log provider is correctly being used\n Assert.IsType(LogProvider.CurrentLogProvider);\n\n \/\/ Instantiate a tracer for this test with default settings and set LogsInjectionEnabled to TRUE\n var tracer = LoggingProviderTestHelpers.InitializeTracer(enableLogsInjection: false);\n LoggingProviderTestHelpers.PerformParentChildScopeSequence(tracer, _logger, _logProvider.OpenMappedContext, out var parentScope, out var childScope);\n\n \/\/ Filter the logs\n List filteredLogs = new List(_memoryAppender.GetEvents());\n filteredLogs.RemoveAll(log => !log.MessageObject.ToString().Contains(LoggingProviderTestHelpers.LogPrefix));\n\n int logIndex = 0;\n LoggingEvent logEvent;\n\n \/\/ Scope: N\/A\n \/\/ Custom property: N\/A\n logEvent = 
filteredLogs[logIndex++];\n Assert.DoesNotContain(CorrelationIdentifier.SpanIdKey, logEvent.Properties.GetKeys());\n Assert.DoesNotContain(CorrelationIdentifier.TraceIdKey, logEvent.Properties.GetKeys());\n Assert.DoesNotContain(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n\n \/\/ Scope: N\/A\n \/\/ Custom property: SET\n logEvent = filteredLogs[logIndex++];\n Assert.DoesNotContain(CorrelationIdentifier.SpanIdKey, logEvent.Properties.GetKeys());\n Assert.DoesNotContain(CorrelationIdentifier.TraceIdKey, logEvent.Properties.GetKeys());\n Assert.Contains(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n Assert.Equal(LoggingProviderTestHelpers.CustomPropertyValue, int.Parse(logEvent.Properties[LoggingProviderTestHelpers.CustomPropertyName].ToString()));\n\n \/\/ Scope: N\/A\n \/\/ Custom property: SET\n logEvent = filteredLogs[logIndex++];\n Assert.DoesNotContain(CorrelationIdentifier.SpanIdKey, logEvent.Properties.GetKeys());\n Assert.DoesNotContain(CorrelationIdentifier.TraceIdKey, logEvent.Properties.GetKeys());\n Assert.Contains(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n Assert.Equal(LoggingProviderTestHelpers.CustomPropertyValue, int.Parse(logEvent.Properties[LoggingProviderTestHelpers.CustomPropertyName].ToString()));\n\n \/\/ Scope: N\/A\n \/\/ Custom property: SET\n logEvent = filteredLogs[logIndex++];\n Assert.DoesNotContain(CorrelationIdentifier.SpanIdKey, logEvent.Properties.GetKeys());\n Assert.DoesNotContain(CorrelationIdentifier.TraceIdKey, logEvent.Properties.GetKeys());\n Assert.Contains(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n Assert.Equal(LoggingProviderTestHelpers.CustomPropertyValue, int.Parse(logEvent.Properties[LoggingProviderTestHelpers.CustomPropertyName].ToString()));\n\n \/\/ Scope: N\/A\n \/\/ Custom property: N\/A\n logEvent = filteredLogs[logIndex++];\n Assert.DoesNotContain(CorrelationIdentifier.SpanIdKey, logEvent.Properties.GetKeys());\n Assert.DoesNotContain(CorrelationIdentifier.TraceIdKey, logEvent.Properties.GetKeys());\n Assert.DoesNotContain(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n\n \/\/ Scope: N\/A\n \/\/ Custom property: N\/A\n logEvent = filteredLogs[logIndex++];\n Assert.DoesNotContain(CorrelationIdentifier.SpanIdKey, logEvent.Properties.GetKeys());\n Assert.DoesNotContain(CorrelationIdentifier.TraceIdKey, logEvent.Properties.GetKeys());\n Assert.DoesNotContain(LoggingProviderTestHelpers.CustomPropertyName, logEvent.Properties.GetKeys());\n }\n\n \/\/\/ \n \/\/\/ Lightweight JSON-formatter for Log4Net inspired by https:\/\/github.com\/Litee\/log4net.Layout.Json\n \/\/\/ <\/summary>\n internal class Log4NetJsonLayout : LayoutSkeleton\n {\n public override void ActivateOptions()\n {\n }\n\n public override void Format(TextWriter writer, LoggingEvent e)\n {\n var dic = new Dictionary\n {\n [\"level\"] = e.Level.DisplayName,\n [\"messageObject\"] = e.MessageObject,\n [\"renderedMessage\"] = e.RenderedMessage,\n [\"timestampUtc\"] = e.TimeStamp.ToUniversalTime().ToString(\"O\"),\n [\"logger\"] = e.LoggerName,\n [\"thread\"] = e.ThreadName,\n [\"exceptionObject\"] = e.ExceptionObject,\n [\"exceptionObjectString\"] = e.ExceptionObject == null ? 
null : e.GetExceptionString(),\n [\"userName\"] = e.UserName,\n [\"domain\"] = e.Domain,\n [\"identity\"] = e.Identity,\n [\"location\"] = e.LocationInformation.FullInfo,\n [\"properties\"] = e.GetProperties()\n };\n writer.Write(JsonConvert.SerializeObject(dic));\n }\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":64} {"diff_hunk":"@@ -8,10 +8,11 @@\n namespace MvvmCross.iOS.Views\n {\n using System;\n-\n+ using Foundation;\n using MvvmCross.Binding.BindingContext;\n using MvvmCross.Core.ViewModels;\n using MvvmCross.Platform.iOS.Views;\n+ using UIKit;\n \n public class MvxTabBarViewController\n : MvxEventSourceTabBarController","source_code":"\/\/ MvxTabBarViewController.cs\n\n\/\/ MvvmCross is licensed using Microsoft Public License (Ms-PL)\n\/\/ Contributions and inspirations noted in readme.md and license.txt\n\/\/\n\/\/ Project Lead - Stuart Lodge, @slodge, me@slodge.com\n\nnamespace MvvmCross.iOS.Views\n{\n using System;\n\n using MvvmCross.Binding.BindingContext;\n using MvvmCross.Core.ViewModels;\n using MvvmCross.Platform.iOS.Views;\n\n public class MvxTabBarViewController\n : MvxEventSourceTabBarController\n , IMvxIosView\n {\n protected MvxTabBarViewController()\n {\n this.AdaptForBinding();\n }\n\n protected MvxTabBarViewController(IntPtr handle)\n : base(handle)\n {\n this.AdaptForBinding();\n }\n\n public object DataContext\n {\n get\n {\n \/\/ special code needed in TabBar because View is initialized during construction\n return this.BindingContext?.DataContext;\n }\n set { this.BindingContext.DataContext = value; }\n }\n\n public IMvxViewModel ViewModel\n {\n get { return this.DataContext as IMvxViewModel; }\n set { this.DataContext = value; }\n }\n\n public MvxViewModelRequest Request { get; set; }\n\n public IMvxBindingContext BindingContext { get; set; }\n }\n\n public class MvxTabBarViewController\n : MvxTabBarViewController\n , IMvxIosView where TViewModel : class, IMvxViewModel\n {\n protected MvxTabBarViewController()\n {\n }\n\n protected MvxTabBarViewController(IntPtr handle)\n : base(handle)\n {\n }\n\n public new TViewModel ViewModel\n {\n get { return (TViewModel)base.ViewModel; }\n set { base.ViewModel = value; }\n }\n }\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":65} {"diff_hunk":"@@ -26,8 +26,7 @@ namespace OpenTelemetry.Logs\n public class OpenTelemetryLoggerProvider : ILoggerProvider, ISupportExternalScope\n {\n internal BaseProcessor Processor;\n- private readonly OpenTelemetryLoggerOptions options;\n- private readonly IDictionary loggers;\n+ private readonly IDictionary loggers = new Dictionary(StringComparer.Ordinal);\n private bool disposed;\n private IExternalScopeProvider scopeProvider;\n ","source_code":"\/\/ \n\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ <\/copyright>\n\n#if NET461 || NETSTANDARD2_0\nusing System;\nusing System.Collections.Generic;\nusing Microsoft.Extensions.Logging;\nusing 
Microsoft.Extensions.Options;\n\nnamespace OpenTelemetry.Logs\n{\n [ProviderAlias(\"OpenTelemetry\")]\n public class OpenTelemetryLoggerProvider : ILoggerProvider, ISupportExternalScope\n {\n internal BaseProcessor Processor;\n private readonly OpenTelemetryLoggerOptions options;\n private readonly IDictionary loggers;\n private bool disposed;\n private IExternalScopeProvider scopeProvider;\n\n static OpenTelemetryLoggerProvider()\n {\n \/\/ Accessing Sdk class is just to trigger its static ctor,\n \/\/ which sets default Propagators and default Activity Id format\n _ = Sdk.SuppressInstrumentation;\n }\n\n public OpenTelemetryLoggerProvider(IOptionsMonitor options)\n : this(options?.CurrentValue)\n {\n }\n\n internal OpenTelemetryLoggerProvider(OpenTelemetryLoggerOptions options)\n {\n this.options = options ?? throw new ArgumentNullException(nameof(options));\n this.loggers = new Dictionary(StringComparer.Ordinal);\n\n foreach (var processor in options.Processors)\n {\n this.AddProcessor(processor);\n }\n }\n\n internal IExternalScopeProvider ScopeProvider\n {\n get\n {\n if (this.scopeProvider == null)\n {\n this.scopeProvider = new LoggerExternalScopeProvider();\n }\n\n return this.scopeProvider;\n }\n }\n\n void ISupportExternalScope.SetScopeProvider(IExternalScopeProvider scopeProvider)\n {\n \/\/ TODO: set existing loggers\n this.scopeProvider = scopeProvider;\n }\n\n public ILogger CreateLogger(string categoryName)\n {\n lock (this.loggers)\n {\n ILogger logger;\n\n if (this.loggers.TryGetValue(categoryName, out logger))\n {\n return logger;\n }\n\n logger = new OpenTelemetryLogger(categoryName, this);\n this.loggers.Add(categoryName, logger);\n return logger;\n }\n }\n\n \/\/\/ \n public void Dispose()\n {\n this.Dispose(true);\n GC.SuppressFinalize(this);\n }\n\n internal OpenTelemetryLoggerProvider AddProcessor(BaseProcessor processor)\n {\n if (processor == null)\n {\n throw new ArgumentNullException(nameof(processor));\n }\n\n if (this.Processor == null)\n {\n this.Processor = processor;\n }\n else if (this.Processor is CompositeProcessor compositeProcessor)\n {\n compositeProcessor.AddProcessor(processor);\n }\n else\n {\n this.Processor = new CompositeProcessor(new[]\n {\n this.Processor,\n processor,\n });\n }\n\n return this;\n }\n\n protected virtual void Dispose(bool disposing)\n {\n if (this.disposed)\n {\n return;\n }\n\n if (disposing)\n {\n \/\/ Wait for up to 5 seconds grace period\n this.Processor?.Shutdown(5000);\n this.Processor?.Dispose();\n }\n\n this.disposed = true;\n }\n }\n}\n#endif\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":66} {"diff_hunk":"@@ -17,12 +17,17 @@\n using System;\n using System.Collections.Concurrent;\n using System.Collections.Generic;\n+using System.IO;\n+using System.Linq;\n+using System.Timers;\n+using Nethermind.Core.Caching;\n+using Nethermind.Core.Timers;\n using Nethermind.Logging;\n using Nethermind.Stats.Model;\n \n namespace Nethermind.Stats\n {\n- public class NodeStatsManager : INodeStatsManager\n+ public class NodeStatsManager : INodeStatsManager, IDisposable\n {\n private class NodeComparer : IEqualityComparer\n {","source_code":"\ufeff\/\/ Copyright (c) 2021 Demerzel Solutions Limited\n\/\/ This file is part of the Nethermind library.\n\/\/ \n\/\/ The Nethermind library is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later 
version.\n\/\/ \n\/\/ The Nethermind library is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/ \n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with the Nethermind. If not, see .\n\nusing System;\nusing System.Collections.Concurrent;\nusing System.Collections.Generic;\nusing Nethermind.Logging;\nusing Nethermind.Stats.Model;\n\nnamespace Nethermind.Stats\n{\n public class NodeStatsManager : INodeStatsManager\n {\n private class NodeComparer : IEqualityComparer\n {\n public bool Equals(Node x, Node y)\n {\n if (ReferenceEquals(x, null))\n {\n return ReferenceEquals(y, null);\n }\n\n if (ReferenceEquals(y, null))\n {\n return false;\n }\n\n return x.Id == y.Id;\n }\n\n public int GetHashCode(Node obj)\n {\n return obj?.GetHashCode() ?? 0;\n }\n }\n \n private readonly ILogger _logger;\n private readonly ConcurrentDictionary _nodeStats = new ConcurrentDictionary(new NodeComparer());\n\n public NodeStatsManager(ILogManager logManager)\n {\n _logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager));\n }\n\n private INodeStats AddStats(Node node)\n {\n return new NodeStatsLight(node);\n }\n \n public INodeStats GetOrAdd(Node node)\n {\n if (node == null)\n {\n return null;\n }\n\n \/\/ to avoid allocations\n if (_nodeStats.TryGetValue(node, out INodeStats stats))\n {\n return stats;\n }\n \n return _nodeStats.GetOrAdd(node, AddStats);\n }\n\n public void ReportHandshakeEvent(Node node, ConnectionDirection direction)\n {\n INodeStats stats = GetOrAdd(node);\n stats.AddNodeStatsHandshakeEvent(direction);\n }\n\n public void ReportSyncEvent(Node node, NodeStatsEventType nodeStatsEvent)\n {\n INodeStats stats = GetOrAdd(node);\n stats.AddNodeStatsSyncEvent(nodeStatsEvent);\n }\n \n public void ReportEvent(Node node, NodeStatsEventType eventType)\n {\n INodeStats stats = GetOrAdd(node);\n stats.AddNodeStatsEvent(eventType);\n }\n\n public (bool Result, NodeStatsEventType? DelayReason) IsConnectionDelayed(Node node)\n {\n INodeStats stats = GetOrAdd(node);\n return stats.IsConnectionDelayed();\n }\n\n public CompatibilityValidationType? 
FindCompatibilityValidationResult(Node node)\n {\n INodeStats stats = GetOrAdd(node);\n return stats.FailedCompatibilityValidation;\n }\n\n public long GetCurrentReputation(Node node)\n {\n INodeStats stats = GetOrAdd(node);\n return stats.CurrentNodeReputation;\n }\n\n public void ReportP2PInitializationEvent(Node node, P2PNodeDetails p2PNodeDetails)\n {\n INodeStats stats = GetOrAdd(node);\n stats.AddNodeStatsP2PInitializedEvent(p2PNodeDetails);\n }\n\n public void ReportSyncPeerInitializeEvent(string protocol, Node node, SyncPeerNodeDetails syncPeerNodeDetails)\n {\n INodeStats stats = GetOrAdd(node);\n if (protocol == \"eth\")\n stats.AddNodeStatsEth62InitializedEvent(syncPeerNodeDetails);\n else if (protocol == \"les\")\n stats.AddNodeStatsLesInitializedEvent(syncPeerNodeDetails);\n else\n throw new ArgumentException($\"Unknown protocol: {protocol}\");\n }\n\n public void ReportFailedValidation(Node node, CompatibilityValidationType validationType)\n {\n INodeStats stats = GetOrAdd(node);\n stats.FailedCompatibilityValidation = validationType;\n }\n\n public void ReportDisconnect(Node node, DisconnectType disconnectType, DisconnectReason disconnectReason)\n {\n INodeStats stats = GetOrAdd(node);\n stats.AddNodeStatsDisconnectEvent(disconnectType, disconnectReason);\n }\n\n public long GetNewPersistedReputation(Node node)\n {\n INodeStats stats = GetOrAdd(node);\n return stats.NewPersistedNodeReputation;\n }\n\n public long GetCurrentPersistedReputation(Node node)\n {\n INodeStats stats = GetOrAdd(node);\n return stats.CurrentPersistedNodeReputation;\n }\n\n public bool HasFailedValidation(Node node)\n {\n INodeStats stats = GetOrAdd(node);\n return stats.FailedCompatibilityValidation != null;\n }\n\n public void ReportTransferSpeedEvent(Node node, TransferSpeedType type, long value)\n {\n INodeStats stats = GetOrAdd(node);\n stats.AddTransferSpeedCaptureEvent(type, value);\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":67} {"diff_hunk":"@@ -24,7 +24,7 @@ using OpenTelemetry.Internal;\n \n namespace OpenTelemetry.Trace.Internal\n {\n- internal class BroadcastActivityProcessor : ActivityProcessor, IDisposable\n+ internal class BroadcastActivityProcessor : ActivityProcessor\n {\n private readonly IEnumerable processors;\n private bool isDisposed;","source_code":"\ufeff\/\/ \n\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ <\/copyright>\n\nusing System;\nusing System.Collections.Generic;\nusing System.Diagnostics;\nusing System.Linq;\nusing System.Threading;\nusing System.Threading.Tasks;\nusing OpenTelemetry.Internal;\n\nnamespace OpenTelemetry.Trace.Internal\n{\n internal class BroadcastActivityProcessor : ActivityProcessor, IDisposable\n {\n private readonly IEnumerable processors;\n private bool isDisposed;\n\n public BroadcastActivityProcessor(IEnumerable processors)\n {\n if (processors == null)\n {\n throw new ArgumentNullException(nameof(processors));\n }\n\n if 
(!processors.Any())\n {\n throw new ArgumentException($\"{nameof(processors)} collection is empty\");\n }\n\n this.processors = processors;\n }\n\n public override void OnEnd(Activity activity)\n {\n foreach (var processor in this.processors)\n {\n try\n {\n processor.OnEnd(activity);\n }\n catch (Exception e)\n {\n OpenTelemetrySdkEventSource.Log.SpanProcessorException(\"OnEnd\", e);\n }\n }\n }\n\n public override void OnStart(Activity activity)\n {\n foreach (var processor in this.processors)\n {\n try\n {\n processor.OnStart(activity);\n }\n catch (Exception e)\n {\n OpenTelemetrySdkEventSource.Log.SpanProcessorException(\"OnStart\", e);\n }\n }\n }\n\n public override Task ShutdownAsync(CancellationToken cancellationToken)\n {\n var tasks = new List();\n foreach (var processor in this.processors)\n {\n tasks.Add(processor.ShutdownAsync(cancellationToken));\n }\n\n return Task.WhenAll(tasks);\n }\n\n public override Task ForceFlushAsync(CancellationToken cancellationToken)\n {\n var tasks = new List(this.processors.Count());\n foreach (var processor in this.processors)\n {\n tasks.Add(processor.ForceFlushAsync(cancellationToken));\n }\n\n return Task.WhenAll(tasks);\n }\n\n public void Dispose()\n {\n this.Dispose(true);\n }\n\n protected virtual void Dispose(bool disposing)\n {\n try\n {\n this.ShutdownAsync(CancellationToken.None).GetAwaiter().GetResult();\n }\n catch (Exception ex)\n {\n OpenTelemetrySdkEventSource.Log.SpanProcessorException(nameof(this.Dispose), ex);\n }\n\n if (disposing && !this.isDisposed)\n {\n foreach (var processor in this.processors)\n {\n try\n {\n if (processor is IDisposable disposable)\n {\n disposable.Dispose();\n }\n }\n catch (Exception e)\n {\n OpenTelemetrySdkEventSource.Log.SpanProcessorException(\"Dispose\", e);\n }\n }\n\n this.isDisposed = true;\n }\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":68} {"diff_hunk":"@@ -6,18 +6,24 @@\n \/\/ Project Lead - Stuart Lodge, @slodge, me@slodge.com\n \n using System;\n+using System.Collections.Generic;\n+using System.Linq;\n using Windows.UI.Core;\n+using Windows.UI.Xaml;\n+using Windows.UI.Xaml.Controls;\n+using Windows.UI.Xaml.Media;\n using MvvmCross.Core.Navigation;\n using MvvmCross.Core.ViewModels;\n using MvvmCross.Core.Views;\n using MvvmCross.Platform;\n using MvvmCross.Platform.Exceptions;\n using MvvmCross.Platform.Platform;\n+using MvvmCross.Uwp.Attributes;\n \n namespace MvvmCross.Uwp.Views\n {\n public class MvxWindowsViewPresenter\n- : MvxViewPresenter, IMvxWindowsViewPresenter\n+ : MvxViewPresenter, IMvxWindowsViewPresenter, IMvxAttributeViewPresenter\n {\n protected readonly IMvxWindowsFrame _rootFrame;\n ","source_code":"\ufeff\/\/ MvxStoreViewPresenter.cs\n\n\/\/ MvvmCross is licensed using Microsoft Public License (Ms-PL)\n\/\/ Contributions and inspirations noted in readme.md and license.txt\n\/\/ \n\/\/ Project Lead - Stuart Lodge, @slodge, me@slodge.com\n\nusing System;\nusing Windows.UI.Core;\nusing MvvmCross.Core.Navigation;\nusing MvvmCross.Core.ViewModels;\nusing MvvmCross.Core.Views;\nusing MvvmCross.Platform;\nusing MvvmCross.Platform.Exceptions;\nusing MvvmCross.Platform.Platform;\n\nnamespace MvvmCross.Uwp.Views\n{\n public class MvxWindowsViewPresenter\n : MvxViewPresenter, IMvxWindowsViewPresenter\n {\n protected readonly IMvxWindowsFrame _rootFrame;\n\n public MvxWindowsViewPresenter(IMvxWindowsFrame rootFrame)\n {\n _rootFrame = rootFrame;\n\n SystemNavigationManager.GetForCurrentView().BackRequested += BackButtonOnBackRequested;\n }\n\n private 
IMvxViewModelTypeFinder _viewModelTypeFinder;\n public IMvxViewModelTypeFinder ViewModelTypeFinder\n {\n get\n {\n if (_viewModelTypeFinder == null)\n _viewModelTypeFinder = Mvx.Resolve();\n return _viewModelTypeFinder;\n }\n set\n {\n _viewModelTypeFinder = value;\n }\n }\n\n private IMvxViewsContainer _viewsContainer;\n public IMvxViewsContainer ViewsContainer\n {\n get\n {\n if (_viewsContainer == null)\n _viewsContainer = Mvx.Resolve();\n return _viewsContainer;\n }\n set\n {\n _viewsContainer = value;\n }\n }\n\n protected virtual async void BackButtonOnBackRequested(object sender, BackRequestedEventArgs backRequestedEventArgs)\n {\n if (backRequestedEventArgs.Handled)\n return;\n\n var currentView = _rootFrame.Content as IMvxView;\n if (currentView == null)\n {\n Mvx.Warning(\"Ignoring close for viewmodel - rootframe has no current page\");\n return;\n }\n\n var navigationService = Mvx.Resolve();\n\r backRequestedEventArgs.Handled = await navigationService.Close(currentView.ViewModel);\n }\n\n public override void Show(MvxViewModelRequest request)\n {\n try\n {\n var requestText = GetRequestText(request);\n var viewsContainer = Mvx.Resolve();\n var viewType = viewsContainer.GetViewType(request.ViewModelType);\n\n _rootFrame.Navigate(viewType, requestText); \/\/Frame won't allow serialization of it's nav-state if it gets a non-simple type as a nav param\n\n HandleBackButtonVisibility();\n }\n catch (Exception exception)\n {\n MvxTrace.Trace(\"Error seen during navigation request to {0} - error {1}\", request.ViewModelType.Name,\n exception.ToLongString());\n }\n }\n\n protected virtual string GetRequestText(MvxViewModelRequest request)\n {\n var requestTranslator = Mvx.Resolve();\n string requestText = string.Empty;\n if (request is MvxViewModelInstanceRequest)\n {\n requestText = requestTranslator.GetRequestTextWithKeyFor(((MvxViewModelInstanceRequest)request).ViewModelInstance);\n }\n else\n {\n requestText = requestTranslator.GetRequestTextFor(request);\n }\n\n return requestText;\n }\n\n public override void ChangePresentation(MvxPresentationHint hint)\n {\n if (HandlePresentationChange(hint)) return;\n\n if (hint is MvxClosePresentationHint)\n {\n Close((hint as MvxClosePresentationHint).ViewModelToClose);\n return;\n }\n\n MvxTrace.Warning(\"Hint ignored {0}\", hint.GetType().Name);\n }\n\n public override void Close(IMvxViewModel viewModel)\n {\n var currentView = _rootFrame.Content as IMvxView;\n if (currentView == null)\n {\n Mvx.Warning(\"Ignoring close for viewmodel - rootframe has no current page\");\n return;\n }\n\n if (currentView.ViewModel != viewModel)\n {\n Mvx.Warning(\"Ignoring close for viewmodel - rootframe's current page is not the view for the requested viewmodel\");\n return;\n }\n\n if (!_rootFrame.CanGoBack)\n {\n Mvx.Warning(\"Ignoring close for viewmodel - rootframe refuses to go back\");\n return;\n }\n\n _rootFrame.GoBack();\n\n HandleBackButtonVisibility();\n }\n\n protected virtual void HandleBackButtonVisibility()\n {\n SystemNavigationManager.GetForCurrentView().AppViewBackButtonVisibility =\n _rootFrame.CanGoBack ? 
AppViewBackButtonVisibility.Visible : AppViewBackButtonVisibility.Collapsed;\n }\n }\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":69} {"diff_hunk":"@@ -120,7 +120,7 @@ namespace MvvmCross.Plugins.PictureChooser.iOS\n }\n \n _picker.DismissViewController(true, () => { });\n- _modalHost.NativeModalViewControllerDisappearedOnItsOwn();\n+ _viewPresenter.CloseModalViewControllers();\n }\n \n private void Picker_FinishedPickingMedia(object sender, UIImagePickerMediaPickedEventArgs e)","source_code":"\ufeff\/\/ MvxImagePickerTask.cs\n\/\/ (c) Copyright Cirrious Ltd. http:\/\/www.cirrious.com\n\/\/ MvvmCross is licensed using Microsoft Public License (Ms-PL)\n\/\/ Contributions and inspirations noted in readme.md and license.txt\n\/\/\n\/\/ Project Lead - Stuart Lodge, @slodge, me@slodge.com\n\nusing System;\nusing System.IO;\nusing System.Runtime.InteropServices;\nusing System.Threading.Tasks;\nusing CoreGraphics;\nusing Foundation;\nusing MvvmCross.Platform;\nusing MvvmCross.Platform.iOS.Platform;\nusing MvvmCross.Platform.iOS.Views;\nusing MvvmCross.Platform.Logging;\nusing UIKit;\n\nnamespace MvvmCross.Plugins.PictureChooser.iOS\n{\n [MvvmCross.Platform.Preserve(AllMembers = true)]\n\tpublic class MvxImagePickerTask\n : MvxIosTask, IMvxPictureChooserTask\n {\n private readonly UIImagePickerController _picker;\n private readonly IMvxIosModalHost _modalHost;\n private bool _currentlyActive;\n private int _maxPixelDimension;\n private int _percentQuality;\n private Action _pictureAvailable;\n private Action _assumeCancelled;\n\n public MvxImagePickerTask()\n {\n _modalHost = Mvx.Resolve();\n _picker = new UIImagePickerController\n {\n \/\/CameraCaptureMode = UIImagePickerControllerCameraCaptureMode.Photo,\n \/\/CameraDevice = UIImagePickerControllerCameraDevice.Front\n };\n _picker.FinishedPickingMedia += Picker_FinishedPickingMedia;\n _picker.FinishedPickingImage += Picker_FinishedPickingImage;\n _picker.Canceled += Picker_Canceled;\n }\n\n public void ChoosePictureFromLibrary(int maxPixelDimension, int percentQuality, Action pictureAvailable,\n Action assumeCancelled)\n {\n _picker.SourceType = UIImagePickerControllerSourceType.PhotoLibrary;\n ChoosePictureCommon(maxPixelDimension, percentQuality, pictureAvailable, assumeCancelled);\n }\n\n public void ChoosePictureFromLibrary(int maxPixelDimension, int percentQuality, Action pictureAvailable,\n Action assumeCancelled)\n {\n ChoosePictureFromLibrary(maxPixelDimension, percentQuality, (stream, name) => pictureAvailable(stream), assumeCancelled);\n }\n\n public void TakePicture(int maxPixelDimension, int percentQuality, Action pictureAvailable,\n Action assumeCancelled)\n {\n _picker.SourceType = UIImagePickerControllerSourceType.Camera;\n ChoosePictureCommon(maxPixelDimension, percentQuality, (stream, name) => pictureAvailable(stream), assumeCancelled);\n }\n\n public Task ChoosePictureFromLibrary(int maxPixelDimension, int percentQuality)\n {\n var task = new TaskCompletionSource();\n ChoosePictureFromLibrary(maxPixelDimension, percentQuality, task.SetResult, () => task.SetResult(null));\n return task.Task;\n }\n\n public Task TakePicture(int maxPixelDimension, int percentQuality)\n {\n var task = new TaskCompletionSource();\n TakePicture(maxPixelDimension, percentQuality, task.SetResult, () => task.SetResult(null));\n return task.Task;\n }\n\n public void ContinueFileOpenPicker(object args)\n {\n }\n\n private void ChoosePictureCommon(int maxPixelDimension, int percentQuality,\n Action pictureAvailable, Action 
assumeCancelled)\n {\n SetCurrentlyActive();\n _maxPixelDimension = maxPixelDimension;\n _percentQuality = percentQuality;\n _pictureAvailable = pictureAvailable;\n _assumeCancelled = assumeCancelled;\n\n _modalHost.PresentModalViewController(_picker, true);\n }\n\n private void HandleImagePick(UIImage image, string name)\n {\n ClearCurrentlyActive();\n if (image != null)\n {\n if (_maxPixelDimension > 0 && (image.Size.Height > _maxPixelDimension || image.Size.Width > _maxPixelDimension))\n {\n \/\/ resize the image\n image = image.ImageToFitSize(new CGSize(_maxPixelDimension, _maxPixelDimension));\n }\n\n using (NSData data = image.AsJPEG(_percentQuality \/ 100f))\n {\n var byteArray = new byte[data.Length];\n Marshal.Copy(data.Bytes, byteArray, 0, Convert.ToInt32(data.Length));\n\n var imageStream = new MemoryStream(byteArray, false);\n _pictureAvailable?.Invoke(imageStream, name);\n }\n }\n else\n {\n _assumeCancelled?.Invoke();\n }\n\n _picker.DismissViewController(true, () => { });\n _modalHost.NativeModalViewControllerDisappearedOnItsOwn();\n }\n\n private void Picker_FinishedPickingMedia(object sender, UIImagePickerMediaPickedEventArgs e)\n {\n NSUrl referenceURL = e.Info[new NSString(\"UIImagePickerControllerReferenceURL\")] as NSUrl;\n var image = e.EditedImage ?? e.OriginalImage;\n HandleImagePick(image, referenceURL != null ? referenceURL.AbsoluteString : string.Empty);\n }\n\n private void Picker_FinishedPickingImage(object sender, UIImagePickerImagePickedEventArgs e)\n {\n NSUrl referenceURL = e.EditingInfo[\"UIImagePickerControllerReferenceURL\"] as NSUrl;\n var image = e.Image;\n HandleImagePick(image, referenceURL != null ? referenceURL.AbsoluteString : string.Empty);\n }\n\n private void Picker_Canceled(object sender, EventArgs e)\n {\n ClearCurrentlyActive();\n _assumeCancelled?.Invoke();\n _picker.DismissViewController(true, () => { });\n _modalHost.NativeModalViewControllerDisappearedOnItsOwn();\n }\n\n private void SetCurrentlyActive()\n {\n if (_currentlyActive)\n MvxPluginLog.Instance.Warn(\"MvxImagePickerTask called when task already active\");\n _currentlyActive = true;\n }\n\n private void ClearCurrentlyActive()\n {\n if (!_currentlyActive)\n MvxPluginLog.Instance.Warn(\"Tried to clear currently active - but already cleared\");\n _currentlyActive = false;\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":70} {"diff_hunk":"@@ -63,9 +63,26 @@ namespace OpenTelemetry.Instrumentation.SqlClient\n \/\/\/ \n \/\/\/ : the activity being enriched.<\/para>\n \/\/\/ string: the name of the event.<\/para>\n- \/\/\/ object: the raw object from which additional information can be extracted to enrich the activity.\n- \/\/\/ The type of this object depends on the event, which is given by the above parameter.<\/para>\n+ \/\/\/ object: the raw SqlCommand<\/c> object from which additional information can be extracted to enrich the activity.<\/para>\n+ \/\/\/ See also: example<\/a>.<\/para>\n \/\/\/ <\/remarks>\n+ \/\/\/ \n+ \/\/\/ \n+ \/\/\/ using var tracerProvider = Sdk.CreateTracerProviderBuilder()\n+ \/\/\/ .AddSqlClientInstrumentation(opt => opt.Enrich\n+ \/\/\/ = (activity, eventName, rawObject) =>\n+ \/\/\/ {\n+ \/\/\/ if (eventName.Equals(\"OnCustom\"))\n+ \/\/\/ {\n+ \/\/\/ if (rawObject is SqlCommand cmd)\n+ \/\/\/ {\n+ \/\/\/ activity.SetTag(\"db.commandTimeout\", cmd.CommandTimeout);\n+ \/\/\/ }\n+ \/\/\/ }\n+ \/\/\/ })\n+ \/\/\/ .Build();\n+ \/\/\/ <\/code>\n+ \/\/\/ <\/example>\n public Action Enrich { get; set; }\n \n internal static 
SqlConnectionDetails ParseDataSource(string dataSource)","source_code":"\/\/ \n\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ <\/copyright>\n\nusing System;\nusing System.Collections.Concurrent;\nusing System.Data;\nusing System.Diagnostics;\nusing System.Text.RegularExpressions;\nusing OpenTelemetry.Trace;\n\nnamespace OpenTelemetry.Instrumentation.SqlClient\n{\n \/\/\/ \n \/\/\/ Options for .\n \/\/\/ <\/summary>\n public class SqlClientInstrumentationOptions\n {\n \/*\n * Match...\n * serverName\n * serverName[ ]\\\\[ ]instanceName\n * serverName[ ],[ ]port\n * serverName[ ]\\\\[ ]instanceName[ ],[ ]port\n * [ ] can be any number of white-space, SQL allows it for some reason.\n *\/\n private static readonly Regex DataSourceRegex = new Regex(\"^(.*?)\\\\s*(?:[\\\\\\\\,]|$)\\\\s*(.*?)\\\\s*(?:,|$)\\\\s*(.*)$\", RegexOptions.Compiled);\n private static readonly ConcurrentDictionary ConnectionDetailCache = new ConcurrentDictionary(StringComparer.OrdinalIgnoreCase);\n\n \/\/\/ \n \/\/\/ Gets or sets a value indicating whether or not the should add the names of commands as the tag. Default value: True.\n \/\/\/ <\/summary>\n public bool SetStoredProcedureCommandName { get; set; } = true;\n\n \/\/\/ \n \/\/\/ Gets or sets a value indicating whether or not the should add the text of commands as the tag. Default value: False.\n \/\/\/ <\/summary>\n public bool SetTextCommandContent { get; set; }\n\n \/\/\/ \n \/\/\/ Gets or sets a value indicating whether or not the should parse the DataSource on a SqlConnection into server name, instance name, and\/or port connection-level attribute tags. Default value: False.\n \/\/\/ <\/summary>\n \/\/\/ \n \/\/\/ The default behavior is to set the SqlConnection DataSource as the tag. 
If enabled, SqlConnection DataSource will be parsed and the server name will be sent as the or tag, the instance name will be sent as the tag, and the port will be sent as the tag if it is not 1433 (the default port).\n \/\/\/ <\/remarks>\n public bool EnableConnectionLevelAttributes { get; set; }\n\n \/\/\/ \n \/\/\/ Gets or sets an action to enrich an Activity.\n \/\/\/ <\/summary>\n \/\/\/ \n \/\/\/ : the activity being enriched.<\/para>\n \/\/\/ string: the name of the event.<\/para>\n \/\/\/ object: the raw object from which additional information can be extracted to enrich the activity.\n \/\/\/ The type of this object depends on the event, which is given by the above parameter.<\/para>\n \/\/\/ <\/remarks>\n public Action Enrich { get; set; }\n\n internal static SqlConnectionDetails ParseDataSource(string dataSource)\n {\n Match match = DataSourceRegex.Match(dataSource);\n\n string serverHostName = match.Groups[1].Value;\n string serverIpAddress = null;\n\n var uriHostNameType = Uri.CheckHostName(serverHostName);\n if (uriHostNameType == UriHostNameType.IPv4 || uriHostNameType == UriHostNameType.IPv6)\n {\n serverIpAddress = serverHostName;\n serverHostName = null;\n }\n\n string instanceName;\n string port;\n if (match.Groups[3].Length > 0)\n {\n instanceName = match.Groups[2].Value;\n port = match.Groups[3].Value;\n if (port == \"1433\")\n {\n port = null;\n }\n }\n else if (int.TryParse(match.Groups[2].Value, out int parsedPort))\n {\n port = parsedPort == 1433 ? null : match.Groups[2].Value;\n instanceName = null;\n }\n else\n {\n instanceName = match.Groups[2].Value;\n\n if (string.IsNullOrEmpty(instanceName))\n {\n instanceName = null;\n }\n\n port = null;\n }\n\n return new SqlConnectionDetails\n {\n ServerHostName = serverHostName,\n ServerIpAddress = serverIpAddress,\n InstanceName = instanceName,\n Port = port,\n };\n }\n\n internal void AddConnectionLevelDetailsToActivity(string dataSource, Activity sqlActivity)\n {\n if (!this.EnableConnectionLevelAttributes)\n {\n sqlActivity.SetTag(SemanticConventions.AttributePeerService, dataSource);\n }\n else\n {\n if (!ConnectionDetailCache.TryGetValue(dataSource, out SqlConnectionDetails connectionDetails))\n {\n connectionDetails = ParseDataSource(dataSource);\n ConnectionDetailCache.TryAdd(dataSource, connectionDetails);\n }\n\n if (!string.IsNullOrEmpty(connectionDetails.ServerHostName))\n {\n sqlActivity.SetTag(SemanticConventions.AttributeNetPeerName, connectionDetails.ServerHostName);\n }\n else\n {\n sqlActivity.SetTag(SemanticConventions.AttributeNetPeerIp, connectionDetails.ServerIpAddress);\n }\n\n if (!string.IsNullOrEmpty(connectionDetails.InstanceName))\n {\n sqlActivity.SetTag(SemanticConventions.AttributeDbMsSqlInstanceName, connectionDetails.InstanceName);\n }\n\n if (!string.IsNullOrEmpty(connectionDetails.Port))\n {\n sqlActivity.SetTag(SemanticConventions.AttributeNetPeerPort, connectionDetails.Port);\n }\n }\n }\n\n internal class SqlConnectionDetails\n {\n public string ServerHostName { get; set; }\n\n public string ServerIpAddress { get; set; }\n\n public string InstanceName { get; set; }\n\n public string Port { get; set; }\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":71} {"diff_hunk":"@@ -55,6 +55,12 @@ namespace OpenTelemetry.Instrumentation.AspNet\n \n if (propagationContext.Baggage != default)\n {\n+ \/\/ todo: RestoreActivityIfNeeded below compensates for\n+ \/\/ AsyncLocal Activity.Current being lost. 
Baggage\n+ \/\/ potentially will suffer from the same issue, but we can\u2019t\n+ \/\/ simply add it to context.Items because any change results\n+ \/\/ in a new instance. Probably need to save it at the end of\n+ \/\/ each OnExecuteRequestStep.\n Baggage.Current = propagationContext.Baggage;\n }\n ","source_code":"\/\/ \n\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ <\/copyright>\n\nusing System;\nusing System.Collections;\nusing System.Collections.Generic;\nusing System.Diagnostics;\nusing System.Web;\nusing OpenTelemetry.Context.Propagation;\n\nnamespace OpenTelemetry.Instrumentation.AspNet\n{\n \/\/\/ \n \/\/\/ Activity helper class.\n \/\/\/ <\/summary>\n internal static class ActivityHelper\n {\n \/\/\/ \n \/\/\/ Key to store the activity in HttpContext.\n \/\/\/ <\/summary>\n public const string ActivityKey = \"__AspnetActivity__\";\n\n private static readonly ActivitySource AspNetSource = new ActivitySource(TelemetryHttpModule.AspNetSourceName);\n private static readonly Func> HttpRequestHeaderValuesGetter = (request, name) => request.Headers.GetValues(name);\n\n \/\/\/ \n \/\/\/ Creates root (first level) activity that describes incoming request.\n \/\/\/ <\/summary>\n \/\/\/ .<\/param>\n \/\/\/ Current HttpContext.<\/param>\n \/\/\/ Callback action.<\/param>\n \/\/\/ New root activity.<\/returns>\n public static Activity StartAspNetActivity(TextMapPropagator textMapPropagator, HttpContext context, Action onRequestStartedCallback)\n {\n PropagationContext propagationContext = textMapPropagator.Extract(default, context.Request, HttpRequestHeaderValuesGetter);\n\n Activity activity = AspNetSource.CreateActivity(TelemetryHttpModule.AspNetActivityName, ActivityKind.Server, propagationContext.ActivityContext);\n\n if (activity != null)\n {\n context.Items[ActivityKey] = activity;\n\n if (propagationContext.Baggage != default)\n {\n Baggage.Current = propagationContext.Baggage;\n }\n\n try\n {\n onRequestStartedCallback?.Invoke(activity, context);\n }\n catch (Exception callbackEx)\n {\n AspNetTelemetryEventSource.Log.CallbackException(activity, \"OnStarted\", callbackEx);\n }\n\n AspNetTelemetryEventSource.Log.ActivityStarted(activity);\n }\n\n return activity;\n }\n\n \/\/\/ \n \/\/\/ Stops the activity and notifies listeners about it.\n \/\/\/ <\/summary>\n \/\/\/ Current HttpContext.<\/param>\n \/\/\/ Callback action.<\/param>\n public static void StopAspNetActivity(HttpContext context, Action onRequestStoppedCallback)\n {\n var contextItems = context.Items;\n var currentActivity = Activity.Current;\n Activity aspNetActivity = (Activity)contextItems[ActivityKey];\n\n if (currentActivity != aspNetActivity)\n {\n Activity.Current = aspNetActivity;\n }\n\n if (aspNetActivity != null)\n {\n aspNetActivity.Stop();\n contextItems[ActivityKey] = null;\n\n try\n {\n onRequestStoppedCallback?.Invoke(aspNetActivity, context);\n }\n catch (Exception callbackEx)\n {\n 
AspNetTelemetryEventSource.Log.CallbackException(aspNetActivity, \"OnStopped\", callbackEx);\n }\n\n AspNetTelemetryEventSource.Log.ActivityStopped(currentActivity);\n }\n\n if (currentActivity != aspNetActivity)\n {\n Activity.Current = currentActivity;\n }\n }\n\n public static void WriteActivityException(IDictionary contextItems, Exception exception, Action onExceptionCallback)\n {\n Activity aspNetActivity = (Activity)contextItems[ActivityKey];\n\n if (aspNetActivity != null)\n {\n try\n {\n onExceptionCallback?.Invoke(aspNetActivity, exception);\n }\n catch (Exception callbackEx)\n {\n AspNetTelemetryEventSource.Log.CallbackException(aspNetActivity, \"OnException\", callbackEx);\n }\n\n AspNetTelemetryEventSource.Log.ActivityException(aspNetActivity, exception);\n }\n }\n\n \/\/\/ \n \/\/\/ It's possible that a request is executed in both native threads and managed threads,\n \/\/\/ in such case Activity.Current will be lost during native thread and managed thread switch.\n \/\/\/ This method is intended to restore the current activity in order to correlate the child\n \/\/\/ activities with the root activity of the request.\n \/\/\/ <\/summary>\n \/\/\/ HttpContext.Items dictionary.<\/param>\n internal static void RestoreActivityIfNeeded(IDictionary contextItems)\n {\n if (Activity.Current == null)\n {\n Activity aspNetActivity = (Activity)contextItems[ActivityKey];\n if (aspNetActivity != null)\n {\n Activity.Current = aspNetActivity;\n AspNetTelemetryEventSource.Log.ActivityRestored(aspNetActivity);\n }\n }\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":72} {"diff_hunk":"@@ -55,14 +55,14 @@ namespace OpenTelemetry.Trace.Test\n [Fact]\n public void ThrowsInExporter()\n {\n- this.activityExporter = new TestActivityExporter(_ => throw new ArgumentException(\"123\"));\n- this.openTelemetry = Sdk.CreateTracerProvider(b => b\n- .AddActivitySource(\"cijo\")\n- .AddProcessorPipeline(p => p\n- .SetExporter(this.activityExporter)\n- .SetExportingProcessor(e => new SimpleActivityProcessor(e))));\n-\n- ActivitySource source = new ActivitySource(\"cijo\");\n+ var activityExporter = new TestActivityExporter(_ => throw new ArgumentException(\"123\"));\n+ using var openTelemetry = Sdk.CreateTracerProviderBuilder()\n+ .AddSource(\"random\")\n+ .SetSampler(new AlwaysOnSampler())\n+ .AddProcessor(new SimpleActivityProcessor(activityExporter))\n+ .Build();\n+\n+ ActivitySource source = new ActivitySource(\"random\");\n var activity = source.StartActivity(\"somename\");\n \n \/\/ does not throw","source_code":"\ufeff\/\/ \n\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ <\/copyright>\n\nusing System;\nusing System.Diagnostics;\nusing System.Threading;\nusing System.Threading.Tasks;\nusing OpenTelemetry.Testing.Export;\nusing OpenTelemetry.Trace.Samplers;\nusing Xunit;\n\nnamespace OpenTelemetry.Trace.Test\n{\n public class SimpleActivityProcessorTest : IDisposable\n {\n private const string 
SpanName1 = \"MySpanName\/1\";\n private const string SpanName2 = \"MySpanName\/2\";\n private const string ActivitySourceName = \"defaultactivitysource\";\n\n private TestActivityExporter activityExporter;\n private TracerProvider openTelemetry;\n private ActivitySource activitySource;\n\n public SimpleActivityProcessorTest()\n {\n this.activityExporter = new TestActivityExporter(null);\n this.openTelemetry = Sdk.CreateTracerProvider(b => b\n .AddActivitySource(ActivitySourceName)\n .AddProcessorPipeline(p => p\n .SetExporter(this.activityExporter)\n .SetExportingProcessor(e => new SimpleActivityProcessor(e)))\n .SetSampler(new AlwaysOnSampler()));\n this.activitySource = new ActivitySource(ActivitySourceName);\n }\n\n [Fact]\n public void ThrowsOnNullExporter()\n {\n Assert.Throws(() => new SimpleActivityProcessor(null));\n }\n\n [Fact]\n public void ThrowsInExporter()\n {\n this.activityExporter = new TestActivityExporter(_ => throw new ArgumentException(\"123\"));\n this.openTelemetry = Sdk.CreateTracerProvider(b => b\n .AddActivitySource(\"cijo\")\n .AddProcessorPipeline(p => p\n .SetExporter(this.activityExporter)\n .SetExportingProcessor(e => new SimpleActivityProcessor(e))));\n\n ActivitySource source = new ActivitySource(\"cijo\");\n var activity = source.StartActivity(\"somename\");\n\n \/\/ does not throw\n activity.Stop();\n }\n\n [Fact]\n public void ProcessorDoesNotBlockOnExporter()\n {\n this.activityExporter = new TestActivityExporter(async _ => await Task.Delay(500));\n this.openTelemetry = Sdk.CreateTracerProvider(b => b\n .AddActivitySource(\"cijo\")\n .AddProcessorPipeline(p => p\n .SetExporter(this.activityExporter)\n .SetExportingProcessor(e => new SimpleActivityProcessor(e))));\n\n ActivitySource source = new ActivitySource(\"cijo\");\n var activity = source.StartActivity(\"somename\");\n\n \/\/ does not block\n var sw = Stopwatch.StartNew();\n activity.Stop();\n sw.Stop();\n\n Assert.InRange(sw.Elapsed, TimeSpan.Zero, TimeSpan.FromMilliseconds(100));\n\n var exported = this.WaitForSpans(this.activityExporter, 1, TimeSpan.FromMilliseconds(600));\n\n Assert.Single(exported);\n }\n\n [Fact]\n public async Task ShutdownTwice()\n {\n var activityProcessor = new SimpleActivityProcessor(new TestActivityExporter(null));\n\n await activityProcessor.ShutdownAsync(CancellationToken.None).ConfigureAwait(false);\n\n \/\/ does not throw\n await activityProcessor.ShutdownAsync(CancellationToken.None).ConfigureAwait(false);\n }\n\n [Fact]\n public async Task ForceFlushReturnsCompletedTask()\n {\n var activityProcessor = new SimpleActivityProcessor(new TestActivityExporter(null));\n\n var forceFlushTask = activityProcessor.ForceFlushAsync(CancellationToken.None);\n await forceFlushTask;\n\n Assert.True(forceFlushTask.IsCompleted);\n }\n\n [Fact]\n public void ExportDifferentSampledSpans()\n {\n var span1 = this.CreateSampledEndedSpan(SpanName1);\n var span2 = this.CreateSampledEndedSpan(SpanName2);\n\n var exported = this.WaitForSpans(this.activityExporter, 2, TimeSpan.FromMilliseconds(100));\n Assert.Equal(2, exported.Length);\n Assert.Contains(span1, exported);\n Assert.Contains(span2, exported);\n }\n\n [Fact(Skip = \"Reenable once AlwaysParentSampler is added\")]\n public void ExportNotSampledSpans()\n {\n var span1 = this.CreateNotSampledEndedSpan(SpanName1);\n var span2 = this.CreateSampledEndedSpan(SpanName2);\n\n \/\/ Spans are recorded and exported in the same order as they are ended, we test that a non\n \/\/ sampled span is not exported by creating and ending a sampled 
span after a non sampled span\n \/\/ and checking that the first exported span is the sampled span (the non sampled did not get\n \/\/ exported).\n\n var exported = this.WaitForSpans(this.activityExporter, 1, TimeSpan.FromMilliseconds(100));\n\n \/\/ Need to check this because otherwise the variable span1 is unused, other option is to not\n \/\/ have a span1 variable.\n Assert.Single(exported);\n Assert.Contains(span2, exported);\n }\n\n public void Dispose()\n {\n this.activityExporter.ShutdownAsync(CancellationToken.None);\n Activity.Current = null;\n }\n\n private Activity CreateSampledEndedSpan(string spanName)\n {\n var context = new ActivityContext(ActivityTraceId.CreateRandom(), ActivitySpanId.CreateRandom(), ActivityTraceFlags.Recorded);\n\n var activity = this.activitySource.StartActivity(spanName, ActivityKind.Internal, context);\n activity.Stop();\n return activity;\n }\n\n private Activity CreateNotSampledEndedSpan(string spanName)\n {\n var context = new ActivityContext(ActivityTraceId.CreateRandom(), ActivitySpanId.CreateRandom(), ActivityTraceFlags.None);\n var activity = this.activitySource.StartActivity(spanName, ActivityKind.Internal, context);\n activity.Stop();\n return activity;\n }\n\n private Activity[] WaitForSpans(TestActivityExporter exporter, int spanCount, TimeSpan timeout)\n {\n Assert.True(\n SpinWait.SpinUntil(\n () =>\n {\n Thread.Sleep(0);\n return exporter.ExportedActivities.Length >= spanCount;\n }, timeout + TimeSpan.FromMilliseconds(20)));\n\n return exporter.ExportedActivities;\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":73} {"diff_hunk":"@@ -96,5 +96,14 @@ namespace SampleApp\n \n return hostBuilder.Build().RunAsync();\n }\n+\n+ private static void ShowConfig(IConfiguration config)\n+ {\n+ foreach (var pair in config.GetChildren())\n+ {\n+ Console.WriteLine($\"{pair.Path} - {pair.Value}\");\n+ ShowConfig(pair);\n+ }\n+ }\n }\n }","source_code":"\/\/ Copyright (c) .NET Foundation. All rights reserved.\n\/\/ Licensed under the Apache License, Version 2.0. 
See License.txt in the project root for license information.\n\nusing System;\nusing System.Diagnostics;\nusing System.Globalization;\nusing System.IO;\nusing System.Net;\nusing System.Threading.Tasks;\nusing Microsoft.AspNetCore.Builder;\nusing Microsoft.AspNetCore.Hosting;\nusing Microsoft.AspNetCore.Http;\nusing Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal;\nusing Microsoft.Extensions.Configuration;\nusing Microsoft.Extensions.Logging;\n\nnamespace SampleApp\n{\n public class Startup\n {\n public void Configure(IApplicationBuilder app, ILoggerFactory loggerFactory)\n {\n var logger = loggerFactory.CreateLogger(\"Default\");\n\n app.Run(async context =>\n {\n var connectionFeature = context.Connection;\n logger.LogDebug($\"Peer: {connectionFeature.RemoteIpAddress?.ToString()}:{connectionFeature.RemotePort}\"\n + $\"{Environment.NewLine}\"\n + $\"Sock: {connectionFeature.LocalIpAddress?.ToString()}:{connectionFeature.LocalPort}\");\n\n var response = $\"hello, world{Environment.NewLine}\";\n context.Response.ContentLength = response.Length;\n context.Response.ContentType = \"text\/plain\";\n await context.Response.WriteAsync(response);\n });\n }\n\n public static Task Main(string[] args)\n {\n TaskScheduler.UnobservedTaskException += (sender, e) =>\n {\n Console.WriteLine(\"Unobserved exception: {0}\", e.Exception);\n };\n\n var hostBuilder = new WebHostBuilder()\n .ConfigureLogging((_, factory) =>\n {\n factory.AddConsole();\n })\n .UseKestrel((context, options) =>\n {\n var basePort = context.Configuration.GetValue(\"BASE_PORT\") ?? 5000;\n\n \/\/ Run callbacks on the transport thread\n options.ApplicationSchedulingMode = SchedulingMode.Inline;\n\n options.Listen(IPAddress.Loopback, basePort, listenOptions =>\n {\n \/\/ Uncomment the following to enable Nagle's algorithm for this endpoint.\n \/\/listenOptions.NoDelay = false;\n\n listenOptions.UseConnectionLogging();\n });\n\n options.Listen(IPAddress.Loopback, basePort + 1, listenOptions =>\n {\n listenOptions.UseHttps(\"testCert.pfx\", \"testPassword\");\n listenOptions.UseConnectionLogging();\n });\n\n options.ListenLocalhost(basePort + 2, listenOptions =>\n {\n listenOptions.UseHttps(\"testCert.pfx\", \"testPassword\");\n });\n\n options.ListenAnyIP(basePort + 3);\n\n options.UseSystemd();\n\n \/\/ The following section should be used to demo sockets\n \/\/options.ListenUnixSocket(\"\/tmp\/kestrel-test.sock\");\n })\n .UseContentRoot(Directory.GetCurrentDirectory())\n .UseStartup();\n\n if (string.Equals(Process.GetCurrentProcess().Id.ToString(), Environment.GetEnvironmentVariable(\"LISTEN_PID\")))\n {\n \/\/ Use libuv if activated by systemd, since that's currently the only transport that supports being passed a socket handle.\n hostBuilder.UseLibuv(options =>\n {\n \/\/ Uncomment the following line to change the default number of libuv threads for all endpoints.\n \/\/ options.ThreadCount = 4;\n });\n }\n \n return hostBuilder.Build().RunAsync();\n }\n }\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":74} {"diff_hunk":"@@ -50,7 +50,7 @@ namespace System.Drawing.Tests\n {\n try\n {\n- return new [] \n+ return new []\n {\n new ImageTestData(ImageFormat.Bmp),\n new ImageTestData(ImageFormat.Jpeg),","source_code":"\ufeff\/\/ Licensed to the .NET Foundation under one or more agreements.\n\/\/ The .NET Foundation licenses this file to you under the MIT license.\n\/\/ See the LICENSE file in the project root for more information.\n\nusing System.Collections.Generic;\nusing 
System.Drawing.Imaging;\nusing System.IO;\nusing System.Runtime.InteropServices;\nusing BenchmarkDotNet.Attributes;\nusing MicroBenchmarks;\n\nnamespace System.Drawing.Tests\n{\n [BenchmarkCategory(Categories.Libraries)]\n public class Perf_Image_Load\n {\n \/\/ this field is lazy to avoid the exception during static ctor initialization of this type (harder to catch and handle properly)\n private static readonly Lazy LazyTestCases = new Lazy(CreateTestCases);\n\n public IEnumerable ImageFormats() => LazyTestCases.Value;\n\n [Benchmark]\n [ArgumentsSource(nameof(ImageFormats))]\n public void Bitmap_FromStream(ImageTestData format)\n {\n using (new Bitmap(format.Stream))\n {\n }\n }\n\n [Benchmark]\n [ArgumentsSource(nameof(ImageFormats))]\n public void Image_FromStream(ImageTestData format)\n {\n using (Image.FromStream(format.Stream))\n {\n }\n }\n\n [Benchmark]\n [ArgumentsSource(nameof(ImageFormats))]\n public void Image_FromStream_NoValidation(ImageTestData format)\n {\n using (Image.FromStream(format.Stream, false, false))\n {\n }\n }\n\n private static ImageTestData[] CreateTestCases()\n {\n try\n {\n return new [] \n {\n new ImageTestData(ImageFormat.Bmp),\n new ImageTestData(ImageFormat.Jpeg),\n new ImageTestData(ImageFormat.Png),\n new ImageTestData(ImageFormat.Gif)\n };\n }\n catch (Exception) when (RuntimeInformation.IsOSPlatform(OSPlatform.Linux))\n {\n Console.ForegroundColor = ConsoleColor.Red;\n Console.WriteLine(\"libgdiplus is missing, you can install it by running 'apt-get install libgdiplus'\");\n Console.ResetColor();\n\n throw;\n }\n }\n\n public class ImageTestData\n {\n public Stream Stream { get; }\n private string FormatName { get; }\n\n public ImageTestData(ImageFormat format)\n {\n Stream = CreateTestImage(format);\n FormatName = format.ToString();\n }\n\n \/\/ the value returned by ToString is used in the text representation of Benchmark ID in our reporting system\n public override string ToString() => FormatName;\n\n private static Stream CreateTestImage(ImageFormat format)\n {\n Random r = new Random(1066); \/\/ the seed must not be changed\n\n const int Size = 1000;\n Point RandomPoint() => new Point(r.Next(Size), r.Next(Size));\n\n var result = new MemoryStream();\n\n using (Bitmap bitmap = new Bitmap(Size, Size))\n using (Pen pen = new Pen(Color.Blue))\n using (Graphics graphics = Graphics.FromImage(bitmap))\n {\n for (int i = 0; i < 100; i++)\n {\n graphics.DrawBezier(pen, RandomPoint(), RandomPoint(), RandomPoint(), RandomPoint());\n }\n\n bitmap.Save(result, format);\n }\n\n return result;\n }\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":75} {"diff_hunk":"@@ -152,6 +152,7 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal\n finally\n {\n CloseRawPipes();\n+ _adaptedPipelineTcs.TrySetResult(null);\n }\n }\n ","source_code":"\ufeff\/\/ Copyright (c) .NET Foundation. All rights reserved.\n\/\/ Licensed under the Apache License, Version 2.0. 
See License.txt in the project root for license information.\n\nusing System;\nusing System.Collections.Generic;\nusing System.IO;\nusing System.Threading.Tasks;\nusing Microsoft.AspNetCore.Server.Kestrel.Core.Adapter;\nusing Microsoft.AspNetCore.Server.Kestrel.Core.Adapter.Internal;\nusing Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http;\nusing Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure;\nusing Microsoft.AspNetCore.Server.Kestrel.Internal.System.IO.Pipelines;\nusing Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions;\nusing Microsoft.Extensions.Internal;\nusing Microsoft.Extensions.Logging;\n\nnamespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal\n{\n public class FrameConnection : IConnectionContext\n {\n private readonly FrameConnectionContext _context;\n private readonly Frame _frame;\n private readonly List _connectionAdapters;\n private readonly TaskCompletionSource _frameStartedTcs = new TaskCompletionSource();\n\n private AdaptedPipeline _adaptedPipeline;\n private Stream _filteredStream;\n private Task _adaptedPipelineTask = TaskCache.CompletedTask;\n\n public FrameConnection(FrameConnectionContext context)\n {\n _context = context;\n _frame = context.Frame;\n _connectionAdapters = context.ConnectionAdapters;\n }\n\n public string ConnectionId => _context.ConnectionId;\n public IPipeWriter Input => _context.Input.Writer;\n public IPipeReader Output => _context.Output.Reader;\n\n private PipeFactory PipeFactory => _context.PipeFactory;\n\n \/\/ Internal for testing\n internal PipeOptions AdaptedPipeOptions => new PipeOptions\n {\n ReaderScheduler = InlineScheduler.Default,\n WriterScheduler = InlineScheduler.Default,\n MaximumSizeHigh = _context.ServiceContext.ServerOptions.Limits.MaxRequestBufferSize ?? 0,\n MaximumSizeLow = _context.ServiceContext.ServerOptions.Limits.MaxRequestBufferSize ?? 0\n };\n\n private IKestrelTrace Log => _context.ServiceContext.Log;\n\n public void StartRequestProcessing()\n {\n _frame.Input = _context.Input.Reader;\n _frame.Output = _context.OutputProducer;\n\n if (_connectionAdapters.Count == 0)\n {\n _frame.Start();\n _frameStartedTcs.SetResult(null);\n }\n else\n {\n \/\/ Ensure that IConnectionAdapter.OnConnectionAsync does not run on the transport thread.\n _context.ServiceContext.ThreadPool.UnsafeRun(state =>\n {\n \/\/ ApplyConnectionAdaptersAsync should never throw. 
If it succeeds, it will call _frame.Start().\n \/\/ Otherwise, it will close the connection.\n var ignore = ((FrameConnection)state).ApplyConnectionAdaptersAsync();\n }, this);\n }\n }\n\n public void OnConnectionClosed()\n {\n Log.ConnectionStop(ConnectionId);\n KestrelEventSource.Log.ConnectionStop(this);\n }\n\n public async Task StopAsync()\n {\n await _frameStartedTcs.Task;\n await _frame.StopAsync();\n await _adaptedPipelineTask;\n }\n\n public void Abort(Exception ex)\n {\n _frame.Abort(ex);\n }\n\n public void Timeout()\n {\n _frame.SetBadRequestState(RequestRejectionReason.RequestTimeout);\n }\n\n private async Task ApplyConnectionAdaptersAsync()\n {\n try\n {\n var rawSocketOutput = _frame.Output;\n var rawStream = new RawStream(_frame.Input, rawSocketOutput);\n var adapterContext = new ConnectionAdapterContext(rawStream);\n var adaptedConnections = new IAdaptedConnection[_connectionAdapters.Count];\n\n for (var i = 0; i < _connectionAdapters.Count; i++)\n {\n var adaptedConnection = await _connectionAdapters[i].OnConnectionAsync(adapterContext);\n adaptedConnections[i] = adaptedConnection;\n adapterContext = new ConnectionAdapterContext(adaptedConnection.ConnectionStream);\n }\n\n if (adapterContext.ConnectionStream != rawStream)\n {\n _filteredStream = adapterContext.ConnectionStream;\n _adaptedPipeline = new AdaptedPipeline(\n adapterContext.ConnectionStream,\n PipeFactory.Create(AdaptedPipeOptions),\n PipeFactory.Create(AdaptedPipeOptions));\n\n _frame.Input = _adaptedPipeline.Input.Reader;\n _frame.Output = _adaptedPipeline.Output;\n\n _adaptedPipelineTask = RunAdaptedPipeline();\n }\n\n _frame.AdaptedConnections = adaptedConnections;\n _frame.Start();\n _frameStartedTcs.SetResult(null);\n }\n catch (Exception ex)\n {\n Log.LogError(0, ex, $\"Uncaught exception from the {nameof(IConnectionAdapter.OnConnectionAsync)} method of an {nameof(IConnectionAdapter)}.\");\n _frameStartedTcs.SetResult(null);\n CloseRawPipes();\n }\n }\n\n private async Task RunAdaptedPipeline()\n {\n try\n {\n await _adaptedPipeline.RunAsync();\n }\n catch (Exception ex)\n {\n \/\/ adaptedPipeline.RunAsync() shouldn't throw.\n Log.LogError(0, ex, $\"{nameof(FrameConnection)}.{nameof(ApplyConnectionAdaptersAsync)}\");\n }\n finally\n {\n CloseRawPipes();\n }\n }\n\n private void CloseRawPipes()\n {\n _filteredStream?.Dispose();\n _context.OutputProducer.Dispose();\n _context.Input.Reader.Complete();\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":76} {"diff_hunk":"@@ -13,11 +13,13 @@\n \/\/ See the License for the specific language governing permissions and\n \/\/ limitations under the License.\n \/\/ <\/copyright>\n+using System;\n using System.Collections.Generic;\n using System.Linq;\n using System.Text;\n using System.Threading;\n using System.Threading.Tasks;\n+using OpenTelemetry.Resources;\n using Thrift.Protocols;\n using Thrift.Protocols.Entities;\n ","source_code":"\ufeff\/\/ \n\/\/ Copyright 2018, OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions 
and\n\/\/ limitations under the License.\n\/\/ <\/copyright>\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading;\nusing System.Threading.Tasks;\nusing Thrift.Protocols;\nusing Thrift.Protocols.Entities;\n\nnamespace OpenTelemetry.Exporter.Jaeger.Implementation\n{\n public class Process : TAbstractBase\n {\n public Process()\n {\n }\n\n public Process(string serviceName, IDictionary processTags)\n : this()\n {\n this.ServiceName = serviceName;\n\n if (processTags != null)\n {\n this.Tags = new List();\n this.Tags.AddRange(processTags.Select(pt => pt.ToJaegerTag()));\n }\n }\n\n public string ServiceName { get; set; }\n\n public List Tags { get; set; }\n\n public async Task WriteAsync(TProtocol oprot, CancellationToken cancellationToken)\n {\n oprot.IncrementRecursionDepth();\n\n try\n {\n var struc = new TStruct(\"Process\");\n await oprot.WriteStructBeginAsync(struc, cancellationToken);\n\n var field = new TField\n {\n Name = \"serviceName\",\n Type = TType.String,\n ID = 1,\n };\n\n await oprot.WriteFieldBeginAsync(field, cancellationToken);\n await oprot.WriteStringAsync(this.ServiceName, cancellationToken);\n await oprot.WriteFieldEndAsync(cancellationToken);\n\n if (this.Tags != null)\n {\n field.Name = \"tags\";\n field.Type = TType.List;\n field.ID = 2;\n\n await oprot.WriteFieldBeginAsync(field, cancellationToken);\n {\n await oprot.WriteListBeginAsync(new TList(TType.Struct, this.Tags.Count), cancellationToken);\n\n foreach (JaegerTag jt in this.Tags)\n {\n await jt.WriteAsync(oprot, cancellationToken);\n }\n\n await oprot.WriteListEndAsync(cancellationToken);\n }\n\n await oprot.WriteFieldEndAsync(cancellationToken);\n }\n\n await oprot.WriteFieldStopAsync(cancellationToken);\n await oprot.WriteStructEndAsync(cancellationToken);\n }\n finally\n {\n oprot.DecrementRecursionDepth();\n }\n }\n\n public override string ToString()\n {\n var sb = new StringBuilder(\"Process(\");\n sb.Append(\", ServiceName: \");\n sb.Append(this.ServiceName);\n\n if (this.Tags != null)\n {\n sb.Append(\", Tags: \");\n sb.Append(this.Tags);\n }\n\n sb.Append(\")\");\n return sb.ToString();\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":77} {"diff_hunk":"@@ -69,7 +69,7 @@ namespace Nethermind.Runner.Ethereum.Steps\n \n var validator = new AuRaValidatorProcessorFactory(\n readOnlyTxProcessingEnv.StateProvider,\n- new AbiEncoder(),\n+ _context.AbiEncoder,\n readOnlyTxProcessingEnv.TransactionProcessor,\n new ReadOnlyTransactionProcessorSource(readOnlyTxProcessingEnv),\n readOnlyTxProcessingEnv.BlockTree,","source_code":"\ufeff\/\/ Copyright (c) 2018 Demerzel Solutions Limited\n\/\/ This file is part of the Nethermind library.\n\/\/ \n\/\/ The Nethermind library is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/ \n\/\/ The Nethermind library is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/ \n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with the Nethermind. 
If not, see .\n\nusing Nethermind.Abi;\nusing Nethermind.Blockchain;\nusing Nethermind.Blockchain.Processing;\nusing Nethermind.Consensus.AuRa;\nusing Nethermind.Consensus.AuRa.Config;\nusing Nethermind.Db;\nusing Nethermind.Logging;\nusing Nethermind.Runner.Ethereum.Context;\nusing Nethermind.Store;\n\nnamespace Nethermind.Runner.Ethereum.Steps\n{\n [RunnerStepDependencies(typeof(InitializeNetwork), typeof(InitializeFinalizationAuRa), typeof(SetupKeyStore))]\n public class StartBlockProducerAuRa : StartBlockProducer\n {\n private readonly AuRaEthereumRunnerContext _context;\n\n public StartBlockProducerAuRa(AuRaEthereumRunnerContext context) : base(context)\n {\n _context = context;\n }\n\n protected override void BuildProducer()\n {\n if (_context.NodeKey == null) throw new StepDependencyException(nameof(_context.NodeKey));\n if (_context.ChainSpec == null) throw new StepDependencyException(nameof(_context.ChainSpec));\n \n ILogger logger = _context.LogManager.GetClassLogger();\n if (logger.IsWarn) logger.Warn(\"Starting AuRa block producer & sealer\");\n \n IAuRaStepCalculator stepCalculator = new AuRaStepCalculator(_context.ChainSpec.AuRa.StepDuration, _context.Timestamper);\n BlockProducerContext producerContext = GetProducerChain();\n var auraConfig = _context.Config();\n _context.BlockProducer = new AuRaBlockProducer(\n producerContext.PendingTxSelector,\n producerContext.ChainProcessor,\n producerContext.ReadOnlyStateProvider,\n _context.Sealer,\n _context.BlockTree,\n _context.BlockProcessingQueue,\n _context.Timestamper,\n _context.LogManager,\n stepCalculator,\n auraConfig,\n _context.NodeKey.Address);\n }\n\n protected override BlockProcessor CreateBlockProcessor(ReadOnlyTxProcessingEnv readOnlyTxProcessingEnv, IReadOnlyDbProvider readOnlyDbProvider)\n {\n if (_context.RewardCalculatorSource == null) throw new StepDependencyException(nameof(_context.RewardCalculatorSource));\n if (_context.ValidatorStore == null) throw new StepDependencyException(nameof(_context.ValidatorStore));\n if (_context.ChainSpec == null) throw new StepDependencyException(nameof(_context.ChainSpec));\n \n var validator = new AuRaValidatorProcessorFactory(\n readOnlyTxProcessingEnv.StateProvider,\n new AbiEncoder(),\n readOnlyTxProcessingEnv.TransactionProcessor,\n new ReadOnlyTransactionProcessorSource(readOnlyTxProcessingEnv),\n readOnlyTxProcessingEnv.BlockTree,\n _context.ReceiptStorage,\n _context.ValidatorStore,\n _context.LogManager)\n .CreateValidatorProcessor(_context.ChainSpec.AuRa.Validators);\n \n var blockProducer = new AuRaBlockProcessor(\n _context.SpecProvider,\n _context.BlockValidator,\n _context.RewardCalculatorSource.Get(readOnlyTxProcessingEnv.TransactionProcessor),\n readOnlyTxProcessingEnv.TransactionProcessor,\n readOnlyDbProvider.StateDb,\n readOnlyDbProvider.CodeDb,\n readOnlyTxProcessingEnv.StateProvider,\n readOnlyTxProcessingEnv.StorageProvider,\n _context.TxPool,\n _context.ReceiptStorage,\n _context.LogManager, \n validator);\n \n validator.SetFinalizationManager(_context.FinalizationManager, true);\n\n return blockProducer;\n }\n }\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":78} {"diff_hunk":"@@ -117,8 +117,8 @@ namespace Nethermind.Consensus.AuRa.Transactions\n {\n public const int MaxCacheSize = 4096;\n \n- internal ICache<(Keccak ParentHash, Address Sender), ITransactionPermissionContract.TxPermissions?> Permissions { get; } =\n- new LruCache<(Keccak ParentHash, Address Sender), ITransactionPermissionContract.TxPermissions?>(MaxCacheSize, 
\"TxPermissions\");\n+ internal ICache<(Keccak ParentHash, Address Sender), (ITransactionPermissionContract.TxPermissions Permissions, bool ContractExists)> Permissions { get; } =\n+ new LruCache<(Keccak ParentHash, Address Sender), (ITransactionPermissionContract.TxPermissions Permissions, bool ContractExists)>(MaxCacheSize, \"TxPermissions\");\n }\n }\n }","source_code":"\ufeff\/\/ Copyright (c) 2021 Demerzel Solutions Limited\n\/\/ This file is part of the Nethermind library.\n\/\/ \n\/\/ The Nethermind library is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/ \n\/\/ The Nethermind library is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/ \n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with the Nethermind. If not, see .\n\/\/ \n\nusing System;\nusing System.Diagnostics;\nusing Nethermind.Abi;\nusing Nethermind.Consensus.AuRa.Contracts;\nusing Nethermind.Consensus.Transactions;\nusing Nethermind.Core;\nusing Nethermind.Core.Caching;\nusing Nethermind.Core.Crypto;\nusing Nethermind.Logging;\nusing Nethermind.State;\n\nnamespace Nethermind.Consensus.AuRa.Transactions\n{\n public class PermissionBasedTxFilter : ITxFilter\n {\n private readonly VersionedContract _contract;\n private readonly Cache _cache;\n private readonly IReadOnlyStateProvider _stateProvider;\n private readonly ILogger _logger;\n\n public PermissionBasedTxFilter(\n VersionedContract contract,\n Cache cache,\n IReadOnlyStateProvider stateProvider,\n ILogManager logManager)\n {\n _contract = contract ?? throw new ArgumentNullException(nameof(contract));\n _cache = cache ?? throw new ArgumentNullException(nameof(cache));\n _stateProvider = stateProvider ?? throw new ArgumentNullException(nameof(stateProvider));\n _logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager));\n }\n \n public (bool Allowed, string Reason) IsAllowed(Transaction tx, BlockHeader parentHeader)\n {\n if (parentHeader.Number + 1 < _contract.Activation)\n {\n return (true, string.Empty);\n }\n\n var txType = GetTxType(tx);\n var txPermissions = GetPermissions(tx, parentHeader);\n if (_logger.IsTrace) _logger.Trace($\"Given transaction: {tx.Hash} sender: {tx.SenderAddress} to: {tx.To} value: {tx.Value}, gas_price: {tx.GasPrice}. \" +\n $\"Permissions required: {txType}, got: {txPermissions}.\");\n return (txPermissions & txType) == txType ? (true, string.Empty) : (false, \"permission denied\");\n }\n\n private ITransactionPermissionContract.TxPermissions GetPermissions(Transaction tx, BlockHeader parentHeader)\n {\n var key = (parentHeader.Hash, tx.SenderAddress);\n var txCachedPermissions = _cache.Permissions.Get(key);\n return txCachedPermissions ?? 
GetPermissionsFromContract(tx, parentHeader, key);\n }\n\n private ITransactionPermissionContract.TxPermissions GetPermissionsFromContract(\n Transaction tx,\n BlockHeader parentHeader,\n in (Keccak Hash, Address SenderAddress) key)\n {\n ITransactionPermissionContract.TxPermissions txPermissions = ITransactionPermissionContract.TxPermissions.None;\n bool shouldCache = true;\n \n ITransactionPermissionContract versionedContract = GetVersionedContract(parentHeader);\n if (versionedContract is null)\n {\n if (_logger.IsError) _logger.Error(\"Unknown version of tx permissions contract is used.\");\n }\n else\n {\n if (_logger.IsTrace) _logger.Trace($\"Version of tx permission contract: {versionedContract.Version}.\");\n \n try\n {\n (txPermissions, shouldCache) = versionedContract.AllowedTxTypes(parentHeader, tx);\n }\n catch (AbiException e)\n {\n if (_logger.IsError) _logger.Error($\"Error calling tx permissions contract on {parentHeader.ToString(BlockHeader.Format.FullHashAndNumber)} for tx {tx.ToShortString()} {new StackTrace()}.\", e);\n }\n }\n\n if (shouldCache)\n {\n _cache.Permissions.Set(key, txPermissions);\n }\n\n return txPermissions;\n }\n\n private ITransactionPermissionContract? GetVersionedContract(BlockHeader blockHeader)\n => _contract.ResolveVersion(blockHeader);\n\n private ITransactionPermissionContract.TxPermissions GetTxType(Transaction tx) =>\n tx.IsContractCreation\n ? ITransactionPermissionContract.TxPermissions.Create\n : (_stateProvider.GetCode(tx.To) ?? Array.Empty()).Length != 0\n ? ITransactionPermissionContract.TxPermissions.Call\n : ITransactionPermissionContract.TxPermissions.Basic;\n \n public class Cache\n {\n public const int MaxCacheSize = 4096;\n \n internal ICache<(Keccak ParentHash, Address Sender), ITransactionPermissionContract.TxPermissions?> Permissions { get; } =\n new LruCache<(Keccak ParentHash, Address Sender), ITransactionPermissionContract.TxPermissions?>(MaxCacheSize, \"TxPermissions\");\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":79} {"diff_hunk":"@@ -38,7 +38,7 @@ namespace Datadog.Trace.ClrProfiler.IntegrationTests\n public class AspNetMvc4TestsCallTargetClassic : AspNetMvc4Tests\n {\n public AspNetMvc4TestsCallTargetClassic(IisFixture iisFixture, ITestOutputHelper output)\n- : base(iisFixture, output, enableCallTarget: true, classicMode: true)\n+ : base(iisFixture, output, enableCallTarget: true, classicMode: true, enableFeatureFlag: false)\n {\n }\n }","source_code":"\/\/ \n\/\/ Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.\n\/\/ This product includes software developed at Datadog (https:\/\/www.datadoghq.com\/). 
Copyright 2017 Datadog, Inc.\n\/\/ <\/copyright>\n\n#if NET461\n#pragma warning disable SA1402 \/\/ File may only contain a single class\n#pragma warning disable SA1649 \/\/ File name must match first type name\n\nusing System.Net;\nusing System.Threading.Tasks;\nusing Datadog.Trace.TestHelpers;\nusing Xunit;\nusing Xunit.Abstractions;\n\nnamespace Datadog.Trace.ClrProfiler.IntegrationTests\n{\n [CollectionDefinition(\"IisTests\", DisableParallelization = true)]\n [Collection(\"IisTests\")]\n public class AspNetMvc4TestsCallsiteClassic : AspNetMvc4Tests\n {\n public AspNetMvc4TestsCallsiteClassic(IisFixture iisFixture, ITestOutputHelper output)\n : base(iisFixture, output, enableCallTarget: false, classicMode: true)\n {\n }\n }\n\n [Collection(\"IisTests\")]\n public class AspNetMvc4TestsCallsiteIntegrated : AspNetMvc4Tests\n {\n public AspNetMvc4TestsCallsiteIntegrated(IisFixture iisFixture, ITestOutputHelper output)\n : base(iisFixture, output, enableCallTarget: false, classicMode: false)\n {\n }\n }\n\n [Collection(\"IisTests\")]\n public class AspNetMvc4TestsCallTargetClassic : AspNetMvc4Tests\n {\n public AspNetMvc4TestsCallTargetClassic(IisFixture iisFixture, ITestOutputHelper output)\n : base(iisFixture, output, enableCallTarget: true, classicMode: true)\n {\n }\n }\n\n [Collection(\"IisTests\")]\n public class AspNetMvc4TestsCallTargetIntegrated : AspNetMvc4Tests\n {\n public AspNetMvc4TestsCallTargetIntegrated(IisFixture iisFixture, ITestOutputHelper output)\n : base(iisFixture, output, enableCallTarget: true, classicMode: false)\n {\n }\n }\n\n public abstract class AspNetMvc4Tests : TestHelper, IClassFixture\n {\n private readonly IisFixture _iisFixture;\n\n public AspNetMvc4Tests(IisFixture iisFixture, ITestOutputHelper output, bool enableCallTarget, bool classicMode)\n : base(\"AspNetMvc4\", @\"test\\test-applications\\aspnet\", output)\n {\n SetServiceVersion(\"1.0.0\");\n SetCallTargetSettings(enableCallTarget);\n\n _iisFixture = iisFixture;\n _iisFixture.TryStartIis(this, classicMode);\n }\n\n [Theory]\n [Trait(\"Category\", \"EndToEnd\")]\n [Trait(\"RunOnWindows\", \"True\")]\n [Trait(\"LoadFromGAC\", \"True\")]\n [MemberData(nameof(AspNetMvc4TestData.WithoutFeatureFlag), MemberType = typeof(AspNetMvc4TestData))]\n public async Task SubmitsTraces(\n string path,\n string expectedAspNetResourceName,\n string expectedResourceName,\n HttpStatusCode expectedStatusCode,\n bool isError,\n string expectedErrorType,\n string expectedErrorMessage,\n SerializableDictionary tags)\n {\n await AssertWebServerSpan(\n path,\n _iisFixture.Agent,\n _iisFixture.HttpPort,\n expectedStatusCode,\n isError,\n expectedAspNetErrorType: expectedErrorType,\n expectedAspNetErrorMessage: expectedErrorMessage,\n expectedErrorType: expectedErrorType,\n expectedErrorMessage: expectedErrorMessage,\n \"web\",\n \"aspnet-mvc.request\",\n expectedAspNetResourceName,\n expectedResourceName,\n \"1.0.0\",\n tags);\n }\n }\n}\n#endif\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":80} {"diff_hunk":"@@ -12,7 +12,7 @@ using Datadog.Trace.DuckTyping;\n namespace Datadog.Trace.ClrProfiler.AutoInstrumentation.Testing.MsTestV2\n {\n \/\/\/ \n- \/\/\/ Microsoft.VisualStudio.TestPlatform.TestFramework.Execute calltarget instrumentation\n+ \/\/\/ Microsoft.VisualStudio.TestPlatform.MSTest.TestAdapter.Execution.TestMethodRunner.Execute calltarget instrumentation\n \/\/\/ <\/summary>\n [InstrumentMethod(\n AssemblyName = \"Microsoft.VisualStudio.TestPlatform.MSTest.TestAdapter\",","source_code":"\/\/ \n\/\/ 
Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.\n\/\/ This product includes software developed at Datadog (https:\/\/www.datadoghq.com\/). Copyright 2017 Datadog, Inc.\n\/\/ <\/copyright>\n\nusing System;\nusing System.ComponentModel;\nusing Datadog.Trace.Ci.Tags;\nusing Datadog.Trace.ClrProfiler.CallTarget;\nusing Datadog.Trace.DuckTyping;\n\nnamespace Datadog.Trace.ClrProfiler.AutoInstrumentation.Testing.MsTestV2\n{\n \/\/\/ \n \/\/\/ Microsoft.VisualStudio.TestPlatform.TestFramework.Execute calltarget instrumentation\n \/\/\/ <\/summary>\n [InstrumentMethod(\n AssemblyName = \"Microsoft.VisualStudio.TestPlatform.MSTest.TestAdapter\",\n TypeName = \"Microsoft.VisualStudio.TestPlatform.MSTest.TestAdapter.Execution.TestMethodRunner\",\n MethodName = \"Execute\",\n ReturnTypeName = \"Microsoft.VisualStudio.TestPlatform.MSTest.TestAdapter.ObjectModel.UnitTestResult\",\n ParameterTypeNames = new string[0],\n MinimumVersion = \"14.0.0\",\n MaximumVersion = \"14.*.*\",\n IntegrationName = MsTestIntegration.IntegrationName)]\n [Browsable(false)]\n [EditorBrowsable(EditorBrowsableState.Never)]\n public class TestMethodRunnerExecuteIntegration\n {\n \/\/\/ \n \/\/\/ OnMethodBegin callback\n \/\/\/ <\/summary>\n \/\/\/ Type of the target<\/typeparam>\n \/\/\/ Instance value, aka `this` of the instrumented method.<\/param>\n \/\/\/ Calltarget state value<\/returns>\n public static CallTargetState OnMethodBegin(TTarget instance)\n {\n if (!MsTestIntegration.IsEnabled)\n {\n return CallTargetState.GetDefault();\n }\n\n return new CallTargetState((Scope)null, instance);\n }\n\n \/\/\/ \n \/\/\/ OnAsyncMethodEnd callback\n \/\/\/ <\/summary>\n \/\/\/ Type of the target<\/typeparam>\n \/\/\/ Type of the return value<\/typeparam>\n \/\/\/ Instance value, aka `this` of the instrumented method.<\/param>\n \/\/\/ Return value<\/param>\n \/\/\/ Exception instance in case the original code threw an exception.<\/param>\n \/\/\/ Calltarget state value<\/param>\n \/\/\/ A response value, in an async scenario will be T of Task of T<\/returns>\n public static CallTargetReturn OnMethodEnd(TTarget instance, TReturn returnValue, Exception exception, CallTargetState state)\n where TTarget : ITestMethodRunner\n {\n if (returnValue is Array returnValueArray && returnValueArray.Length == 1)\n {\n object unitTestResultObject = returnValueArray.GetValue(0);\n if (unitTestResultObject != null && unitTestResultObject.TryDuckCast(out var unitTestResult))\n {\n var outcome = unitTestResult.Outcome;\n if (outcome == UnitTestResultOutcome.Inconclusive || outcome == UnitTestResultOutcome.NotRunnable || outcome == UnitTestResultOutcome.Ignored)\n {\n \/\/ This instrumentation catches all tests being ignored\n if (state.State != null && state.State.TryDuckCast(out var testMethodRunner))\n {\n var scope = MsTestIntegration.OnMethodBegin(testMethodRunner.TestMethodInfo, testMethodRunner.GetType());\n scope.Span.SetTag(TestTags.Status, TestTags.StatusSkip);\n scope.Span.SetTag(TestTags.SkipReason, unitTestResult.ErrorMessage);\n scope.Dispose();\n }\n }\n }\n }\n\n return new CallTargetReturn(returnValue);\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":81} {"diff_hunk":"@@ -118,29 +118,30 @@ namespace MvvmCross.Droid.Support.V7.RecyclerView\n public IEnumerable ItemsSource\n {\n get { return Adapter.ItemsSource; }\n- set { Adapter.ItemsSource = value; }\n+ set\n+ {\n+ var adapter = Adapter;\n+ if (adapter != null)\n+ adapter.ItemsSource = value;\n+ }\n 
}\n \n public int ItemTemplateId\n {\n get\n {\n- var singleItemDefaultTemplateSelector = ItemTemplateSelector as MvxDefaultTemplateSelector;\n-\n- if (singleItemDefaultTemplateSelector == null)\n+ if (!(ItemTemplateSelector is MvxDefaultTemplateSelector singleItemDefaultTemplateSelector))\n throw new InvalidOperationException(\n- $\"If you wan't to use single item-template RecyclerView Adapter you can't change it's\" +\n+ $\"If you don't want to use single item-template RecyclerView Adapter you can't change it's\" +\n $\"{nameof(IMvxTemplateSelector)} to anything other than {nameof(MvxDefaultTemplateSelector)}\");\n \n return singleItemDefaultTemplateSelector.ItemTemplateId;\n }\n set\n {\n- var singleItemDefaultTemplateSelector = ItemTemplateSelector as MvxDefaultTemplateSelector;\n-\n- if (singleItemDefaultTemplateSelector == null)\n+ if (!(ItemTemplateSelector is MvxDefaultTemplateSelector singleItemDefaultTemplateSelector))\n throw new InvalidOperationException(\n- $\"If you wan't to use single item-template RecyclerView Adapter you can't change it's\" +\n+ $\"If you don't want to use single item-template RecyclerView Adapter you can't change it's\" +\n $\"{nameof(IMvxTemplateSelector)} to anything other than {nameof(MvxDefaultTemplateSelector)}\");\n \n singleItemDefaultTemplateSelector.ItemTemplateId = value;","source_code":"\ufeff\/\/ Licensed to the .NET Foundation under one or more agreements.\n\/\/ The .NET Foundation licenses this file to you under the MS-PL license.\n\/\/ See the LICENSE file in the project root for more information.\n\nusing System;\nusing System.Collections;\nusing System.Windows.Input;\nusing Android.Content;\nusing Android.Runtime;\nusing Android.Util;\nusing MvvmCross.Binding.Attributes;\nusing MvvmCross.Droid.Support.V7.RecyclerView.AttributeHelpers;\nusing MvvmCross.Droid.Support.V7.RecyclerView.ItemTemplates;\nusing MvvmCross.Platforms.Android.Binding.Views;\n\nnamespace MvvmCross.Droid.Support.V7.RecyclerView\n{\n [Register(\"mvvmcross.droid.support.v7.recyclerview.MvxRecyclerView\")]\n public class MvxRecyclerView : Android.Support.V7.Widget.RecyclerView\n {\n public MvxRecyclerView(IntPtr javaReference, JniHandleOwnership transfer)\n : base(javaReference, transfer)\n {\n }\n\n public MvxRecyclerView(Context context, IAttributeSet attrs) :\n this(context, attrs, 0, new MvxRecyclerAdapter())\n {\n }\n\n public MvxRecyclerView(Context context, IAttributeSet attrs, int defStyle) \n : this(context, attrs, defStyle, new MvxRecyclerAdapter())\n {\n }\n\n public MvxRecyclerView(Context context, IAttributeSet attrs, int defStyle, IMvxRecyclerAdapter adapter) \n : base(context, attrs, defStyle)\n {\n \/\/ Note: Any calling derived class passing a null adapter is responsible for setting\n \/\/ it's own ItemTemplateSelector\n if (adapter == null)\n return;\n\n var currentLayoutManager = GetLayoutManager();\n\n \/\/ Love you Android\n \/\/ https:\/\/code.google.com\/p\/android\/issues\/detail?id=77846#c10\n \/\/ Don't believe those bastards, it's not fixed - workaround hack hack hack\n if (currentLayoutManager == null)\n SetLayoutManager(new MvxGuardedLinearLayoutManager(context));\n\n var itemTemplateId = MvxAttributeHelpers.ReadListItemTemplateId(context, attrs);\n var itemTemplateSelector = MvxRecyclerViewAttributeExtensions.BuildItemTemplateSelector(context, attrs, itemTemplateId);\n\n adapter.ItemTemplateSelector = itemTemplateSelector;\n Adapter = adapter;\n\n if (itemTemplateId == 0)\n itemTemplateId = global::Android.Resource.Layout.SimpleListItem1;\n\n 
if (itemTemplateSelector.GetType() == typeof(MvxDefaultTemplateSelector))\n ItemTemplateId = itemTemplateId;\n }\n\n public sealed override void SetLayoutManager(LayoutManager layout)\n {\n base.SetLayoutManager(layout);\n }\n\n protected override void OnDetachedFromWindow()\n {\n base.OnDetachedFromWindow();\n\n \/\/ Remove all the views that are currently in play.\n \/\/ This clears out all of the ViewHolder DataContexts by detaching the ViewHolder.\n \/\/ Eventually the GC will come along and clear out the binding contexts.\n \/\/ Issue #1405\n GetLayoutManager()?.RemoveAllViews();\n }\n\n public new IMvxRecyclerAdapter Adapter\n {\n get\n {\n return GetAdapter() as IMvxRecyclerAdapter;\n }\n set\n {\n var existing = Adapter;\n\n if (existing == value)\n return;\n\n \/\/ Support lib doesn't seem to have anything similar to IListAdapter yet\n \/\/ hence cast to Adapter.\n if (value != null && existing != null)\n {\n value.ItemsSource = existing.ItemsSource;\n value.ItemTemplateSelector = existing.ItemTemplateSelector;\n value.ItemClick = existing.ItemClick;\n value.ItemLongClick = existing.ItemLongClick;\n\n SwapAdapter((Adapter)value, false);\n }\n else\n {\n SetAdapter((Adapter)value);\n }\n\n if (existing != null)\n {\n existing.ItemsSource = null;\n }\n }\n }\n\n [MvxSetToNullAfterBinding]\n public IEnumerable ItemsSource\n {\n get { return Adapter.ItemsSource; }\n set { Adapter.ItemsSource = value; }\n }\n\n public int ItemTemplateId\n {\n get\n {\n var singleItemDefaultTemplateSelector = ItemTemplateSelector as MvxDefaultTemplateSelector;\n\n if (singleItemDefaultTemplateSelector == null)\n throw new InvalidOperationException(\n $\"If you wan't to use single item-template RecyclerView Adapter you can't change it's\" +\n $\"{nameof(IMvxTemplateSelector)} to anything other than {nameof(MvxDefaultTemplateSelector)}\");\n\n return singleItemDefaultTemplateSelector.ItemTemplateId;\n }\n set\n {\n var singleItemDefaultTemplateSelector = ItemTemplateSelector as MvxDefaultTemplateSelector;\n\n if (singleItemDefaultTemplateSelector == null)\n throw new InvalidOperationException(\n $\"If you wan't to use single item-template RecyclerView Adapter you can't change it's\" +\n $\"{nameof(IMvxTemplateSelector)} to anything other than {nameof(MvxDefaultTemplateSelector)}\");\n\n singleItemDefaultTemplateSelector.ItemTemplateId = value;\n Adapter.ItemTemplateSelector = singleItemDefaultTemplateSelector;\n }\n }\n\n public IMvxTemplateSelector ItemTemplateSelector\n {\n get { return Adapter.ItemTemplateSelector; }\n set { Adapter.ItemTemplateSelector = value; }\n }\n\n public ICommand ItemClick\n {\n get { return Adapter.ItemClick; }\n set { Adapter.ItemClick = value; }\n }\n\n public ICommand ItemLongClick\n {\n get { return Adapter.ItemLongClick; }\n set { Adapter.ItemLongClick = value; }\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":82} {"diff_hunk":"@@ -53,15 +53,15 @@ namespace MvvmCross.Forms.Platforms.Android.Presenters\n FormsPagePresenter.RegisterAttributeTypes();\n }\n \n- public override void ChangePresentation(MvxPresentationHint hint)\n+ public override async Task ChangePresentation(MvxPresentationHint hint)\n {\n- FormsPagePresenter.ChangePresentation(hint);\n- base.ChangePresentation(hint);\n+ await FormsPagePresenter.ChangePresentation(hint);\n+ return await base.ChangePresentation(hint);\n }\n \n- public override void Close(IMvxViewModel viewModel)\n+ public override Task Close(IMvxViewModel viewModel)\n {\n- FormsPagePresenter.Close(viewModel);\n+ return 
FormsPagePresenter.Close(viewModel);\n }\n \n public virtual bool ShowPlatformHost(Type hostViewModel = null)","source_code":"\ufeff\/\/ Licensed to the .NET Foundation under one or more agreements.\n\/\/ The .NET Foundation licenses this file to you under the MS-PL license.\n\/\/ See the LICENSE file in the project root for more information.\n\nusing MvvmCross.Droid.Support.V7.AppCompat;\nusing MvvmCross.Forms.Presenters;\nusing System;\nusing System.Collections.Generic;\nusing System.Reflection;\nusing MvvmCross.Forms.Core;\nusing MvvmCross.Platforms.Android.Views;\nusing MvvmCross.ViewModels;\nusing MvvmCross.Forms.Platforms.Android.Views;\nusing Xamarin.Forms;\n\nnamespace MvvmCross.Forms.Platforms.Android.Presenters\n{\n public class MvxFormsAndroidViewPresenter\n : MvxAppCompatViewPresenter, IMvxFormsViewPresenter\n {\n public MvxFormsAndroidViewPresenter(IEnumerable androidViewAssemblies, Application formsApplication) : base(androidViewAssemblies)\n {\n FormsApplication = formsApplication ?? throw new ArgumentNullException(nameof(formsApplication), \"MvxFormsApplication cannot be null\");\n }\n\n private Application _formsApplication;\n public Application FormsApplication\n {\n get { return _formsApplication; }\n set { _formsApplication = value; }\n }\n\n private IMvxFormsPagePresenter _formsPagePresenter;\n public virtual IMvxFormsPagePresenter FormsPagePresenter\n {\n get\n {\n if (_formsPagePresenter == null)\n throw new ArgumentNullException(nameof(FormsPagePresenter), \"IMvxFormsPagePresenter cannot be null. Set the value in CreateViewPresenter in the setup.\");\n return _formsPagePresenter;\n }\n set { _formsPagePresenter = value; }\n }\n\n public override void Show(MvxViewModelRequest request)\n {\n FormsPagePresenter.Show(request);\n }\n\n public override void RegisterAttributeTypes()\n {\n base.RegisterAttributeTypes();\n FormsPagePresenter.RegisterAttributeTypes();\n }\n\n public override void ChangePresentation(MvxPresentationHint hint)\n {\n FormsPagePresenter.ChangePresentation(hint);\n base.ChangePresentation(hint);\n }\n\n public override void Close(IMvxViewModel viewModel)\n {\n FormsPagePresenter.Close(viewModel);\n }\n\n public virtual bool ShowPlatformHost(Type hostViewModel = null)\n {\n \/\/ if there is no Actitivty host associated, assume is the current activity\n if (hostViewModel == null)\n hostViewModel = GetCurrentActivityViewModelType();\n\n var currentHostViewModelType = GetCurrentActivityViewModelType();\n if (hostViewModel != currentHostViewModelType)\n {\n var hostViewModelRequest = MvxViewModelRequest.GetDefaultRequest(hostViewModel);\n Show(hostViewModelRequest);\n }\n return true;\n }\n\n public virtual bool ClosePlatformViews()\n {\n CloseFragments();\n if (!(CurrentActivity is MvxFormsAppCompatActivity || CurrentActivity is MvxFormsApplicationActivity) &&\n !(CurrentActivity is MvxSplashScreenActivity || CurrentActivity is MvxSplashScreenAppCompatActivity))\n CurrentActivity?.Finish();\n return true;\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":83} {"diff_hunk":"@@ -91,5 +91,24 @@ namespace OpenTelemetry.Exporter\n \n return ExportResult.Success;\n }\n+\n+ \/\/\/ \n+ protected override bool OnShutdown(int timeoutMilliseconds)\n+ {\n+ if (this.Channel == null)\n+ {\n+ return true;\n+ }\n+\n+ if (timeoutMilliseconds == -1)\n+ {\n+ this.Channel.ShutdownAsync().Wait();\n+ return true;\n+ }\n+ else\n+ {\n+ return Task.WaitAny(new Task[] { this.Channel.ShutdownAsync(), Task.Delay(timeoutMilliseconds) }) == 0;\n+ }\n+ }\n }\n 
}","source_code":"\/\/ \n\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ <\/copyright>\n\nusing System;\nusing Grpc.Core;\nusing OpenTelemetry.Exporter.OpenTelemetryProtocol;\nusing OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation;\nusing OpenTelemetry.Metrics;\nusing OtlpCollector = Opentelemetry.Proto.Collector.Metrics.V1;\n\nnamespace OpenTelemetry.Exporter\n{\n \/\/\/ \n \/\/\/ Exporter consuming and exporting the data using\n \/\/\/ the OpenTelemetry protocol (OTLP).\n \/\/\/ <\/summary>\n public class OtlpMetricsExporter : BaseOtlpExporter\n {\n private readonly OtlpCollector.MetricsService.IMetricsServiceClient metricsClient;\n\n \/\/\/ \n \/\/\/ Initializes a new instance of the class.\n \/\/\/ <\/summary>\n \/\/\/ Configuration options for the exporter.<\/param>\n public OtlpMetricsExporter(OtlpExporterOptions options)\n : this(options, null)\n {\n }\n\n \/\/\/ \n \/\/\/ Initializes a new instance of the class.\n \/\/\/ <\/summary>\n \/\/\/ Configuration options for the exporter.<\/param>\n \/\/\/ .<\/param>\n internal OtlpMetricsExporter(OtlpExporterOptions options, OtlpCollector.MetricsService.IMetricsServiceClient metricsServiceClient = null)\n : base(options)\n {\n if (metricsServiceClient != null)\n {\n this.metricsClient = metricsServiceClient;\n }\n else\n {\n this.Channel = options.CreateChannel();\n this.metricsClient = new OtlpCollector.MetricsService.MetricsServiceClient(this.Channel);\n }\n }\n\n \/\/\/ \n public override ExportResult Export(in Batch batch)\n {\n \/\/ Prevents the exporter's gRPC and HTTP operations from being instrumented.\n using var scope = SuppressInstrumentationScope.Begin();\n\n var request = new OtlpCollector.ExportMetricsServiceRequest();\n\n request.AddBatch(this.ProcessResource, batch);\n var deadline = DateTime.UtcNow.AddMilliseconds(this.Options.TimeoutMilliseconds);\n\n try\n {\n this.metricsClient.Export(request, headers: this.Headers, deadline: deadline);\n }\n catch (RpcException ex)\n {\n OpenTelemetryProtocolExporterEventSource.Log.FailedToReachCollector(ex);\n return ExportResult.Failure;\n }\n catch (Exception ex)\n {\n OpenTelemetryProtocolExporterEventSource.Log.ExportMethodException(ex);\n return ExportResult.Failure;\n }\n finally\n {\n request.Return();\n }\n\n return ExportResult.Success;\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":84} {"diff_hunk":"@@ -3,6 +3,7 @@\n \n using System;\n using System.Collections.Generic;\n+using System.Linq;\n using System.Reflection;\n using Microsoft.AspNetCore.Hosting;\n using Microsoft.AspNetCore.Hosting.Server;","source_code":"\/\/ Copyright (c) .NET Foundation. All rights reserved.\n\/\/ Licensed under the Apache License, Version 2.0. 
See License.txt in the project root for license information.\n\nusing System;\nusing System.Collections.Generic;\nusing System.Reflection;\nusing Microsoft.AspNetCore.Hosting;\nusing Microsoft.AspNetCore.Hosting.Server;\nusing Microsoft.AspNetCore.Hosting.Server.Features;\nusing Microsoft.AspNetCore.Http.Features;\nusing Microsoft.AspNetCore.Server.Kestrel.Http;\nusing Microsoft.AspNetCore.Server.Kestrel.Infrastructure;\nusing Microsoft.Extensions.Logging;\nusing Microsoft.Extensions.Options;\n\nnamespace Microsoft.AspNetCore.Server.Kestrel\n{\n public class KestrelServer : IServer\n {\n private Stack _disposables;\n private readonly IApplicationLifetime _applicationLifetime;\n private readonly ILogger _logger;\n private readonly IServerAddressesFeature _serverAddresses;\n\n public KestrelServer(IOptions options, IApplicationLifetime applicationLifetime, ILoggerFactory loggerFactory)\n {\n if (options == null)\n {\n throw new ArgumentNullException(nameof(options));\n }\n\n if (applicationLifetime == null)\n {\n throw new ArgumentNullException(nameof(applicationLifetime));\n }\n\n if (loggerFactory == null)\n {\n throw new ArgumentNullException(nameof(loggerFactory));\n }\n\n Options = options.Value ?? new KestrelServerOptions();\n _applicationLifetime = applicationLifetime;\n _logger = loggerFactory.CreateLogger(typeof(KestrelServer).GetTypeInfo().Namespace);\n Features = new FeatureCollection();\n var componentFactory = new HttpComponentFactory(Options);\n Features.Set(componentFactory);\n _serverAddresses = new ServerAddressesFeature();\n Features.Set(_serverAddresses);\n }\n\n public IFeatureCollection Features { get; }\n\n public KestrelServerOptions Options { get; }\n\n public void Start(IHttpApplication application)\n {\n if (_disposables != null)\n {\n \/\/ The server has already started and\/or has not been cleaned up yet\n throw new InvalidOperationException(\"Server has already started.\");\n }\n _disposables = new Stack();\n\n try\n {\n var componentFactory = Features.Get();\n var dateHeaderValueManager = new DateHeaderValueManager();\n var trace = new KestrelTrace(_logger);\n var engine = new KestrelEngine(new ServiceContext\n {\n FrameFactory = context =>\n {\n return new Frame(application, context);\n },\n AppLifetime = _applicationLifetime,\n Log = trace,\n ThreadPool = new LoggingThreadPool(trace),\n DateHeaderValueManager = dateHeaderValueManager,\n ServerOptions = Options,\n HttpComponentFactory = componentFactory\n });\n\n _disposables.Push(engine);\n _disposables.Push(dateHeaderValueManager);\n\n var threadCount = Options.ThreadCount;\n\n if (threadCount <= 0)\n {\n throw new ArgumentOutOfRangeException(nameof(threadCount),\n threadCount,\n \"ThreadCount must be positive.\");\n }\n\n engine.Start(threadCount);\n var atLeastOneListener = false;\n\n foreach (var address in _serverAddresses.Addresses)\n {\n var parsedAddress = ServerAddress.FromUrl(address);\n if (parsedAddress == null)\n {\n throw new FormatException(\"Unrecognized listening address: \" + address);\n }\n else\n {\n atLeastOneListener = true;\n _disposables.Push(engine.CreateServer(\n parsedAddress));\n }\n }\n\n if (!atLeastOneListener)\n {\n throw new InvalidOperationException(\"No recognized listening addresses were configured.\");\n }\n }\n catch\n {\n Dispose();\n throw;\n }\n }\n\n public void Dispose()\n {\n if (_disposables != null)\n {\n while (_disposables.Count > 0)\n {\n _disposables.Pop().Dispose();\n }\n _disposables = null;\n }\n }\n 
}\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":85} {"diff_hunk":"@@ -42,11 +42,15 @@ namespace pwiz.Common.SystemUtil\n private Exception _readException;\n \n private readonly List _readLines = new List();\n+ private StringBuilder _errorLines;\n \n- public ProcessStreamReader(Process process)\n+ public ProcessStreamReader(Process process, bool keepErrorLines = false)\n {\n- Thread threadOut = new Thread(() => ReadStream(process.StandardOutput, ref _isOutComplete));\n- Thread threadErr = new Thread(() => ReadStream(process.StandardError, ref _isErrComplete));\n+ if (keepErrorLines)\n+ _errorLines = new StringBuilder();\n+\n+ Thread threadOut = new Thread(() => ReadStream(process.StandardOutput, ref _isOutComplete, null));\n+ Thread threadErr = new Thread(() => ReadStream(process.StandardError, ref _isErrComplete, _errorLines));\n threadOut.Start();\n threadErr.Start();\n }","source_code":"\ufeff\/*\r\n * Original author: Nicholas Shulman ,\r\n * MacCoss Lab, Department of Genome Sciences, UW\r\n *\r\n * Copyright 2009 University of Washington - Seattle, WA\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n *\/\r\nusing System;\r\nusing System.Collections.Generic;\r\nusing System.Diagnostics;\r\nusing System.IO;\r\nusing System.Threading;\r\n\r\nnamespace pwiz.Common.SystemUtil\r\n{\r\n \/\/\/ \r\n \/\/\/ Class for reading both standard out and standard error from a process.\r\n \/\/\/ \r\n \/\/\/ This is a tough problem, since TextReader.ReadLine() blocks, until it\r\n \/\/\/ has a line to return, or the process ends. It did not seem possible\r\n \/\/\/ to solve this on a single thread to present real-time feedback to the\r\n \/\/\/ user based on process output.\r\n \/\/\/ <\/para>\r\n \/\/\/ One solution presented on the web looked promising, but it did not\r\n \/\/\/ correctly interleave output from both streams reliably.<\/para>\r\n \/\/\/ <\/summary>\r\n public class ProcessStreamReader\r\n {\r\n private bool _isOutComplete;\r\n private bool _isErrComplete;\r\n private Exception _readException;\r\n\r\n private readonly List _readLines = new List();\r\n\r\n public ProcessStreamReader(Process process)\r\n {\r\n Thread threadOut = new Thread(() => ReadStream(process.StandardOutput, ref _isOutComplete));\r\n Thread threadErr = new Thread(() => ReadStream(process.StandardError, ref _isErrComplete));\r\n threadOut.Start();\r\n threadErr.Start();\r\n }\r\n\r\n \/\/\/ \r\n \/\/\/ Public access to read the next line from the interleaved output\r\n \/\/\/ of both standard out and standard error.\r\n \/\/\/ <\/summary>\r\n public string ReadLine(IProgressMonitor progressMonitor)\r\n {\r\n int timeout = progressMonitor == null ? 
Timeout.Infinite : 10000;\r\n lock (_readLines)\r\n {\r\n for (;;)\r\n {\r\n if (progressMonitor != null && progressMonitor.IsCanceled)\r\n {\r\n return string.Empty;\r\n }\r\n if (_readLines.Count > 0)\r\n {\r\n string line = _readLines[0];\r\n _readLines.RemoveAt(0);\r\n return line;\r\n }\r\n if (_readException != null)\r\n throw _readException;\r\n if (_isOutComplete && _isErrComplete)\r\n return null;\r\n Monitor.Wait(_readLines, timeout);\r\n }\r\n }\r\n }\r\n\r\n public string ReadLine()\r\n {\r\n return ReadLine(null);\r\n }\r\n\r\n \/\/\/ \r\n \/\/\/ Handles reading from a single stream, and noting its completion\r\n \/\/\/ on a background thread.\r\n \/\/\/ <\/summary>\r\n private void ReadStream(TextReader reader, ref bool isComplete)\r\n {\r\n try\r\n {\r\n string line;\r\n while ((line = reader.ReadLine()) != null)\r\n {\r\n lock (_readLines)\r\n {\r\n _readLines.Add(line);\r\n Monitor.Pulse(_readLines);\r\n }\r\n }\r\n\r\n lock(_readLines)\r\n {\r\n isComplete = true;\r\n Monitor.Pulse(_readLines);\r\n }\r\n }\r\n catch (Exception x)\r\n {\r\n lock (_readLines)\r\n {\r\n _readException = x;\r\n Monitor.Pulse(_readLines);\r\n }\r\n }\r\n }\r\n }\r\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":86} {"diff_hunk":"@@ -1,4 +1,4 @@\n-\ufeff\/\/ MvxAppStart.cs\n+\/\/ MvxAppStart.cs\n \n \/\/ MvvmCross is licensed using Microsoft Public License (Ms-PL)\n \/\/ Contributions and inspirations noted in readme.md and license.txt","source_code":"\ufeff\/\/ MvxAppStart.cs\n\n\/\/ MvvmCross is licensed using Microsoft Public License (Ms-PL)\n\/\/ Contributions and inspirations noted in readme.md and license.txt\n\/\/\n\/\/ Project Lead - Stuart Lodge, @slodge, me@slodge.com\n\nusing System;\nusing MvvmCross.Platform.Platform;\n\nnamespace MvvmCross.Core.ViewModels\n{\n [Obsolete(\"Please use MvxNavigationServiceAppStart instead\")]\n public class MvxAppStart\n : MvxNavigatingObject, IMvxAppStart\n where TViewModel : IMvxViewModel\n {\n public void Start(object hint = null)\n {\n if (hint != null)\n {\n MvxTrace.Trace(\"Hint ignored in default MvxAppStart\");\n }\n ShowViewModel();\n }\n }\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":87} {"diff_hunk":"@@ -16,6 +16,7 @@\n \n using FluentAssertions;\n using Nethermind.Blockchain.Validators;\n+using Nethermind.Core;\n using Nethermind.Core.Crypto;\n using Nethermind.Core.Specs;\n using Nethermind.Core.Test.Builders;","source_code":"\ufeff\/\/ Copyright (c) 2021 Demerzel Solutions Limited\n\/\/ This file is part of the Nethermind library.\n\/\/ \n\/\/ The Nethermind library is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/ \n\/\/ The Nethermind library is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/ \n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with the Nethermind. 
If not, see .\n\nusing FluentAssertions;\nusing Nethermind.Blockchain.Validators;\nusing Nethermind.Core.Crypto;\nusing Nethermind.Core.Specs;\nusing Nethermind.Core.Test.Builders;\nusing Nethermind.Specs.Forks;\nusing NSubstitute;\nusing NUnit.Framework;\n\nnamespace Nethermind.Blockchain.Test.Validators\n{\n [TestFixture]\n public class TxValidatorTests\n {\n [SetUp]\n public void Setup()\n {\n }\n\n [Test]\n public void Zero_r_is_not_valid()\n {\n byte[] sigData = new byte[65];\n \/\/ r is zero\n sigData[63] = 1; \/\/ correct s\n\n Signature signature = new Signature(sigData);\n var tx = Build.A.Transaction.WithSignature(signature).TestObject;\n \n TxValidator txValidator = new TxValidator(1);\n txValidator.IsWellFormed(tx, MuirGlacier.Instance).Should().BeFalse();\n }\n \n [Test]\n public void Zero_s_is_not_valid()\n {\n byte[] sigData = new byte[65];\n sigData[31] = 1; \/\/ correct r\n \/\/ s is zero\n \n Signature signature = new Signature(sigData);\n var tx = Build.A.Transaction.WithSignature(signature).TestObject;\n \n TxValidator txValidator = new TxValidator(1);\n txValidator.IsWellFormed(tx, MuirGlacier.Instance).Should().BeFalse();\n }\n \n [Test]\n public void Bad_chain_id_is_not_valid()\n {\n byte[] sigData = new byte[65];\n sigData[31] = 1; \/\/ correct r\n sigData[63] = 1; \/\/ correct s\n sigData[64] = 39;\n Signature signature = new Signature(sigData);\n var tx = Build.A.Transaction.WithSignature(signature).TestObject;\n \n TxValidator txValidator = new TxValidator(1);\n txValidator.IsWellFormed(tx, MuirGlacier.Instance).Should().BeFalse();\n }\n \n [Test]\n public void No_chain_id_tx_is_valid()\n {\n byte[] sigData = new byte[65];\n sigData[31] = 1; \/\/ correct r\n sigData[63] = 1; \/\/ correct s\n Signature signature = new Signature(sigData);\n var tx = Build.A.Transaction.WithSignature(signature).TestObject;\n \n TxValidator txValidator = new TxValidator(1);\n txValidator.IsWellFormed(tx, MuirGlacier.Instance).Should().BeTrue();\n }\n \n [Test]\n public void Is_valid_with_valid_chain_id()\n {\n byte[] sigData = new byte[65];\n sigData[31] = 1; \/\/ correct r\n sigData[63] = 1; \/\/ correct s\n sigData[64] = 38;\n Signature signature = new Signature(sigData);\n var tx = Build.A.Transaction.WithSignature(signature).TestObject;\n \n TxValidator txValidator = new TxValidator(1);\n txValidator.IsWellFormed(tx, MuirGlacier.Instance).Should().BeTrue();\n }\n \n [TestCase(true)]\n [TestCase(false)]\n public void Before_eip_155_has_to_have_valid_chain_id_unless_overridden(bool validateChainId)\n {\n byte[] sigData = new byte[65];\n sigData[31] = 1; \/\/ correct r\n sigData[63] = 1; \/\/ correct s\n sigData[64] = 41;\n Signature signature = new Signature(sigData);\n var tx = Build.A.Transaction.WithSignature(signature).TestObject;\n\n IReleaseSpec releaseSpec = Substitute.For();\n releaseSpec.IsEip155Enabled.Returns(false);\n releaseSpec.ValidateChainId.Returns(validateChainId);\n \n TxValidator txValidator = new TxValidator(1);\n txValidator.IsWellFormed(tx, releaseSpec).Should().Be(!validateChainId);\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":88} {"diff_hunk":"@@ -79,6 +79,11 @@ namespace Nethermind.JsonRpc.Data\n [JsonProperty(NullValueHandling = NullValueHandling.Include)]\n public byte[]? Input { get; set; }\n \n+ [JsonConverter(typeof(ByteConverter))]\n+ public byte Type { get; set; }\n+ \n+ public AccessListItemForRpc[]? AccessList { get; set; }\n+\n public UInt256? V { get; set; }\n \n public UInt256? 
S { get; set; }","source_code":"\ufeff\/\/ Copyright (c) 2021 Demerzel Solutions Limited\n\/\/ This file is part of the Nethermind library.\n\/\/ \n\/\/ The Nethermind library is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/ \n\/\/ The Nethermind library is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/ \n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with the Nethermind. If not, see .\n\nusing Nethermind.Core;\nusing Nethermind.Core.Crypto;\nusing Nethermind.Core.Extensions;\nusing Nethermind.Int256;\nusing Newtonsoft.Json;\n\nnamespace Nethermind.JsonRpc.Data\n{\n public class TransactionForRpc\n {\n public TransactionForRpc(Transaction transaction) : this(null, null, null, transaction) { }\n\n public TransactionForRpc(Keccak? blockHash, long? blockNumber, int? txIndex, Transaction transaction)\n {\n Hash = transaction.Hash;\n Nonce = transaction.Nonce;\n BlockHash = blockHash;\n BlockNumber = blockNumber;\n TransactionIndex = txIndex;\n From = transaction.SenderAddress;\n To = transaction.To;\n Value = transaction.Value;\n GasPrice = transaction.GasPrice;\n Gas = transaction.GasLimit;\n Input = Data = transaction.Data;\n\n Signature? signature = transaction.Signature;\n if (signature != null)\n {\n R = new UInt256(signature.R, true);\n S = new UInt256(signature.S, true);\n V = (UInt256?)signature.V;\n }\n }\n\n \/\/ ReSharper disable once UnusedMember.Global\n public TransactionForRpc()\n {\n }\n\n public Keccak? Hash { get; set; }\n public UInt256? Nonce { get; set; }\n\n [JsonProperty(NullValueHandling = NullValueHandling.Include)]\n public Keccak? BlockHash { get; set; }\n\n [JsonProperty(NullValueHandling = NullValueHandling.Include)]\n public long? BlockNumber { get; set; }\n\n [JsonProperty(NullValueHandling = NullValueHandling.Include)]\n public long? TransactionIndex { get; set; }\n\n public Address? From { get; set; }\n\n [JsonProperty(NullValueHandling = NullValueHandling.Include)]\n public Address? To { get; set; }\n\n public UInt256? Value { get; set; }\n public UInt256? GasPrice { get; set; }\n public long? Gas { get; set; }\n public byte[]? Data { get; set; }\n\n [JsonProperty(NullValueHandling = NullValueHandling.Include)]\n public byte[]? Input { get; set; }\n\n public UInt256? V { get; set; }\n\n public UInt256? S { get; set; }\n\n public UInt256? R { get; set; }\n\n public Transaction ToTransactionWithDefaults()\n {\n Transaction tx = new();\n tx.GasLimit = Gas ?? 90000;\n tx.GasPrice = GasPrice ?? 20.GWei();\n tx.Nonce = (ulong)(Nonce ?? 0); \/\/ here pick the last nonce?\n tx.To = To;\n tx.SenderAddress = From;\n tx.Value = Value ?? 0;\n tx.Data = Data ?? Input;\n\n return tx;\n }\n\n public Transaction ToTransaction()\n {\n Transaction tx = new();\n tx.GasLimit = Gas ?? 0;\n tx.GasPrice = GasPrice ?? 0;\n tx.Nonce = (ulong)(Nonce ?? 0); \/\/ here pick the last nonce?\n tx.To = To;\n tx.SenderAddress = From;\n tx.Value = Value ?? 0;\n tx.Data = Data ?? 
Input;\n\n return tx;\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":89} {"diff_hunk":"@@ -6,9 +6,7 @@ using System.Buffers;\n using System.IO.Pipelines;\n using System.Threading;\n using System.Threading.Tasks;\n-using Microsoft.AspNetCore.Http.Features;\n using Microsoft.AspNetCore.Connections;\n-using Microsoft.AspNetCore.Connections.Features;\n using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure;\n using Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal;\n using Microsoft.Extensions.Logging;","source_code":"\ufeff\/\/ Copyright (c) .NET Foundation. All rights reserved.\n\/\/ Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.\n\nusing System;\nusing System.Buffers;\nusing System.IO.Pipelines;\nusing System.Threading;\nusing System.Threading.Tasks;\nusing Microsoft.AspNetCore.Http.Features;\nusing Microsoft.AspNetCore.Connections;\nusing Microsoft.AspNetCore.Connections.Features;\nusing Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure;\nusing Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal;\nusing Microsoft.Extensions.Logging;\n\nnamespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal\n{\n public class ConnectionDispatcher : IConnectionDispatcher\n {\n private static long _lastConnectionId = long.MinValue;\n\n private readonly ServiceContext _serviceContext;\n private readonly ConnectionDelegate _connectionDelegate;\n\n public ConnectionDispatcher(ServiceContext serviceContext, ConnectionDelegate connectionDelegate)\n {\n _serviceContext = serviceContext;\n _connectionDelegate = connectionDelegate;\n }\n\n private IKestrelTrace Log => _serviceContext.Log;\n\n public Task OnConnection(TransportConnection connection)\n {\n \/\/ REVIEW: Unfortunately, we still need to use the service context to create the pipes since the settings\n \/\/ for the scheduler and limits are specified here\n var inputOptions = GetInputPipeOptions(_serviceContext, connection.MemoryPool, connection.InputWriterScheduler);\n var outputOptions = GetOutputPipeOptions(_serviceContext, connection.MemoryPool, connection.OutputReaderScheduler);\n\n var pair = DuplexPipe.CreateConnectionPair(inputOptions, outputOptions);\n\n \/\/ Set the transport and connection id\n connection.ConnectionId = CorrelationIdGenerator.GetNextId();\n connection.Transport = pair.Transport;\n\n \/\/ This *must* be set before returning from OnConnection\n connection.Application = pair.Application;\n\n return Execute(new KestrelConnection(connection));\n }\n\n private async Task Execute(KestrelConnection connection)\n {\n var id = Interlocked.Increment(ref _lastConnectionId);\n var connectionContext = connection.TransportConnection;\n\n try\n {\n _serviceContext.ConnectionManager.AddConnection(id, connection);\n\n Log.ConnectionStart(connectionContext.ConnectionId);\n KestrelEventSource.Log.ConnectionStart(connectionContext);\n\n using (BeginConnectionScope(connectionContext))\n {\n try\n {\n await _connectionDelegate(connectionContext);\n }\n catch (Exception ex)\n {\n Log.LogCritical(0, ex, $\"{nameof(ConnectionDispatcher)}.{nameof(Execute)}() {connectionContext.ConnectionId}\");\n }\n finally\n {\n \/\/ Complete the transport PipeReader and PipeWriter after calling into application code\n connectionContext.Transport.Input.Complete();\n connectionContext.Transport.Output.Complete();\n }\n\n \/\/ Wait for the transport to close\n await 
CancellationTokenAsTask(connectionContext.ConnectionClosed);\n }\n }\n finally\n {\n Log.ConnectionStop(connectionContext.ConnectionId);\n KestrelEventSource.Log.ConnectionStop(connectionContext);\n\n connection.Complete();\n\n _serviceContext.ConnectionManager.RemoveConnection(id);\n }\n }\n\n private IDisposable BeginConnectionScope(ConnectionContext connectionContext)\n {\n if (Log.IsEnabled(LogLevel.Critical))\n {\n return Log.BeginScope(new ConnectionLogScope(connectionContext.ConnectionId));\n }\n\n return null;\n }\n\n private static Task CancellationTokenAsTask(CancellationToken token)\n {\n if (token.IsCancellationRequested)\n {\n return Task.CompletedTask;\n }\n\n \/\/ Transports already dispatch prior to tripping ConnectionClosed\n \/\/ since application code can register to this token.\n var tcs = new TaskCompletionSource();\n token.Register(state => ((TaskCompletionSource)state).SetResult(null), tcs);\n return tcs.Task;\n }\n\n \/\/ Internal for testing\n internal static PipeOptions GetInputPipeOptions(ServiceContext serviceContext, MemoryPool memoryPool, PipeScheduler writerScheduler) => new PipeOptions\n (\n pool: memoryPool,\n readerScheduler: serviceContext.Scheduler,\n writerScheduler: writerScheduler,\n pauseWriterThreshold: serviceContext.ServerOptions.Limits.MaxRequestBufferSize ?? 0,\n resumeWriterThreshold: serviceContext.ServerOptions.Limits.MaxRequestBufferSize ?? 0,\n useSynchronizationContext: false,\n minimumSegmentSize: KestrelMemoryPool.MinimumSegmentSize\n );\n\n internal static PipeOptions GetOutputPipeOptions(ServiceContext serviceContext, MemoryPool memoryPool, PipeScheduler readerScheduler) => new PipeOptions\n (\n pool: memoryPool,\n readerScheduler: readerScheduler,\n writerScheduler: serviceContext.Scheduler,\n pauseWriterThreshold: GetOutputResponseBufferSize(serviceContext),\n resumeWriterThreshold: GetOutputResponseBufferSize(serviceContext),\n useSynchronizationContext: false,\n minimumSegmentSize: KestrelMemoryPool.MinimumSegmentSize\n );\n\n private static long GetOutputResponseBufferSize(ServiceContext serviceContext)\n {\n var bufferSize = serviceContext.ServerOptions.Limits.MaxResponseBufferSize;\n if (bufferSize == 0)\n {\n \/\/ 0 = no buffering so we need to configure the pipe so the the writer waits on the reader directly\n return 1;\n }\n\n \/\/ null means that we have no back pressure\n return bufferSize ?? 0;\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":90} {"diff_hunk":"@@ -11,6 +11,7 @@ namespace Microsoft.VisualStudio.TestPlatform.CommandLine.Processors.Utilities\n using Microsoft.VisualStudio.TestPlatform.Common.Interfaces;\n using Microsoft.VisualStudio.TestPlatform.ObjectModel;\n using Microsoft.VisualStudio.TestPlatform.ObjectModel.Utilities;\n+ using Microsoft.VisualStudio.TestPlatform.Common;\n \n \/\/\/ \n \/\/\/ Utilities to get the run settings from the provider and the commandline options specified.","source_code":"\/\/ Copyright (c) Microsoft Corporation. All rights reserved.\n\/\/ Licensed under the MIT license. 
See LICENSE file in the project root for full license information.\n\nnamespace Microsoft.VisualStudio.TestPlatform.CommandLine.Processors.Utilities\n{\n using System.Diagnostics.CodeAnalysis;\n using System.IO;\n using System.Xml;\n\n using Microsoft.VisualStudio.TestPlatform.Utilities;\n using Microsoft.VisualStudio.TestPlatform.Common.Interfaces;\n using Microsoft.VisualStudio.TestPlatform.ObjectModel;\n using Microsoft.VisualStudio.TestPlatform.ObjectModel.Utilities;\n\n \/\/\/ \n \/\/\/ Utilities to get the run settings from the provider and the commandline options specified.\n \/\/\/ <\/summary>\n internal class RunSettingsUtilities\n {\n private const string EmptyRunSettings = @\"<\/RunSettings>\";\n\n \/\/\/ \n \/\/\/ Gets the run settings to be used for the session.\n \/\/\/ <\/summary>\n \/\/\/ The current provider of run settings.<\/param>\n \/\/\/ The command line options specified. <\/param>\n \/\/\/ <\/returns>\n internal static string GetRunSettings(IRunSettingsProvider runSettingsProvider, CommandLineOptions commandlineOptions)\n {\n var runSettings = runSettingsProvider?.ActiveRunSettings?.SettingsXml;\n\n if (string.IsNullOrWhiteSpace(runSettings))\n {\n runSettings = EmptyRunSettings;\n }\n\n runSettings = GetEffectiveRunSettings(runSettings, commandlineOptions);\n\n return runSettings;\n }\n\n \/\/\/ \n \/\/\/ Gets the effective run settings adding the commandline options to the run settings if not already present.\n \/\/\/ <\/summary>\n \/\/\/ The run settings XML. <\/param>\n \/\/\/ The command line options. <\/param>\n \/\/\/ Effective run settings. <\/returns>\n [SuppressMessage(\"Microsoft.Security.Xml\", \"CA3053:UseXmlSecureResolver\",\n Justification = \"XmlDocument.XmlResolver is not available in core. Suppress until fxcop issue is fixed.\")]\n private static string GetEffectiveRunSettings(string runSettings, CommandLineOptions commandLineOptions)\n {\n var architecture = Constants.DefaultPlatform;\n\n if (commandLineOptions != null && commandLineOptions.ArchitectureSpecified)\n {\n architecture = commandLineOptions.TargetArchitecture;\n }\n\n var framework = Framework.DefaultFramework;\n\n if (commandLineOptions != null && commandLineOptions.FrameworkVersionSpecified)\n {\n framework = commandLineOptions.TargetFrameworkVersion;\n }\n\n var defaultResultsDirectory = Path.Combine(Directory.GetCurrentDirectory(), Constants.ResultsDirectoryName);\n\n using (var stream = new StringReader(runSettings))\n using (var reader = XmlReader.Create(stream, XmlRunSettingsUtilities.ReaderSettings))\n {\n var document = new XmlDocument();\n document.Load(reader);\n\n var navigator = document.CreateNavigator();\n\n InferRunSettingsHelper.UpdateRunSettingsWithUserProvidedSwitches(navigator, architecture, framework, defaultResultsDirectory);\n\n if (commandLineOptions != null && commandLineOptions.Parallel)\n {\n ParallelRunSettingsUtilities.UpdateRunSettingsWithParallelSettingIfNotConfigured(navigator);\n }\n\n return navigator.OuterXml;\n }\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":91} {"diff_hunk":"@@ -120,6 +120,7 @@ namespace Nethermind.DataMarketplace.Infrastructure\n EthJsonRpcClientProxy = ethJsonRpcClientProxy;\n HttpClient = httpClient;\n MonitoringService = monitoringService;\n+ BloomStorage = bloomStorage;\n }\n }\n }","source_code":"\/\/ Copyright (c) 2018 Demerzel Solutions Limited\n\/\/ This file is part of the Nethermind library.\n\/\/ \n\/\/ The Nethermind library is free software: you can redistribute it and\/or modify\n\/\/ it 
under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/ \n\/\/ The Nethermind library is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/ \n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with the Nethermind. If not, see .\n\nusing Nethermind.Blockchain;\nusing Nethermind.Blockchain.Filters;\nusing Nethermind.Blockchain.Receipts;\nusing Nethermind.Blockchain.TxPools;\nusing Nethermind.Config;\nusing Nethermind.Core;\nusing Nethermind.Core.Crypto;\nusing Nethermind.Core.Specs;\nusing Nethermind.Crypto;\nusing Nethermind.Specs;\nusing Nethermind.DataMarketplace.Channels;\nusing Nethermind.DataMarketplace.Core;\nusing Nethermind.DataMarketplace.Core.Configs;\nusing Nethermind.DataMarketplace.Core.Services;\nusing Nethermind.DataMarketplace.Infrastructure.Persistence.Mongo;\nusing Nethermind.Facade.Proxy;\nusing Nethermind.Grpc;\nusing Nethermind.JsonRpc.Modules;\nusing Nethermind.KeyStore;\nusing Nethermind.Logging;\nusing Nethermind.Monitoring;\nusing Nethermind.Network;\nusing Nethermind.Serialization.Json;\nusing Nethermind.Store;\nusing Nethermind.Wallet;\n\nnamespace Nethermind.DataMarketplace.Infrastructure\n{\n public class NdmRequiredServices\n {\n public IConfigProvider ConfigProvider { get; }\n public IConfigManager ConfigManager { get; }\n public INdmConfig NdmConfig { get; }\n public string BaseDbPath { get; }\n public IDbProvider RocksProvider { get; }\n public IMongoProvider MongoProvider { get; }\n public ILogManager LogManager { get; }\n public IBlockTree BlockTree { get; }\n public ITxPool TransactionPool { get; }\n public ISpecProvider SpecProvider { get; }\n public IReceiptStorage ReceiptStorage { get; }\n public IFilterStore FilterStore { get; }\n public IFilterManager FilterManager { get; }\n public IWallet Wallet { get; }\n public ITimestamper Timestamper { get; }\n public IEthereumEcdsa Ecdsa { get; }\n public IKeyStore KeyStore { get; }\n public IRpcModuleProvider RpcModuleProvider { get; }\n public IJsonSerializer JsonSerializer { get; }\n public ICryptoRandom CryptoRandom { get; }\n public IEnode Enode { get; }\n public INdmConsumerChannelManager NdmConsumerChannelManager { get; }\n public INdmDataPublisher NdmDataPublisher { get; }\n public IGrpcServer GrpcServer { get; }\n public IEthRequestService EthRequestService { get; }\n public INdmNotifier Notifier { get; }\n public bool EnableUnsecuredDevWallet { get; }\n public IBlockProcessor BlockProcessor { get; }\n public IJsonRpcClientProxy JsonRpcClientProxy { get; }\n public IEthJsonRpcClientProxy EthJsonRpcClientProxy { get; }\n public IHttpClient HttpClient { get; }\n public IMonitoringService MonitoringService { get; }\n\n public NdmRequiredServices(IConfigProvider configProvider, IConfigManager configManager, INdmConfig ndmConfig,\n string baseDbPath, IDbProvider rocksProvider, IMongoProvider mongoProvider, ILogManager logManager,\n IBlockTree blockTree, ITxPool transactionPool, ISpecProvider specProvider, IReceiptStorage receiptStorage,\n IFilterStore filterStore, IFilterManager filterManager, IWallet wallet, ITimestamper timestamper,\n IEthereumEcdsa ecdsa, IKeyStore keyStore, IRpcModuleProvider rpcModuleProvider,\n IJsonSerializer 
jsonSerializer, ICryptoRandom cryptoRandom, IEnode enode,\n INdmConsumerChannelManager ndmConsumerChannelManager, INdmDataPublisher ndmDataPublisher,\n IGrpcServer grpcServer, IEthRequestService ethRequestService, INdmNotifier notifier,\n bool enableUnsecuredDevWallet, IBlockProcessor blockProcessor, IJsonRpcClientProxy jsonRpcClientProxy,\n IEthJsonRpcClientProxy ethJsonRpcClientProxy, IHttpClient httpClient, IMonitoringService monitoringService)\n {\n ConfigProvider = configProvider;\n ConfigManager = configManager;\n NdmConfig = ndmConfig;\n BaseDbPath = baseDbPath;\n RocksProvider = rocksProvider;\n MongoProvider = mongoProvider;\n LogManager = logManager;\n BlockTree = blockTree;\n TransactionPool = transactionPool;\n SpecProvider = specProvider;\n ReceiptStorage = receiptStorage;\n FilterStore = filterStore;\n FilterManager = filterManager;\n Wallet = wallet;\n Timestamper = timestamper;\n Ecdsa = ecdsa;\n KeyStore = keyStore;\n RpcModuleProvider = rpcModuleProvider;\n JsonSerializer = jsonSerializer;\n CryptoRandom = cryptoRandom;\n Enode = enode;\n NdmConsumerChannelManager = ndmConsumerChannelManager;\n NdmDataPublisher = ndmDataPublisher;\n GrpcServer = grpcServer;\n EthRequestService = ethRequestService;\n Notifier = notifier;\n EnableUnsecuredDevWallet = enableUnsecuredDevWallet;\n BlockProcessor = blockProcessor;\n JsonRpcClientProxy = jsonRpcClientProxy;\n EthJsonRpcClientProxy = ethJsonRpcClientProxy;\n HttpClient = httpClient;\n MonitoringService = monitoringService;\n }\n }\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":92} {"diff_hunk":"@@ -11,7 +11,6 @@ namespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.DataCollection\n using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.DataCollection.Interfaces;\n using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Helpers;\n using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Helpers.Interfaces;\n- using Microsoft.VisualStudio.TestPlatform.ObjectModel;\n \n \/\/\/ \n \/\/\/ The datacollection launcher.","source_code":"\/\/ Copyright (c) Microsoft Corporation. All rights reserved.\n\/\/ Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\nnamespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.DataCollection\n{\n using System.Collections.Generic;\n using System.Diagnostics;\n using System.IO;\n using System.Reflection;\n\n using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.DataCollection.Interfaces;\n using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Helpers;\n using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Helpers.Interfaces;\n using Microsoft.VisualStudio.TestPlatform.ObjectModel;\n\n \/\/\/ \n \/\/\/ The datacollection launcher.\n \/\/\/ This works for Desktop local scenarios\n \/\/\/ <\/summary>\n internal class DataCollectionLauncher : IDataCollectionLauncher\n {\n private const string DataCollectorProcessName = \"datacollector.exe\";\n private const string DotnetProcessName = \"dotnet.exe\";\n private const string DotnetProcessNameXPlat = \"dotnet\";\n\n private string dataCollectorProcessName;\n private Process dataCollectorProcess;\n private IProcessHelper processHelper;\n \n \/\/\/ \n \/\/\/ The constructor.\n \/\/\/ <\/summary>\n public DataCollectionLauncher()\n : this(new ProcessHelper())\n {\n }\n\n \/\/\/ \n \/\/\/ Initializes a new instance of the class.\n \/\/\/ <\/summary>\n \/\/\/ \n \/\/\/ The process helper. 
\n \/\/\/ <\/param>\n internal DataCollectionLauncher(IProcessHelper processHelper)\n {\n this.processHelper = processHelper;\n this.dataCollectorProcess = null;\n }\n\n \/\/\/ \n \/\/\/ Initialize with desired architecture for the host\n \/\/\/ <\/summary>\n \/\/\/ architecture for the host<\/param>\n public void Initialize(Architecture architecture)\n {\n this.dataCollectorProcessName = DataCollectorProcessName;\n }\n\n \/\/\/ \n \/\/\/ Launches the test host for discovery\/execution.\n \/\/\/ <\/summary>\n \/\/\/ Environment variables for the process.<\/param>\n \/\/\/ The command line arguments to pass to the process.<\/param>\n \/\/\/ ProcessId of launched Process. 0 means not launched.<\/returns>\n public virtual int LaunchDataCollector(IDictionary environmentVariables, IList commandLineArguments)\n {\n var currentWorkingDirectory = Path.GetDirectoryName(typeof(DataCollectionLauncher).GetTypeInfo().Assembly.Location);\n string dataCollectorProcessPath, processWorkingDirectory = null;\n\n \/\/ TODO: DRY: Move this code to a common place\n \/\/ If we are running in the dotnet.exe context we do not want to launch dataCollector.exe but dotnet.exe with the dataCollector assembly. \n \/\/ Since dotnet.exe is already built for multiple platforms this would avoid building dataCollector.exe also in multiple platforms.\n var currentProcessFileName = this.processHelper.GetCurrentProcessFileName();\n if (currentProcessFileName.EndsWith(DotnetProcessName) || currentProcessFileName.EndsWith(DotnetProcessNameXPlat))\n {\n dataCollectorProcessPath = currentProcessFileName;\n var dataCollectorAssemblyPath = Path.Combine(currentWorkingDirectory, this.dataCollectorProcessName.Replace(\"exe\", \"dll\"));\n commandLineArguments.Insert(0, dataCollectorAssemblyPath);\n processWorkingDirectory = Path.GetDirectoryName(currentProcessFileName);\n }\n else\n {\n dataCollectorProcessPath = Path.Combine(currentWorkingDirectory, this.dataCollectorProcessName);\n \/\/ For IDEs and other scenario - Current directory should be the working directory - not the vstest.console.exe location\n \/\/ For VS - this becomes the solution directory for example\n \/\/ \"TestResults\" directory will be created at \"current directory\" of test host\n processWorkingDirectory = Directory.GetCurrentDirectory();\n }\n\n var argumentsString = string.Join(\" \", commandLineArguments);\n\n this.dataCollectorProcess = this.processHelper.LaunchProcess(dataCollectorProcessPath, argumentsString, processWorkingDirectory);\n return this.dataCollectorProcess.Id;\n }\n\n }\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":93} {"diff_hunk":"@@ -63,7 +63,7 @@ namespace Microsoft.AspNet.Server.Kestrel.Networking\n }\n catch (Exception ex)\n {\n- Trace.WriteLine(\"UvConnectRequest \" + ex.ToString());\n+ req._log.LogError(\"UvConnectRequest\", ex);\n }\n }\n }","source_code":"\/\/ Copyright (c) .NET Foundation. All rights reserved.\n\/\/ Licensed under the Apache License, Version 2.0. 
See License.txt in the project root for license information.\n\nusing System;\nusing System.Collections.Generic;\nusing System.Diagnostics;\nusing System.Runtime.InteropServices;\n\nnamespace Microsoft.AspNet.Server.Kestrel.Networking\n{\n \/\/\/ <summary>\n \/\/\/ Summary description for UvWriteRequest\n \/\/\/ <\/summary>\n public class UvConnectRequest : UvRequest\n {\n private readonly static Libuv.uv_connect_cb _uv_connect_cb = UvConnectCb;\n\n private Action<UvConnectRequest, int, Exception, object> _callback;\n private object _state;\n\n public void Init(UvLoopHandle loop)\n {\n var requestSize = loop.Libuv.req_size(Libuv.RequestType.CONNECT);\n CreateMemory(\n loop.Libuv,\n loop.ThreadId,\n requestSize);\n }\n\n public void Connect(\n UvPipeHandle pipe, \n string name, \n Action<UvConnectRequest, int, Exception, object> callback, \n object state)\n {\n _callback = callback;\n _state = state;\n\n Pin();\n Libuv.pipe_connect(this, pipe, name, _uv_connect_cb);\n }\n\n private static void UvConnectCb(IntPtr ptr, int status)\n {\n var req = FromIntPtr<UvConnectRequest>(ptr);\n req.Unpin();\n\n var callback = req._callback;\n req._callback = null;\n\n var state = req._state;\n req._state = null;\n\n Exception error = null;\n if (status < 0)\n {\n req.Libuv.Check(status, out error);\n }\n\n try\n {\n callback(req, status, error, state);\n }\n catch (Exception ex)\n {\n Trace.WriteLine(\"UvConnectRequest \" + ex.ToString());\n }\n }\n }\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":94} {"diff_hunk":"@@ -150,6 +150,20 @@ namespace NLog.LayoutRenderers.Wrappers\n }\n }\n \n+ bool InvalidateCachedValue(LogEventInfo logEvent)\n+ {\n+ var newCacheKey = CacheKey?.Render(logEvent);\n+ if (_cachedValue == null || _renderedCacheKey != newCacheKey || (_cachedValueTimeout.HasValue && logEvent.TimeStamp > _cachedValueExpires))\n+ {\n+ _renderedCacheKey = newCacheKey;\n+ if (_cachedValueTimeout.HasValue)\n+ _cachedValueExpires = logEvent.TimeStamp + _cachedValueTimeout.Value;\n+ return true;\n+ }\n+\n+ return false;\n+ }\n+\n \/\/\/ <inheritdoc\/>\n string IStringValueRenderer.GetFormattedString(LogEventInfo logEvent) => Cached ? RenderInner(logEvent) : null;\n }","source_code":"\/\/ \n\/\/ Copyright (c) 2004-2019 Jaroslaw Kowalski <jaak@jkowalski.net>, Kim Christensen, Julian Verdurmen\n\/\/ \n\/\/ All rights reserved.\n\/\/ \n\/\/ Redistribution and use in source and binary forms, with or without \n\/\/ modification, are permitted provided that the following conditions \n\/\/ are met:\n\/\/ \n\/\/ * Redistributions of source code must retain the above copyright notice, \n\/\/ this list of conditions and the following disclaimer. \n\/\/ \n\/\/ * Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation\n\/\/ and\/or other materials provided with the distribution. \n\/\/ \n\/\/ * Neither the name of Jaroslaw Kowalski nor the names of its \n\/\/ contributors may be used to endorse or promote products derived from this\n\/\/ software without specific prior written permission. \n\/\/ \n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n\/\/ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \n\/\/ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE \n\/\/ ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE \n\/\/ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR \n\/\/ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n\/\/ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS \n\/\/ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN \n\/\/ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) \n\/\/ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF \n\/\/ THE POSSIBILITY OF SUCH DAMAGE.\n\/\/ \n\nnamespace NLog.LayoutRenderers.Wrappers\n{\n using System;\n using System.ComponentModel;\n using NLog.Config;\n using NLog.Internal;\n using NLog.Layouts;\n\n \/\/\/ \n \/\/\/ Applies caching to another layout output.\n \/\/\/ <\/summary>\n \/\/\/ \n \/\/\/ The value of the inner layout will be rendered only once and reused subsequently.\n \/\/\/ <\/remarks>\n [LayoutRenderer(\"cached\")]\n [AmbientProperty(\"Cached\")]\n [AmbientProperty(\"ClearCache\")]\n [ThreadAgnostic]\n public sealed class CachedLayoutRendererWrapper : WrapperLayoutRendererBase, IStringValueRenderer\n {\n \/\/\/ \n \/\/\/ A value indicating when the cache is cleared.\n \/\/\/ <\/summary>\n [Flags]\n public enum ClearCacheOption \n { \n \/\/\/ Never clear the cache.<\/summary>\n None = 0,\n \/\/\/ Clear the cache whenever the is initialized.<\/summary>\n OnInit = 1,\n \/\/\/ Clear the cache whenever the is closed.<\/summary>\n OnClose = 2\n }\n\n private string _cachedValue;\n private string _renderedCacheKey;\n\n \/\/\/ \n \/\/\/ Initializes a new instance of the class.\n \/\/\/ <\/summary>\n public CachedLayoutRendererWrapper()\n {\n Cached = true;\n ClearCache = ClearCacheOption.OnInit | ClearCacheOption.OnClose;\n }\n\n \/\/\/ \n \/\/\/ Gets or sets a value indicating whether this is enabled.\n \/\/\/ <\/summary>\n \/\/\/ \n [DefaultValue(true)]\n public bool Cached { get; set; }\n\n \/\/\/ \n \/\/\/ Gets or sets a value indicating when the cache is cleared.\n \/\/\/ <\/summary>\n \/\/\/ \n public ClearCacheOption ClearCache { get; set; }\n\n \/\/\/ \n \/\/\/ Cachekey. If the cachekey changes, resets the value. 
For example, the cachekey would be the current day.s\n \/\/\/ <\/summary>\n \/\/\/ \n public Layout CacheKey { get; set; }\n\n \/\/\/ \n \/\/\/ Initializes the layout renderer.\n \/\/\/ <\/summary>\n protected override void InitializeLayoutRenderer()\n {\n base.InitializeLayoutRenderer();\n if ((ClearCache & ClearCacheOption.OnInit) == ClearCacheOption.OnInit)\n _cachedValue = null;\n }\n\n \/\/\/ \n \/\/\/ Closes the layout renderer.\n \/\/\/ <\/summary>\n protected override void CloseLayoutRenderer()\n {\n base.CloseLayoutRenderer();\n if ((ClearCache & ClearCacheOption.OnClose) == ClearCacheOption.OnClose)\n _cachedValue = null;\n }\n\n \/\/\/ \n \/\/\/ Transforms the output of another layout.\n \/\/\/ <\/summary>\n \/\/\/ Output to be transform.<\/param>\n \/\/\/ Transformed text.<\/returns>\n protected override string Transform(string text)\n {\n return text;\n }\n\n \/\/\/ \n \/\/\/ Renders the inner layout contents.\n \/\/\/ <\/summary>\n \/\/\/ The log event.<\/param>\n \/\/\/ Contents of inner layout.<\/returns>\n protected override string RenderInner(LogEventInfo logEvent)\n {\n if (Cached)\n {\n var newCacheKey = CacheKey?.Render(logEvent);\n if (_cachedValue == null || _renderedCacheKey != newCacheKey)\n {\n _cachedValue = base.RenderInner(logEvent);\n _renderedCacheKey = newCacheKey;\n }\n\n return _cachedValue;\n }\n else\n {\n return base.RenderInner(logEvent);\n }\n }\n\n \/\/\/ \n string IStringValueRenderer.GetFormattedString(LogEventInfo logEvent) => Cached ? RenderInner(logEvent) : null;\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":95} {"diff_hunk":"@@ -9,6 +9,7 @@ using Microsoft.AspNetCore.Builder;\n using Microsoft.AspNetCore.Hosting;\n using Microsoft.AspNetCore.Http;\n using Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal;\n+using Microsoft.Extensions.Configuration;\n using Microsoft.Extensions.Logging;\n \n namespace SampleApp","source_code":"\/\/ Copyright (c) .NET Foundation. All rights reserved.\n\/\/ Licensed under the Apache License, Version 2.0. 
See License.txt in the project root for license information.\n\nusing System;\nusing System.IO;\nusing System.Net;\nusing System.Threading.Tasks;\nusing Microsoft.AspNetCore.Builder;\nusing Microsoft.AspNetCore.Hosting;\nusing Microsoft.AspNetCore.Http;\nusing Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal;\nusing Microsoft.Extensions.Logging;\n\nnamespace SampleApp\n{\n public class Startup\n {\n public void Configure(IApplicationBuilder app, ILoggerFactory loggerFactory)\n {\n var logger = loggerFactory.CreateLogger(\"Default\");\n\n app.Run(async context =>\n {\n var connectionFeature = context.Connection;\n logger.LogDebug($\"Peer: {connectionFeature.RemoteIpAddress?.ToString()}:{connectionFeature.RemotePort}\"\n + $\"{Environment.NewLine}\"\n + $\"Sock: {connectionFeature.LocalIpAddress?.ToString()}:{connectionFeature.LocalPort}\");\n\n var response = $\"hello, world{Environment.NewLine}\";\n context.Response.ContentLength = response.Length;\n context.Response.ContentType = \"text\/plain\";\n await context.Response.WriteAsync(response);\n });\n }\n\n public static void Main(string[] args)\n {\n TaskScheduler.UnobservedTaskException += (sender, e) =>\n {\n Console.WriteLine(\"Unobserved exception: {0}\", e.Exception);\n };\n\n var host = new WebHostBuilder()\n .ConfigureLogging((_, factory) =>\n {\n factory.AddConsole();\n })\n .UseKestrel(options =>\n {\n \/\/ Run callbacks on the transport thread\n options.ApplicationSchedulingMode = SchedulingMode.Inline;\n\n options.Listen(IPAddress.Loopback, 5000, listenOptions =>\n {\n \/\/ Uncomment the following to enable Nagle's algorithm for this endpoint.\n \/\/listenOptions.NoDelay = false;\n\n listenOptions.UseConnectionLogging();\n });\n\n options.Listen(IPAddress.Loopback, 5001, listenOptions =>\n {\n listenOptions.UseHttps(\"testCert.pfx\", \"testPassword\");\n listenOptions.UseConnectionLogging();\n });\n\n options.UseSystemd();\n\n \/\/ The following section should be used to demo sockets\n \/\/options.ListenUnixSocket(\"\/tmp\/kestrel-test.sock\");\n })\n .UseLibuv(options =>\n {\n \/\/ Uncomment the following line to change the default number of libuv threads for all endpoints.\n \/\/ options.ThreadCount = 4;\n })\n .UseContentRoot(Directory.GetCurrentDirectory())\n .UseStartup()\n .Build();\n\n host.Run();\n }\n }\n}","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":96} {"diff_hunk":"@@ -61,7 +61,7 @@ namespace OpenTelemetry.Trace\n if (activity.IsAllDataRequested)\n {\n activity.SetResource(this.resource);\n- this.activityProcessor.OnStart(activity);\n+ this.activityProcessor?.OnStart(activity);\n }\n }\n ","source_code":"\ufeff\/\/ \n\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ <\/copyright>\n\nusing System.Diagnostics;\nusing OpenTelemetry.Resources;\n\nnamespace OpenTelemetry.Trace\n{\n \/\/\/ \n \/\/\/ This class encapsulates the logic for performing ActivitySource actions\n \/\/\/ on Activities that are 
created using default ActivitySource.\n \/\/\/ All activities created without using ActivitySource will have a\n \/\/\/ default ActivitySource assigned to them with their name as empty string.\n \/\/\/ This class is to be used by instrumentation adapters which converts\/augments\n \/\/\/ activies created without ActivitySource, into something which closely\n \/\/\/ matches the one created using ActivitySource.\n \/\/\/ <\/summary>\n \/\/\/ \n \/\/\/ This class is meant to be only used when writing new Instrumentation for\n \/\/\/ libraries which are already instrumented with DiagnosticSource\/Activity\n \/\/\/ following this doc:\n \/\/\/ https:\/\/github.com\/dotnet\/runtime\/blob\/master\/src\/libraries\/System.Diagnostics.DiagnosticSource\/src\/ActivityUserGuide.md.\n \/\/\/ <\/remarks>\n public class ActivitySourceAdapter\n {\n private readonly Sampler sampler;\n private readonly Resource resource;\n private ActivityProcessor activityProcessor;\n\n internal ActivitySourceAdapter(Sampler sampler, ActivityProcessor activityProcessor, Resource resource)\n {\n this.sampler = sampler;\n this.activityProcessor = activityProcessor;\n this.resource = resource;\n }\n\n private ActivitySourceAdapter()\n {\n }\n\n \/\/\/ \n \/\/\/ Method that starts an .\n \/\/\/ <\/summary>\n \/\/\/ to be started.<\/param>\n public void Start(Activity activity)\n {\n this.RunGetRequestedData(activity);\n if (activity.IsAllDataRequested)\n {\n activity.SetResource(this.resource);\n this.activityProcessor.OnStart(activity);\n }\n }\n\n \/\/\/ \n \/\/\/ Method that stops an .\n \/\/\/ <\/summary>\n \/\/\/ to be stopped.<\/param>\n public void Stop(Activity activity)\n {\n if (activity.IsAllDataRequested)\n {\n this.activityProcessor.OnEnd(activity);\n }\n }\n\n internal void UpdateProcessor(ActivityProcessor processor)\n {\n this.activityProcessor = processor;\n }\n\n private void RunGetRequestedData(Activity activity)\n {\n ActivityContext parentContext;\n if (string.IsNullOrEmpty(activity.ParentId))\n {\n parentContext = default;\n }\n else if (activity.Parent != null)\n {\n parentContext = activity.Parent.Context;\n }\n else\n {\n parentContext = new ActivityContext(\n activity.TraceId,\n activity.ParentSpanId,\n activity.ActivityTraceFlags,\n activity.TraceStateString,\n isRemote: true);\n }\n\n var samplingParameters = new SamplingParameters(\n parentContext,\n activity.TraceId,\n activity.DisplayName,\n activity.Kind,\n activity.TagObjects,\n activity.Links);\n\n var samplingResult = this.sampler.ShouldSample(samplingParameters);\n\n switch (samplingResult.Decision)\n {\n case SamplingDecision.NotRecord:\n activity.IsAllDataRequested = false;\n break;\n case SamplingDecision.Record:\n activity.IsAllDataRequested = true;\n break;\n case SamplingDecision.RecordAndSampled:\n activity.IsAllDataRequested = true;\n activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;\n break;\n }\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":97} {"diff_hunk":"@@ -33,7 +33,7 @@ namespace Nethermind.JsonRpc.Modules\n \n private List _modules = new();\n private List _enabledModules = new();\n- \n+\n private Dictionary _methods\n = new(StringComparer.InvariantCulture);\n ","source_code":"\/\/ Copyright (c) 2021 Demerzel Solutions Limited\n\/\/ This file is part of the Nethermind library.\n\/\/ \n\/\/ The Nethermind library is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either 
version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/ \n\/\/ The Nethermind library is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/ \n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with the Nethermind. If not, see .\n\nusing System;\nusing System.Collections.Generic;\nusing System.IO;\nusing System.IO.Abstractions;\nusing System.Linq;\nusing System.Reflection;\nusing System.Threading.Tasks;\nusing Nethermind.Logging;\nusing Newtonsoft.Json;\n\nnamespace Nethermind.JsonRpc.Modules\n{\n public class RpcModuleProvider : IRpcModuleProvider\n {\n private ILogger _logger;\n private IJsonRpcConfig _jsonRpcConfig;\n \n private List _modules = new();\n private List _enabledModules = new();\n \n private Dictionary _methods\n = new(StringComparer.InvariantCulture);\n \n private readonly Dictionary> RentModule, Action ReturnModule)> _pools\n = new();\n \n private IRpcMethodFilter _filter = NullRpcMethodFilter.Instance;\n\n public RpcModuleProvider(IFileSystem fileSystem, IJsonRpcConfig jsonRpcConfig, ILogManager logManager)\n {\n _logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager));\n _jsonRpcConfig = jsonRpcConfig ?? throw new ArgumentNullException(nameof(jsonRpcConfig));\n if (fileSystem.File.Exists(_jsonRpcConfig.CallsFilterFilePath))\n {\n if(_logger.IsWarn) _logger.Warn(\"Applying JSON RPC filter.\");\n _filter = new RpcMethodFilter(_jsonRpcConfig.CallsFilterFilePath, fileSystem, _logger);\n }\n }\n\n public IReadOnlyCollection Converters { get; } = new List();\n\n public IReadOnlyCollection Enabled => _enabledModules;\n\n public IReadOnlyCollection All => _modules;\n\n public void Register(IRpcModulePool pool) where T : IRpcModule\n {\n RpcModuleAttribute attribute = typeof(T).GetCustomAttribute();\n if (attribute == null)\n {\n if(_logger.IsWarn) _logger.Warn(\n $\"Cannot register {typeof(T).Name} as a JSON RPC module because it does not have a {nameof(RpcModuleAttribute)} applied.\");\n return;\n }\n \n string moduleType = attribute.ModuleType;\n\n _pools[moduleType] = (async canBeShared => await pool.GetModule(canBeShared), m => pool.ReturnModule((T) m));\n _modules.Add(moduleType);\n\n ((List) Converters).AddRange(pool.Factory.GetConverters());\n\n foreach ((string name, (MethodInfo info, bool readOnly, RpcEndpoint availability)) in GetMethodDict(typeof(T)))\n {\n ResolvedMethodInfo resolvedMethodInfo = new(moduleType, info, readOnly, availability);\n if (_filter.AcceptMethod(resolvedMethodInfo.ToString()))\n {\n _methods[name] = resolvedMethodInfo;\n }\n }\n\n if (_jsonRpcConfig.EnabledModules.Contains(moduleType, StringComparer.InvariantCultureIgnoreCase))\n {\n _enabledModules.Add(moduleType);\n }\n }\n\n public ModuleResolution Check(string methodName, RpcEndpoint rpcEndpoint)\n {\n if (!_methods.TryGetValue(methodName, out ResolvedMethodInfo result)) return ModuleResolution.Unknown;\n\n if ((result.Availability & rpcEndpoint) == RpcEndpoint.None) return ModuleResolution.EndpointDisabled;\n \n return _enabledModules.Contains(result.ModuleType) ? 
ModuleResolution.Enabled : ModuleResolution.Disabled;\n }\n\n public (MethodInfo, bool) Resolve(string methodName)\n {\n if (!_methods.TryGetValue(methodName, out ResolvedMethodInfo result)) return (null, false);\n\n return (result.MethodInfo, result.ReadOnly);\n }\n\n public Task Rent(string methodName, bool canBeShared)\n {\n if (!_methods.TryGetValue(methodName, out ResolvedMethodInfo result)) return null;\n\n return _pools[result.ModuleType].RentModule(canBeShared);\n }\n\n public void Return(string methodName, IRpcModule rpcModule)\n {\n if (!_methods.TryGetValue(methodName, out ResolvedMethodInfo result))\n throw new InvalidOperationException(\"Not possible to return an unresolved module\");\n\n _pools[result.ModuleType].ReturnModule(rpcModule);\n }\n\n private IDictionary GetMethodDict(Type type)\n {\n var methods = type.GetMethods(BindingFlags.Public | BindingFlags.Instance | BindingFlags.DeclaredOnly);\n return methods.ToDictionary(\n x => x.Name.Trim(),\n x =>\n {\n JsonRpcMethodAttribute? jsonRpcMethodAttribute = x.GetCustomAttribute();\n return (x, jsonRpcMethodAttribute?.IsSharable ?? true, jsonRpcMethodAttribute?.Availability ?? RpcEndpoint.All);\n });\n }\n \n private class ResolvedMethodInfo\n {\n public ResolvedMethodInfo(\n string moduleType,\n MethodInfo methodInfo,\n bool readOnly,\n RpcEndpoint availability)\n {\n ModuleType = moduleType;\n MethodInfo = methodInfo;\n ReadOnly = readOnly;\n Availability = availability;\n }\n \n public string ModuleType { get; }\n public MethodInfo MethodInfo { get; }\n public bool ReadOnly { get; }\n public RpcEndpoint Availability { get; }\n\n public override string ToString()\n {\n return MethodInfo.Name;\n }\n }\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":98} {"diff_hunk":"@@ -40,12 +40,12 @@ namespace Microsoft.CodeAnalysis.Sarif\n File.WriteAllText(outputFileName, sarif);\n }\n \n- if (inputFileName == null) { throw new ArgumentNullException(\"inputFileName\"); };\n- if (outputFileName == null) { throw new ArgumentNullException(\"outputFileName\"); };\n+ if (inputFileName == null) { throw new ArgumentNullException(nameof(inputFileName)); };\n+ if (outputFileName == null) { throw new ArgumentNullException(nameof(outputFileName)); };\n \n if (Directory.Exists(outputFileName))\n {\n- throw new ArgumentException(\"Specified file output path exists but is a directory.\", \"outputFileName\");\n+ throw new ArgumentException(\"Specified file output path exists but is a directory.\", nameof(outputFileName));\n }\n \n if (!conversionOptions.HasFlag(ToolFormatConversionOptions.OverwriteExistingOutputFile) && File.Exists(outputFileName))","source_code":"\ufeff\/\/ Copyright (c) Microsoft. All rights reserved.\n\/\/ Licensed under the MIT license. 
See LICENSE file in the project root for full license information.\n\nusing System;\nusing System.Collections.Generic;\nusing System.IO;\nusing System.Threading;\nusing Newtonsoft.Json;\nusing Microsoft.CodeAnalysis.Sarif.Converters;\nusing Microsoft.CodeAnalysis.Sarif.Writers;\nusing System.Runtime.InteropServices;\n\nnamespace Microsoft.CodeAnalysis.Sarif\n{\n \/\/\/ \n \/\/\/ A class that provides helpers for converting a log file produced by \n \/\/\/ one of a well-known set of tools to the SARIF format.\n \/\/\/ <\/summary>\n public class ToolFormatConverter\n {\n \/\/\/ Converts a tool log file into the SARIF format.<\/summary>\n \/\/\/ Thrown when one or more required arguments are null.<\/exception>\n \/\/\/ Thrown when one or more arguments have unsupported or\n \/\/\/ illegal values.<\/exception>\n \/\/\/ Thrown when the requested operation is invalid.<\/exception>\n \/\/\/ The tool format of the input file.<\/param>\n \/\/\/ The input log file name.<\/param>\n \/\/\/ The name of the file to which the resulting SARIF log shall be\n \/\/\/ written. This cannot be a directory.<\/param>\n \/\/\/ Options for controlling the conversion.<\/param>\n public void ConvertToStandardFormat(\n ToolFormat toolFormat,\n string inputFileName,\n string outputFileName,\n ToolFormatConversionOptions conversionOptions)\n {\n if (toolFormat == ToolFormat.PREfast)\n {\n string sarif = ConvertPREfastToStandardFormat(inputFileName);\n File.WriteAllText(outputFileName, sarif);\n }\n\n if (inputFileName == null) { throw new ArgumentNullException(\"inputFileName\"); };\n if (outputFileName == null) { throw new ArgumentNullException(\"outputFileName\"); };\n\n if (Directory.Exists(outputFileName))\n {\n throw new ArgumentException(\"Specified file output path exists but is a directory.\", \"outputFileName\");\n }\n\n if (!conversionOptions.HasFlag(ToolFormatConversionOptions.OverwriteExistingOutputFile) && File.Exists(outputFileName))\n {\n throw new InvalidOperationException(\"Output file already exists and option to overwrite was not specified.\");\n }\n\n \/\/ FileMode settings here will results in an exception being raised if the input \n \/\/ file does not exist, and that an existing output file will be overwritten\n using (var input = File.OpenRead(inputFileName))\n using (var outputTextStream = File.Create(outputFileName))\n using (var outputTextWriter = new StreamWriter(outputTextStream))\n using (var outputJson = new JsonTextWriter(outputTextWriter))\n {\n if (conversionOptions.HasFlag(ToolFormatConversionOptions.PrettyPrint))\n {\n outputJson.Formatting = Formatting.Indented;\n }\n\n using (var output = new ResultLogJsonWriter(outputJson))\n {\n ConvertToStandardFormat(toolFormat, input, output);\n }\n }\n }\n\n \/\/\/ Converts a tool log file to the SARIF format.<\/summary>\n \/\/\/ The tool format of the input file.<\/param>\n \/\/\/ The input log file name.<\/param>\n \/\/\/ The name of the file to which the resulting SARIF log shall be\n \/\/\/ written. 
This cannot be a directory.<\/param>\n public void ConvertToStandardFormat(\n ToolFormat toolFormat,\n string inputFileName,\n string outputFileName)\n {\n if (toolFormat == ToolFormat.PREfast)\n {\n string sarif = ConvertPREfastToStandardFormat(inputFileName);\n File.WriteAllText(outputFileName, sarif);\n return;\n }\n\n ConvertToStandardFormat(toolFormat, inputFileName, outputFileName, ToolFormatConversionOptions.None);\n }\n\n \/\/\/ Converts a tool log file represented as a stream into the SARIF format.<\/summary>\n \/\/\/ Thrown when one or more required arguments are null.<\/exception>\n \/\/\/ Thrown when one or more arguments have unsupported or\n \/\/\/ illegal values.<\/exception>\n \/\/\/ The tool format of the input file.<\/param>\n \/\/\/ A stream that contains tool log contents.<\/param>\n \/\/\/ A stream to which the converted output should be written.<\/param>\n public void ConvertToStandardFormat(\n ToolFormat toolFormat,\n Stream inputStream,\n IResultLogWriter outputStream)\n {\n if (toolFormat == ToolFormat.PREfast)\n {\n throw new ArgumentException(\"Cannot convert PREfast XML from stream. Call ConvertPREfastToStandardFormat helper instead.\");\n };\n\n if (inputStream == null) { throw new ArgumentNullException(\"inputStream\"); };\n if (outputStream == null) { throw new ArgumentNullException(\"outputStream\"); };\n\n Lazy converter;\n if (_converters.TryGetValue(toolFormat, out converter))\n {\n converter.Value.Convert(inputStream, outputStream);\n }\n else\n {\n throw new ArgumentException(\"Unrecognized tool specified: \" + toolFormat.ToString(), \"toolFormat\");\n }\n }\n\n private readonly IDictionary> _converters = CreateConverterRecords();\n\n private static Dictionary> CreateConverterRecords()\n {\n var result = new Dictionary>();\n CreateConverterRecord(result, ToolFormat.AndroidStudio);\n CreateConverterRecord(result, ToolFormat.CppCheck);\n CreateConverterRecord(result, ToolFormat.ClangAnalyzer);\n CreateConverterRecord(result, ToolFormat.Fortify);\n CreateConverterRecord(result, ToolFormat.FxCop);\n return result;\n }\n\n private static void CreateConverterRecord(IDictionary> dict, ToolFormat format)\n where T : IToolFileConverter, new()\n {\n dict.Add(format, new Lazy(() => new T(), LazyThreadSafetyMode.ExecutionAndPublication));\n }\n\n \/\/\/ Converts a legacy PREfast XML log file into the SARIF format.<\/summary>\n \/\/\/ Thrown when one or more required arguments are null.<\/exception>\n \/\/\/ Thrown when one or more arguments have unsupported or\n \/\/\/ illegal values.<\/exception>\n \/\/\/ Thrown when the requested operation is invalid.<\/exception>\n \/\/\/ The tool format of the input file.<\/param>\n \/\/\/ The input log file name.<\/param>\n \/\/\/ The converted PREfast log file in SARIF format.<\/returns>\n public static string ConvertPREfastToStandardFormat(string inputFileName)\n {\n if (inputFileName == null) { throw new ArgumentNullException(\"inputStream\"); };\n\n return ConvertToSarif(inputFileName);\n }\n\n [return: MarshalAs(UnmanagedType.BStr)]\n [DllImport(\"PREfastXmlSarifConverter\", CallingConvention = CallingConvention.StdCall, CharSet = CharSet.Unicode)]\n private static extern string ConvertToSarif([MarshalAs(UnmanagedType.BStr)][In]string prefastFilePath);\n }\n}\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":99} {"diff_hunk":"@@ -17,6 +17,7 @@\n using System;\n using System.Collections.Concurrent;\n using System.Collections.Generic;\n+using System.Runtime.CompilerServices;\n \n namespace OpenTelemetry.Context\n 
{","source_code":"\ufeff\/\/ \r\n\/\/ Copyright The OpenTelemetry Authors\r\n\/\/\r\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\r\n\/\/ you may not use this file except in compliance with the License.\r\n\/\/ You may obtain a copy of the License at\r\n\/\/\r\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\r\n\/\/\r\n\/\/ Unless required by applicable law or agreed to in writing, software\r\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\r\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n\/\/ See the License for the specific language governing permissions and\r\n\/\/ limitations under the License.\r\n\/\/ <\/copyright>\r\n\r\nusing System;\r\nusing System.Collections.Concurrent;\r\nusing System.Collections.Generic;\r\n\r\nnamespace OpenTelemetry.Context\r\n{\r\n \/\/\/ \r\n \/\/\/ Generic runtime context management API.\r\n \/\/\/ <\/summary>\r\n public sealed class RuntimeContext\r\n {\r\n private static readonly ConcurrentDictionary Slots = new ConcurrentDictionary();\r\n\r\n \/\/\/ \r\n \/\/\/ Gets or sets the actual context carrier implementation.\r\n \/\/\/ <\/summary>\r\n#if !NET452\r\n public static Type ContextSlotType { get; set; } = typeof(AsyncLocalRuntimeContextSlot<>);\r\n#else\r\n public static Type ContextSlotType { get; set; } = typeof(RemotingRuntimeContextSlot<>);\r\n#endif\r\n\r\n \/\/\/ \r\n \/\/\/ Register a named context slot.\r\n \/\/\/ <\/summary>\r\n \/\/\/ The name of the context slot.<\/param>\r\n \/\/\/ The type of the underlying value.<\/typeparam>\r\n public static void RegisterSlot(string name)\r\n {\r\n lock (Slots)\r\n {\r\n if (Slots.ContainsKey(name))\r\n {\r\n throw new InvalidOperationException($\"The context slot {name} is already registered.\");\r\n }\r\n\r\n var type = ContextSlotType.MakeGenericType(typeof(T));\r\n var ctor = type.GetConstructor(new Type[] { typeof(string) });\r\n Slots[name] = ctor.Invoke(new object[] { name });\r\n }\r\n }\r\n\r\n \/*\r\n public static void Apply(IDictionary snapshot)\r\n {\r\n foreach (var entry in snapshot)\r\n {\r\n \/\/ TODO: revisit this part if we want Snapshot() to be used on critical paths\r\n dynamic value = entry.Value;\r\n SetValue(entry.Key, value);\r\n }\r\n }\r\n\r\n public static IDictionary Snapshot()\r\n {\r\n var retval = new Dictionary();\r\n foreach (var entry in Slots)\r\n {\r\n \/\/ TODO: revisit this part if we want Snapshot() to be used on critical paths\r\n dynamic slot = entry.Value;\r\n retval[entry.Key] = slot.Get();\r\n }\r\n return retval;\r\n }\r\n *\/\r\n\r\n \/\/\/ \r\n \/\/\/ Sets the value to a registered slot.\r\n \/\/\/ <\/summary>\r\n \/\/\/ The name of the context slot.<\/param>\r\n \/\/\/ The value to be set.<\/param>\r\n \/\/\/ The type of the value.<\/typeparam>\r\n public static void SetValue(string name, T value)\r\n {\r\n var slot = (RuntimeContextSlot)Slots[name];\r\n slot.Set(value);\r\n }\r\n\r\n \/\/\/ \r\n \/\/\/ Gets the value from a registered slot.\r\n \/\/\/ <\/summary>\r\n \/\/\/ The name of the context slot.<\/param>\r\n \/\/\/ The type of the value.<\/typeparam>\r\n \/\/\/ The value retrieved from the context slot.<\/returns>\r\n public static T GetValue(string name)\r\n {\r\n var slot = (RuntimeContextSlot)Slots[name];\r\n return slot.Get();\r\n }\r\n\r\n \/\/ For testing purpose\r\n \/\/ private static Clear\r\n }\r\n}\r\n","lang_cluster":"C#","diff_tag":0,"review_comment":"","id":100} {"diff_hunk":"@@ -47,13 +47,23 @@ def copy_files(use_gpu=False):\n \n \n def 
clear_path(path):\n- contents = os.listdir(path)\n- for file in contents:\n- file_path = os.path.join(path, file)\n- if os.path.isfile(file_path):\n- os.remove(file_path)\n- else:\n- shutil.rmtree(file_path)\n+ if os.path.isdir(path):\n+ contents = os.listdir(path)\n+ for file in contents:\n+ file_path = os.path.join(path, file)\n+ if os.path.isfile(file_path):\n+ os.remove(file_path)\n+ else:\n+ shutil.rmtree(file_path)\n+\n+\n+def silent_call(cmd):\n+ try:\n+ with open(os.devnull, \"w\") as shut_up:\n+ subprocess.check_output(cmd, stderr=shut_up)\n+ return 0\n+ except Exception:\n+ return 1\n \n \n def compile_cpp(use_mingw=False, use_gpu=False):","source_code":"# coding: utf-8\n# pylint: disable=invalid-name, exec-used, C0111\n\"\"\"Setup lightgbm package.\"\"\"\nfrom __future__ import absolute_import\n\nimport distutils\nimport os\nimport shutil\nimport struct\nimport sys\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.install_lib import install_lib\nfrom setuptools.command.sdist import sdist\n\n\ndef find_lib():\n CURRENT_DIR = os.path.dirname(__file__)\n libpath_py = os.path.join(CURRENT_DIR, 'lightgbm\/libpath.py')\n libpath = {'__file__': libpath_py}\n exec(compile(open(libpath_py, \"rb\").read(), libpath_py, 'exec'), libpath, libpath)\n\n LIB_PATH = [os.path.relpath(path, CURRENT_DIR) for path in libpath['find_lib_path']()]\n print(\"Install lib_lightgbm from: %s\" % LIB_PATH)\n return LIB_PATH\n\n\ndef copy_files(use_gpu=False):\n\n def copy_files_helper(folder_name):\n src = os.path.join('..', folder_name)\n if os.path.exists(src):\n dst = os.path.join('.\/lightgbm', folder_name)\n shutil.rmtree(dst, ignore_errors=True)\n distutils.dir_util.copy_tree(src, dst)\n else:\n raise Exception('Cannot copy {} folder'.format(src))\n\n if not os.path.isfile('.\/_IS_SOURCE_PACKAGE.txt'):\n copy_files_helper('include')\n copy_files_helper('src')\n if use_gpu:\n copy_files_helper('compute')\n distutils.file_util.copy_file(\"..\/CMakeLists.txt\", \".\/lightgbm\/\")\n distutils.file_util.copy_file(\"..\/LICENSE\", \".\/\")\n\n\ndef clear_path(path):\n contents = os.listdir(path)\n for file in contents:\n file_path = os.path.join(path, file)\n if os.path.isfile(file_path):\n os.remove(file_path)\n else:\n shutil.rmtree(file_path)\n\n\ndef compile_cpp(use_mingw=False, use_gpu=False):\n\n if os.path.exists(\"build_cpp\"):\n shutil.rmtree(\"build_cpp\")\n os.makedirs(\"build_cpp\")\n os.chdir(\"build_cpp\")\n\n cmake_cmd = \"cmake \"\n build_cmd = \"make _lightgbm\"\n if use_gpu:\n cmake_cmd += \" -DUSE_GPU=ON \"\n if os.name == \"nt\":\n if use_mingw:\n cmake_cmd += \" -G \\\"MinGW Makefiles\\\" \"\n os.system(cmake_cmd + \" ..\/lightgbm\/\")\n build_cmd = \"mingw32-make.exe _lightgbm\"\n else:\n vs_versions = [\"Visual Studio 15 2017 Win64\", \"Visual Studio 14 2015 Win64\", \"Visual Studio 12 2013 Win64\"]\n try_vs = 1\n for vs in vs_versions:\n tmp_cmake_cmd = \"%s -G \\\"%s\\\"\" % (cmake_cmd, vs)\n try_vs = os.system(tmp_cmake_cmd + \" ..\/lightgbm\/\")\n if try_vs == 0:\n cmake_cmd = tmp_cmake_cmd\n break\n else:\n clear_path(\".\/\")\n if try_vs != 0:\n raise Exception('Please install Visual Studio or MS Build first')\n\n build_cmd = \"cmake --build . 
--target _lightgbm --config Release\"\n print(\"Start to compile library.\")\n os.system(cmake_cmd + \" ..\/lightgbm\/\")\n os.system(build_cmd)\n os.chdir(\"..\")\n\n\nclass CustomInstallLib(install_lib):\n\n def install(self):\n outfiles = install_lib.install(self)\n src = find_lib()[0]\n dst = os.path.join(self.install_dir, 'lightgbm')\n dst, _ = self.copy_file(src, dst)\n outfiles.append(dst)\n return outfiles\n\n\nclass CustomInstall(install):\n\n user_options = install.user_options + [\n ('mingw', 'm', 'compile with mingw'),\n ('gpu', 'g', 'compile gpu version'),\n ('precompile', 'p', 'use precompile library')\n ]\n\n def initialize_options(self):\n install.initialize_options(self)\n self.mingw = 0\n self.gpu = 0\n self.precompile = 0\n\n def run(self):\n if not self.precompile:\n copy_files(use_gpu=self.gpu)\n compile_cpp(use_mingw=self.mingw, use_gpu=self.gpu)\n self.distribution.data_files = [('lightgbm', find_lib())]\n install.run(self)\n\n\nclass CustomSdist(sdist):\n\n def run(self):\n copy_files(use_gpu=True)\n open(\".\/_IS_SOURCE_PACKAGE.txt\", 'w').close()\n if os.path.exists(\".\/lightgbm\/Release\/\"):\n shutil.rmtree('.\/lightgbm\/Release\/')\n if os.path.isfile('.\/lightgbm\/lib_lightgbm.so'):\n os.remove('.\/lightgbm\/lib_lightgbm.so')\n sdist.run(self)\n if os.path.isfile('.\/_IS_SOURCE_PACKAGE.txt'):\n os.remove('.\/_IS_SOURCE_PACKAGE.txt')\n\n\nif __name__ == \"__main__\":\n if (8 * struct.calcsize(\"P\")) != 64:\n raise Exception('Cannot install LightGBM in 32-bit python, please use 64-bit python instead.')\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n if os.path.isfile(os.path.join('..', 'VERSION.txt')):\n distutils.file_util.copy_file(\n os.path.join('..', 'VERSION.txt'),\n os.path.join('.', 'lightgbm'))\n if os.path.isfile(os.path.join(dir_path, 'lightgbm', 'VERSION.txt')):\n version = open(os.path.join(dir_path, 'lightgbm', 'VERSION.txt')).read().strip()\n\n sys.path.insert(0, '.')\n\n setup(name='lightgbm',\n version=version,\n description='LightGBM Python Package',\n long_description=open('README.rst').read(),\n install_requires=[\n 'numpy',\n 'scipy',\n 'scikit-learn'\n ],\n maintainer='Guolin Ke',\n maintainer_email='guolin.ke@microsoft.com',\n zip_safe=False,\n cmdclass={\n 'install': CustomInstall,\n 'install_lib': CustomInstallLib,\n 'sdist': CustomSdist,\n },\n packages=find_packages(),\n include_package_data=True,\n data_files=[],\n license='The MIT License (Microsoft)',\n url='https:\/\/github.com\/Microsoft\/LightGBM')\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":101} {"diff_hunk":"@@ -48,7 +48,7 @@ static PJ_LP fouc_s_s_inverse (PJ_XY xy, PJ *P) { \/* Spheroidal, inver\n lp.phi = xy.y < 0. ? 
-M_HALFPI : M_HALFPI;\n } else\n lp.phi = aasin(P->ctx,xy.y);\n- V = cos(lp.phi);\n+ const double V = cos(lp.phi);\n lp.lam = xy.x * (Q->n + Q->n1 * V) \/ V;\n return lp;\n }","source_code":"#define PJ_LIB__\n\n#include \n#include \n\n#include \"proj.h\"\n#include \"proj_internal.h\"\n\nPROJ_HEAD(fouc_s, \"Foucaut Sinusoidal\") \"\\n\\tPCyl, Sph\";\n\n#define MAX_ITER 10\n#define LOOP_TOL 1e-7\n\nnamespace { \/\/ anonymous namespace\nstruct pj_opaque {\n double n, n1;\n};\n} \/\/ anonymous namespace\n\n\nstatic PJ_XY fouc_s_s_forward (PJ_LP lp, PJ *P) { \/* Spheroidal, forward *\/\n PJ_XY xy = {0.0,0.0};\n struct pj_opaque *Q = static_cast(P->opaque);\n double t;\n\n t = cos(lp.phi);\n xy.x = lp.lam * t \/ (Q->n + Q->n1 * t);\n xy.y = Q->n * lp.phi + Q->n1 * sin(lp.phi);\n return xy;\n}\n\n\nstatic PJ_LP fouc_s_s_inverse (PJ_XY xy, PJ *P) { \/* Spheroidal, inverse *\/\n PJ_LP lp = {0.0,0.0};\n struct pj_opaque *Q = static_cast(P->opaque);\n double V;\n int i;\n\n if (Q->n != 0.0) {\n lp.phi = xy.y;\n for (i = MAX_ITER; i ; --i) {\n lp.phi -= V = (Q->n * lp.phi + Q->n1 * sin(lp.phi) - xy.y ) \/\n (Q->n + Q->n1 * cos(lp.phi));\n if (fabs(V) < LOOP_TOL)\n break;\n }\n if (!i)\n lp.phi = xy.y < 0. ? -M_HALFPI : M_HALFPI;\n } else\n lp.phi = aasin(P->ctx,xy.y);\n V = cos(lp.phi);\n lp.lam = xy.x * (Q->n + Q->n1 * V) \/ V;\n return lp;\n}\n\n\nPJ *PROJECTION(fouc_s) {\n struct pj_opaque *Q = static_cast(pj_calloc (1, sizeof (struct pj_opaque)));\n if (nullptr==Q)\n return pj_default_destructor (P, ENOMEM);\n P->opaque = Q;\n\n Q->n = pj_param(P->ctx, P->params, \"dn\").f;\n if (Q->n < 0. || Q->n > 1.)\n return pj_default_destructor (P, PJD_ERR_N_OUT_OF_RANGE);\n\n Q->n1 = 1. - Q->n;\n P->es = 0;\n P->inv = fouc_s_s_inverse;\n P->fwd = fouc_s_s_forward;\n return P;\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":102} {"diff_hunk":"@@ -49,7 +49,7 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom,\n int crop_size = this->layer_param_.transform_param().crop_size();\n if (crop_size > 0) {\n top[0]->Reshape(this->layer_param_.data_param().batch_size(),\n- datum.channels(), crop_size, crop_size);\n+ datum.channels(), crop_size, crop_size);\n this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(),\n datum.channels(), crop_size, crop_size);\n this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size);","source_code":"#include \n\n#include \n\n#include \n#include \n\n#include \"caffe\/common.hpp\"\n#include \"caffe\/data_layers.hpp\"\n#include \"caffe\/layer.hpp\"\n#include \"caffe\/proto\/caffe.pb.h\"\n#include \"caffe\/util\/benchmark.hpp\"\n#include \"caffe\/util\/io.hpp\"\n#include \"caffe\/util\/math_functions.hpp\"\n#include \"caffe\/util\/rng.hpp\"\n\nnamespace caffe {\n\ntemplate \nDataLayer::~DataLayer() {\n this->JoinPrefetchThread();\n}\n\ntemplate \nvoid DataLayer::DataLayerSetUp(const vector*>& bottom,\n const vector*>& top) {\n \/\/ Initialize DB\n db_.reset(db::GetDB(this->layer_param_.data_param().backend()));\n db_->Open(this->layer_param_.data_param().source(), db::READ);\n cursor_.reset(db_->NewCursor());\n\n \/\/ Check if we should randomly skip a few data points\n if (this->layer_param_.data_param().rand_skip()) {\n unsigned int skip = caffe_rng_rand() %\n this->layer_param_.data_param().rand_skip();\n LOG(INFO) << \"Skipping first \" << skip << \" data points.\";\n while (skip-- > 0) {\n cursor_->Next();\n }\n }\n \/\/ Read a data point, and use it to initialize the top blob.\n Datum datum;\n 
datum.ParseFromString(cursor_->value());\n\n if (DecodeDatum(&datum)) {\n LOG(INFO) << \"Decoding Datum\";\n }\n \/\/ image\n int crop_size = this->layer_param_.transform_param().crop_size();\n if (crop_size > 0) {\n top[0]->Reshape(this->layer_param_.data_param().batch_size(),\n datum.channels(), crop_size, crop_size);\n this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(),\n datum.channels(), crop_size, crop_size);\n this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size);\n } else {\n top[0]->Reshape(\n this->layer_param_.data_param().batch_size(), datum.channels(),\n datum.height(), datum.width());\n this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(),\n datum.channels(), datum.height(), datum.width());\n this->transformed_data_.Reshape(1, datum.channels(),\n datum.height(), datum.width());\n }\n LOG(INFO) << \"output data size: \" << top[0]->num() << \",\"\n << top[0]->channels() << \",\" << top[0]->height() << \",\"\n << top[0]->width();\n \/\/ label\n if (this->output_labels_) {\n top[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1);\n this->prefetch_label_.Reshape(this->layer_param_.data_param().batch_size(),\n 1, 1, 1);\n }\n}\n\n\/\/ This function is used to create a thread that prefetches the data.\ntemplate \nvoid DataLayer::InternalThreadEntry() {\n CPUTimer batch_timer;\n batch_timer.Start();\n double read_time = 0;\n double trans_time = 0;\n CPUTimer timer;\n CHECK(this->prefetch_data_.count());\n CHECK(this->transformed_data_.count());\n Dtype* top_data = this->prefetch_data_.mutable_cpu_data();\n Dtype* top_label = NULL; \/\/ suppress warnings about uninitialized variables\n\n if (this->output_labels_) {\n top_label = this->prefetch_label_.mutable_cpu_data();\n }\n const int batch_size = this->layer_param_.data_param().batch_size();\n for (int item_id = 0; item_id < batch_size; ++item_id) {\n timer.Start();\n \/\/ get a blob\n Datum datum;\n datum.ParseFromString(cursor_->value());\n\n cv::Mat cv_img;\n if (datum.encoded()) {\n cv_img = DecodeDatumToCVMat(datum);\n }\n read_time += timer.MicroSeconds();\n timer.Start();\n\n \/\/ Apply data transformations (mirror, scale, crop...)\n int offset = this->prefetch_data_.offset(item_id);\n this->transformed_data_.set_cpu_data(top_data + offset);\n if (datum.encoded()) {\n this->data_transformer_.Transform(cv_img, &(this->transformed_data_));\n } else {\n this->data_transformer_.Transform(datum, &(this->transformed_data_));\n }\n if (this->output_labels_) {\n top_label[item_id] = datum.label();\n }\n trans_time += timer.MicroSeconds();\n \/\/ go to the next iter\n cursor_->Next();\n if (!cursor_->valid()) {\n DLOG(INFO) << \"Restarting data prefetching from start.\";\n cursor_->SeekToFirst();\n }\n }\n batch_timer.Stop();\n DLOG(INFO) << \"Prefetch batch: \" << batch_timer.MilliSeconds() << \" ms.\";\n DLOG(INFO) << \" Read time: \" << read_time \/ 1000 << \" ms.\";\n DLOG(INFO) << \"Transform time: \" << trans_time \/ 1000 << \" ms.\";\n}\n\nINSTANTIATE_CLASS(DataLayer);\nREGISTER_LAYER_CLASS(Data);\n\n} \/\/ namespace caffe\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":103} {"diff_hunk":"@@ -84,7 +84,26 @@ int main()\n }\n \n \n-\/* Output\n+\/* \n+Input:\n+8\n+14\n+0 1\n+0 2\n+1 2\n+1 4\n+2 0\n+2 3\n+3 3\n+3 6\n+4 0\n+4 5\n+5 6\n+5 7\n+6 2\n+7 3\n+\n+Output:\n \n Breadth First Traversal is : 0 1 2 4 3 5 6 7\n ","source_code":"#include \n#include \n#include \n\nusing namespace std;\n\nclass Graph\n{\n int numberVertex;\n vector 
*adjacency;\n\n public:\n \/\/ Constructor to initialise graph\n Graph(int numberVertex)\n {\n this->numberVertex = numberVertex;\n adjacency = new vector [numberVertex];\n }\n\n \/\/ Function to add edge between source and destination\n void addEdge(int source, int destination)\n {\n adjacency[source].push_back(destination);\n }\n\n \/\/ Function to perform Breadth First Search\n void bfs(int starting);\n};\n\nvoid Graph::bfs(int starting)\n{\n bool visited[numberVertex];\n\n for (int i = 0; i < numberVertex; i++)\n visited[i] = false;\n\n queue queue_vertex;\n\n visited[starting] = true;\n queue_vertex.push(starting);\n\n while (!queue_vertex.empty())\n {\n starting = queue_vertex.front();\n cout << starting << \" \";\n queue_vertex.pop();\n\n for (vector :: iterator it = adjacency[starting].begin(); it != adjacency[starting].end(); ++it)\n {\n if(!visited[*it])\n {\n visited[*it] = true;\n queue_vertex.push(*it);\n }\n }\n }\n}\n\nint main()\n{\n \/\/ Number of vertices is 8\n Graph graph(8);\n\n \/\/ Create edges between vertices\n graph.addEdge(0, 1);\n graph.addEdge(0, 2);\n graph.addEdge(1, 2);\n graph.addEdge(1, 4);\n graph.addEdge(2, 0);\n graph.addEdge(2, 3);\n graph.addEdge(3, 3);\n graph.addEdge(3, 6);\n graph.addEdge(4, 0);\n graph.addEdge(4, 5);\n graph.addEdge(5, 6);\n graph.addEdge(5, 7);\n graph.addEdge(6, 2);\n graph.addEdge(7, 3);\n\n cout << \"Breadth First Traversal is : \";\n graph.bfs(0);\n\n return 0;\n}\n\n\n\/* Output\n\nBreadth First Traversal is : 0 1 2 4 3 5 6 7\n\n*\/\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":104} {"diff_hunk":"@@ -1,6 +1,7 @@\n #include \n \n #include \n+#include \n #include \n \n #include ","source_code":"#include \n\n#include \n#include \n\n#include \n\n#include \n\n#ifdef ADIOS2_HAVE_MPI\n\nTEST(ADIOSInterface, MPICommRemoved)\n{\n MPI_Comm myComm;\n MPI_Comm_dup(MPI_COMM_WORLD, &myComm);\n adios2::ADIOS adios(myComm);\n adios2::IO io = adios.DeclareIO(\"TestIO\");\n MPI_Comm_free(&myComm);\n\n adios2::Engine engine = io.Open(\"test.bp\", adios2::Mode::Write);\n}\n\n#endif\n\nclass ADIOS2_CXX11_API : public ::testing::Test\n{\npublic:\n ADIOS2_CXX11_API()\n#ifdef ADIOS2_HAVE_MPI\n : ad(MPI_COMM_WORLD, adios2::DebugON)\n#else\n : ad(adios2::DebugON)\n#endif\n {\n#ifdef ADIOS2_HAVE_MPI\n MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n MPI_Comm_size(MPI_COMM_WORLD, &size);\n#endif\n }\n\n adios2::ADIOS ad;\n int rank = 0;\n int size = 1;\n};\n\nclass ADIOS2_CXX11_API_IO : public ADIOS2_CXX11_API\n{\npublic:\n ADIOS2_CXX11_API_IO() : io(ad.DeclareIO(\"CXX11_API_TestIO\")) {}\n\n adios2::IO io;\n};\n\nTEST_F(ADIOS2_CXX11_API_IO, Engine)\n{\n io.SetEngine(\"bpfile\");\n EXPECT_EQ(io.EngineType(), \"bpfile\");\n\n adios2::Engine engine = io.Open(\"types.bp\", adios2::Mode::Write);\n EXPECT_EQ(engine.Name(), \"types.bp\");\n EXPECT_EQ(engine.Type(), \"BP3\");\n\n EXPECT_EQ(io.EngineType(), \"bp\"); \/\/ FIXME? Is it expected that adios2_open\n \/\/ changes the engine_type string?\n}\n\nTEST_F(ADIOS2_CXX11_API_IO, EngineDefault)\n{\n io.SetEngine(\"\");\n EXPECT_EQ(io.EngineType(), \"\");\n\n adios2::Engine engine = io.Open(\"types.bp\", adios2::Mode::Write);\n EXPECT_EQ(engine.Name(), \"types.bp\");\n EXPECT_EQ(engine.Type(), \"BP3\");\n\n EXPECT_EQ(io.EngineType(), \"bp\"); \/\/ FIXME? 
Is it expected that adios2_open\n \/\/ changes the engine_type string?\n}\n\nint main(int argc, char **argv)\n{\n#ifdef ADIOS2_HAVE_MPI\n MPI_Init(nullptr, nullptr);\n#endif\n\n int result;\n ::testing::InitGoogleTest(&argc, argv);\n result = RUN_ALL_TESTS();\n\n#ifdef ADIOS2_HAVE_MPI\n MPI_Finalize();\n#endif\n\n return result;\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":105} {"diff_hunk":"@@ -29,6 +29,7 @@ THE SOFTWARE.\n #include \n #include \n #include \n+#include \n \n #define NUM_GROUPS 1\n #define GROUP_SIZE 1","source_code":"\/*\nCopyright (c) 2020-present Advanced Micro Devices, Inc. All rights reserved.\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*\/\n\n#include \n#include \"hip\/hip_runtime.h\"\n#ifdef __HIP_PLATFORM_HCC__\n#include \"hip\/hip_ext.h\"\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#define NUM_GROUPS 1\n#define GROUP_SIZE 1\n#define WARMUP_RUN_COUNT 10\n#define TIMING_RUN_COUNT 100\n#define TOTAL_RUN_COUNT WARMUP_RUN_COUNT + TIMING_RUN_COUNT\n\n__global__ void EmptyKernel() {}\n\n\/\/ Helper to print various timing metrics\nvoid print_timing(std::string test, std::array &results, int batch = 1)\n{\n\n float total_us = 0.0f, mean_us = 0.0f, stddev_us = 0.0f;\n\n \/\/ remove top outliers due to nature of variability across large number of multi-threaded runs\n std::sort(results.begin(), results.end(), std::greater());\n auto start_iter = std::next(results.begin(), WARMUP_RUN_COUNT);\n auto end_iter = results.end();\n\n \/\/ mean\n std::for_each(start_iter, end_iter, [&](const float &run_ms) {\n total_us += (run_ms * 1000) \/ batch;\n });\n mean_us = total_us \/ TIMING_RUN_COUNT;\n\n \/\/ stddev\n total_us = 0;\n std::for_each(start_iter, end_iter, [&](const float &run_ms) {\n float dev_us = ((run_ms * 1000) \/ batch) - mean_us;\n total_us += dev_us * dev_us;\n });\n stddev_us = sqrt(total_us \/ TIMING_RUN_COUNT);\n\n printf(\"\\n %s: %.1f us, std: %.1f us\\n\", test.c_str(), mean_us, stddev_us);\n}\n\n\/\/ Measure time taken to enqueue a kernel on the GPU using hipModuleLaunchKernel\nvoid hipModuleLaunchKernel_enqueue_rate(std::atomic_int* shared, int max_threads)\n{\n \/\/resources necessary for this thread\n hipStream_t stream;\n hipStreamCreate(&stream);\n hipModule_t module;\n hipFunction_t function;\n hipModuleLoad(&module, \"test_kernel.code\");\n hipModuleGetFunction(&function, module, \"test\");\n void* kernel_params = nullptr;\n std::array results;\n\n \/\/synchronize all threads, before running\n int tid = 
shared->fetch_add(1, std::memory_order_release);\n while (max_threads != shared->load(std::memory_order_acquire)) {}\n\n for (auto i = 0; i < TOTAL_RUN_COUNT; ++i) {\n auto start = std::chrono::high_resolution_clock::now();\n hipModuleLaunchKernel(function, 1, 1, 1, 1, 1, 1, 0, stream, &kernel_params, nullptr);\n auto stop = std::chrono::high_resolution_clock::now();\n results[i] = std::chrono::duration(stop - start).count();\n }\n print_timing(\"Thread ID : \" + std::to_string(tid) + \" , \" + \"hipModuleLaunchKernel enqueue rate\", results);\n}\n\n\/\/ Measure time taken to enqueue a kernel on the GPU using hipLaunchKernelGGL\nvoid hipLaunchKernelGGL_enqueue_rate(std::atomic_int* shared, int max_threads)\n{\n \/\/resources necessary for this thread\n hipStream_t stream;\n hipStreamCreate(&stream);\n std::array results;\n\n \/\/synchronize all threads, before running\n int tid = shared->fetch_add(1, std::memory_order_release);\n while (max_threads != shared->load(std::memory_order_acquire)) {}\n\n for (auto i = 0; i < TOTAL_RUN_COUNT; ++i) {\n auto start = std::chrono::high_resolution_clock::now();\n hipLaunchKernelGGL((EmptyKernel), dim3(NUM_GROUPS), dim3(GROUP_SIZE), 0, stream);\n auto stop = std::chrono::high_resolution_clock::now();\n results[i] = std::chrono::duration(stop - start).count();\n }\n print_timing(\"Thread ID : \" + std::to_string(tid) + \" , \" + \"hipLaunchKernelGGL enqueue rate\", results);\n}\n\n\/\/ Simple thread pool\nstruct thread_pool {\n thread_pool(int total_threads) : max_threads(total_threads) {}\n void start(std::function f) {\n for (int i = 0; i < max_threads; ++i) {\n threads.push_back(std::async(std::launch::async, f, &shared, max_threads));\n }\n }\n void finish() {\n for (auto&&thread : threads) {\n thread.get();\n }\n threads.clear();\n shared = {0};\n }\n ~thread_pool() {\n finish();\n }\nprivate:\n std::atomic_int shared {0};\n std::vector> threads;\n int max_threads = 1;\n};\n\n\nint main(int argc, char* argv[])\n{\n if (argc != 3) {\n std::cerr << \"Run test as 'hipDispatchEnqueueRateMT <0-hipModuleLaunchKernel \/1-hipLaunchKernelGGL>'\\n\";\n return -1;\n }\n\n int max_threads = atoi(argv[1]);\n int run_module_test = atoi(argv[2]);\n if(max_threads < 1 || run_module_test < 0 || run_module_test > 1) {\n std::cerr << \"Invalid Input.\\n\";\n std::cerr << \"Run test as 'hipDispatchEnqueueRateMT <0-hipModuleLaunchKernel \/1-hipLaunchKernelGGL>'\\n\";\n return -1;\n }\n thread_pool task(max_threads);\n\n if(run_module_test == 0) {\n task.start(hipModuleLaunchKernel_enqueue_rate);\n task.finish();\n } else {\n task.start(hipLaunchKernelGGL_enqueue_rate);\n task.finish();\n }\n\n return 0;\n}\n\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":106} {"diff_hunk":"@@ -95,4 +95,4 @@ PeriodicTable *PeriodicTable::getTable() {\n return ds_instance.get();\n }\n \n-} \/\/ end of namespace\n+} \/\/ namespace RDKit","source_code":"\/\/ $Id$\n\/\/\n\/\/ Copyright (C) 2001-2006 Rational Discovery LLC\n\/\/\n\/\/ @@ All Rights Reserved @@\n\/\/ This file is part of the RDKit.\n\/\/ The contents are covered by the terms of the BSD license\n\/\/ which is included in the file license.txt, found at the root\n\/\/ of the RDKit source tree.\n\/\/\n#include \"PeriodicTable.h\"\n#include \n#include \ntypedef boost::tokenizer> tokenizer;\n#include \n#include \n\n#if RDK_BUILD_THREADSAFE_SSS\n#include \n#endif\n\nnamespace RDKit {\n\nclass std::unique_ptr PeriodicTable::ds_instance = nullptr;\n\nPeriodicTable::PeriodicTable() {\n \/\/ it is assumed that the atomic 
atomData string contains atoms\n \/\/ in sequence and no atoms are missing in between\n byanum.clear();\n byname.clear();\n\n boost::char_separator<char> eolSep(\"\\n\");\n tokenizer tokens(periodicTableAtomData, eolSep);\n for (tokenizer::iterator token = tokens.begin(); token != tokens.end();\n ++token) {\n if (*token != \" \") {\n atomicData adata(*token);\n byanum.push_back(adata);\n std::string enam = adata.Symbol();\n byname[enam] = adata.AtomicNum();\n }\n }\n\n unsigned int lidx = 0;\n std::istringstream istr;\n istr.imbue(std::locale(\"C\"));\n while (isotopesAtomData[lidx] != \"\" && isotopesAtomData[lidx] != \"EOS\") {\n tokenizer lines(isotopesAtomData[lidx++], eolSep);\n boost::char_separator<char> spaceSep(\" \\t\");\n for (tokenizer::iterator line = lines.begin(); line != lines.end();\n ++line) {\n if (*line != \" \") {\n tokenizer tokens(*line, spaceSep);\n tokenizer::iterator token = tokens.begin();\n int anum;\n istr.clear();\n istr.str(*token);\n istr >> anum;\n atomicData &adata = byanum[anum];\n ++token;\n if (token == tokens.end()) continue;\n ++token;\n if (token == tokens.end()) continue;\n unsigned int isotope;\n istr.clear();\n istr.str(*token);\n istr >> isotope;\n ++token;\n if (token == tokens.end()) continue;\n double mass;\n istr.clear();\n istr.str(*token);\n istr >> mass;\n ++token;\n if (token == tokens.end()) continue;\n double abundance;\n istr.clear();\n istr.str(*token);\n istr >> abundance;\n adata.d_isotopeInfoMap[isotope] = std::make_pair(mass, abundance);\n }\n }\n }\n}\n\nvoid PeriodicTable::initInstance() { ds_instance = std::unique_ptr<PeriodicTable>(new PeriodicTable()); }\n\nPeriodicTable *PeriodicTable::getTable() {\n#if RDK_BUILD_THREADSAFE_SSS\n static std::once_flag pt_init_once;\n std::call_once(pt_init_once, initInstance);\n#else\n if (!ds_instance) initInstance();\n#endif\n return ds_instance.get();\n}\n\n} \/\/ end of namespace\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":107} {"diff_hunk":"@@ -13,30 +13,35 @@ SyncedMemory::~SyncedMemory() {\n if (cpu_ptr_ && own_cpu_data_) {\n CaffeFreeHost(cpu_ptr_);\n }\n-\n- if (gpu_ptr_) {\n- CUDA_CHECK(cudaFree(gpu_ptr_));\n+ if (gpu_data_) {\n+ CUDA_CHECK(cudaFree(gpu_data_));\n }\n }\n \n inline void SyncedMemory::to_cpu() {\n switch (head_) {\n case UNINITIALIZED:\n- CaffeMallocHost(&cpu_ptr_, size_);\n- memset(cpu_ptr_, 0, size_);\n+ cpu_resize();\n head_ = HEAD_AT_CPU;\n own_cpu_data_ = true;\n break;\n case HEAD_AT_GPU:\n+ gpu_resize();\n+ cpu_resize();\n+ CUDA_CHECK(cudaMemcpy(cpu_data_, gpu_data_, size_, cudaMemcpyDeviceToHost));\n if (cpu_ptr_ == NULL) {\n CaffeMallocHost(&cpu_ptr_, size_);\n own_cpu_data_ = true;\n }\n- CUDA_CHECK(cudaMemcpy(cpu_ptr_, gpu_ptr_, size_, cudaMemcpyDeviceToHost));\n head_ = SYNCED;\n break;\n case HEAD_AT_CPU:\n+ cpu_resize();\n+ break;\n case SYNCED:\n+ if (cpu_resize()) {\n+ head_ = HEAD_AT_CPU;\n+ }\n break;\n }\n }","source_code":"\/\/ Copyright 2014 BVLC and contributors.\n\n#include \n\n#include \n\n#include \"caffe\/common.hpp\"\n#include \"caffe\/syncedmem.hpp\"\n\nnamespace caffe {\n\nSyncedMemory::~SyncedMemory() {\n if (cpu_ptr_ && own_cpu_data_) {\n CaffeFreeHost(cpu_ptr_);\n }\n\n if (gpu_ptr_) {\n CUDA_CHECK(cudaFree(gpu_ptr_));\n }\n}\n\ninline void SyncedMemory::to_cpu() {\n switch (head_) {\n case UNINITIALIZED:\n CaffeMallocHost(&cpu_ptr_, size_);\n memset(cpu_ptr_, 0, size_);\n head_ = HEAD_AT_CPU;\n own_cpu_data_ = true;\n break;\n case HEAD_AT_GPU:\n if (cpu_ptr_ == NULL) {\n CaffeMallocHost(&cpu_ptr_, size_);\n own_cpu_data_ = true;\n }\n 
CUDA_CHECK(cudaMemcpy(cpu_ptr_, gpu_ptr_, size_, cudaMemcpyDeviceToHost));\n head_ = SYNCED;\n break;\n case HEAD_AT_CPU:\n case SYNCED:\n break;\n }\n}\n\ninline void SyncedMemory::to_gpu() {\n switch (head_) {\n case UNINITIALIZED:\n CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_));\n CUDA_CHECK(cudaMemset(gpu_ptr_, 0, size_));\n head_ = HEAD_AT_GPU;\n break;\n case HEAD_AT_CPU:\n if (gpu_ptr_ == NULL) {\n CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_));\n }\n CUDA_CHECK(cudaMemcpy(gpu_ptr_, cpu_ptr_, size_, cudaMemcpyHostToDevice));\n head_ = SYNCED;\n break;\n case HEAD_AT_GPU:\n case SYNCED:\n break;\n }\n}\n\nconst void* SyncedMemory::cpu_data() {\n to_cpu();\n return (const void*)cpu_ptr_;\n}\n\nvoid SyncedMemory::set_cpu_data(void* data) {\n CHECK(data);\n if (own_cpu_data_) {\n CaffeFreeHost(cpu_ptr_);\n }\n cpu_ptr_ = data;\n head_ = HEAD_AT_CPU;\n own_cpu_data_ = false;\n}\n\nconst void* SyncedMemory::gpu_data() {\n to_gpu();\n return (const void*)gpu_ptr_;\n}\n\nvoid* SyncedMemory::mutable_cpu_data() {\n to_cpu();\n head_ = HEAD_AT_CPU;\n return cpu_ptr_;\n}\n\nvoid* SyncedMemory::mutable_gpu_data() {\n to_gpu();\n head_ = HEAD_AT_GPU;\n return gpu_ptr_;\n}\n\n\n} \/\/ namespace caffe\n\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":108} {"diff_hunk":"@@ -22,6 +22,7 @@\n #include \"protocol.h\"\n #include \"outputmessage.h\"\n #include \"rsa.h\"\n+#include \"xtea.h\"\n \n extern RSA g_RSA;\n ","source_code":"\/**\n * The Forgotten Server - a free and open-source MMORPG server emulator\n * Copyright (C) 2018 Mark Samman \n *\n * This program is free software; you can redistribute it and\/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation; either version 2 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License along\n * with this program; if not, write to the Free Software Foundation, Inc.,\n * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n *\/\n\n#include \"otpch.h\"\n\n#include \"protocol.h\"\n#include \"outputmessage.h\"\n#include \"rsa.h\"\n\nextern RSA g_RSA;\n\nvoid Protocol::onSendMessage(const OutputMessage_ptr& msg) const\n{\n\tif (!rawMessages) {\n\t\tmsg->writeMessageLength();\n\n\t\tif (encryptionEnabled) {\n\t\t\tXTEA_encrypt(*msg);\n\t\t\tmsg->addCryptoHeader(checksumEnabled);\n\t\t}\n\t}\n}\n\nvoid Protocol::onRecvMessage(NetworkMessage& msg)\n{\n\tif (encryptionEnabled && !XTEA_decrypt(msg)) {\n\t\treturn;\n\t}\n\n\tparsePacket(msg);\n}\n\nOutputMessage_ptr Protocol::getOutputBuffer(int32_t size)\n{\n\t\/\/dispatcher thread\n\tif (!outputBuffer) {\n\t\toutputBuffer = OutputMessagePool::getOutputMessage();\n\t} else if ((outputBuffer->getLength() + size) > NetworkMessage::MAX_PROTOCOL_BODY_LENGTH) {\n\t\tsend(outputBuffer);\n\t\toutputBuffer = OutputMessagePool::getOutputMessage();\n\t}\n\treturn outputBuffer;\n}\n\nvoid Protocol::XTEA_encrypt(OutputMessage& msg) const\n{\n\tconst uint32_t delta = 0x61C88647;\n\n\t\/\/ The message must be a multiple of 8\n\tsize_t paddingBytes = msg.getLength() % 8;\n\tif (paddingBytes != 0) {\n\t\tmsg.addPaddingBytes(8 - paddingBytes);\n\t}\n\n\tuint8_t* buffer = msg.getOutputBuffer();\n\tconst size_t messageLength = msg.getLength();\n\tsize_t readPos = 0;\n\tconst uint32_t k[] = {key[0], key[1], key[2], key[3]};\n\twhile (readPos < messageLength) {\n\t\tuint32_t v0;\n\t\tmemcpy(&v0, buffer + readPos, 4);\n\t\tuint32_t v1;\n\t\tmemcpy(&v1, buffer + readPos + 4, 4);\n\n\t\tuint32_t sum = 0;\n\n\t\tfor (int32_t i = 32; --i >= 0;) {\n\t\t\tv0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);\n\t\t\tsum -= delta;\n\t\t\tv1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[(sum >> 11) & 3]);\n\t\t}\n\n\t\tmemcpy(buffer + readPos, &v0, 4);\n\t\treadPos += 4;\n\t\tmemcpy(buffer + readPos, &v1, 4);\n\t\treadPos += 4;\n\t}\n}\n\nbool Protocol::XTEA_decrypt(NetworkMessage& msg) const\n{\n\tif (((msg.getLength() - 6) & 7) != 0) {\n\t\treturn false;\n\t}\n\n\tconst uint32_t delta = 0x61C88647;\n\n\tuint8_t* buffer = msg.getBuffer() + msg.getBufferPosition();\n\tconst size_t messageLength = (msg.getLength() - 6);\n\tsize_t readPos = 0;\n\tconst uint32_t k[] = {key[0], key[1], key[2], key[3]};\n\twhile (readPos < messageLength) {\n\t\tuint32_t v0;\n\t\tmemcpy(&v0, buffer + readPos, 4);\n\t\tuint32_t v1;\n\t\tmemcpy(&v1, buffer + readPos + 4, 4);\n\n\t\tuint32_t sum = 0xC6EF3720;\n\n\t\tfor (int32_t i = 32; --i >= 0;) {\n\t\t\tv1 -= ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[(sum >> 11) & 3]);\n\t\t\tsum += delta;\n\t\t\tv0 -= ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);\n\t\t}\n\n\t\tmemcpy(buffer + readPos, &v0, 4);\n\t\treadPos += 4;\n\t\tmemcpy(buffer + readPos, &v1, 4);\n\t\treadPos += 4;\n\t}\n\n\tint innerLength = msg.get();\n\tif (innerLength > msg.getLength() - 8) {\n\t\treturn false;\n\t}\n\n\tmsg.setLength(innerLength);\n\treturn true;\n}\n\nbool Protocol::RSA_decrypt(NetworkMessage& msg)\n{\n\tif ((msg.getLength() - msg.getBufferPosition()) < 128) {\n\t\treturn false;\n\t}\n\n\tg_RSA.decrypt(reinterpret_cast(msg.getBuffer()) + msg.getBufferPosition()); \/\/does not break strict aliasing\n\treturn msg.getByte() == 0;\n}\n\nuint32_t Protocol::getIP() const\n{\n\tif (auto connection = getConnection()) {\n\t\treturn 
connection->getIP();\n\t}\n\n\treturn 0;\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":109} {"diff_hunk":"@@ -12,6 +12,10 @@\n #include \"storage\/test\/TestUtils.h\"\n #include \"fs\/TempDir.h\"\n \n+DECLARE_int32(load_data_interval_secs);\n+DECLARE_int32(heartbeat_interval_secs);\n+DECLARE_uint32(heartbeat_interval);\n+\n namespace nebula {\n namespace meta {\n ","source_code":"\/* Copyright (c) 2019 vesoft inc. All rights reserved.\n *\n * This source code is licensed under Apache 2.0 License,\n * attached with Common Clause Condition 1.0, found in the LICENSES directory.\n *\/\n#include \"base\/Base.h\"\n#include \n#include \n#include \n#include \"meta\/processors\/admin\/Balancer.h\"\n#include \"meta\/test\/TestUtils.h\"\n#include \"storage\/test\/TestUtils.h\"\n#include \"fs\/TempDir.h\"\n\nnamespace nebula {\nnamespace meta {\n\nTEST(BalanceIntegrationTest, SimpleTest) {\n auto sc = std::make_unique();\n auto handler = std::make_shared(nullptr, nullptr);\n sc->mockCommon(\"storage\", 0, handler);\n LOG(INFO) << \"Start storage server on \" << sc->port_;\n}\n\n} \/\/ namespace meta\n} \/\/ namespace nebula\n\nint main(int argc, char** argv) {\n testing::InitGoogleTest(&argc, argv);\n folly::init(&argc, &argv, true);\n google::SetStderrLogging(google::INFO);\n return RUN_ALL_TESTS();\n}\n\n\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":110} {"diff_hunk":"@@ -74,7 +74,7 @@ TraverseExecutor::makeTraverseExecutor(Sentence *sentence, ExecutionContext *ect\n return executor;\n }\n \n-void Collector::collect(VariantType &var, RowWriter *writer) const {\n+Status Collector::collect(VariantType &var, RowWriter *writer) {\n switch (var.which()) {\n case VAR_INT64:\n (*writer) << boost::get(var);","source_code":"\/* Copyright (c) 2018 vesoft inc. 
All rights reserved.\n *\n * This source code is licensed under Apache 2.0 License,\n * attached with Common Clause Condition 1.0, found in the LICENSES directory.\n *\/\n\n#include \"base\/Base.h\"\n#include \"graph\/TraverseExecutor.h\"\n#include \"parser\/TraverseSentences.h\"\n#include \"graph\/GoExecutor.h\"\n#include \"graph\/PipeExecutor.h\"\n#include \"graph\/OrderByExecutor.h\"\n#include \"graph\/FetchVerticesExecutor.h\"\n#include \"graph\/FetchEdgesExecutor.h\"\n#include \"dataman\/RowReader.h\"\n#include \"dataman\/RowWriter.h\"\n#include \"graph\/SetExecutor.h\"\n#include \"graph\/FindExecutor.h\"\n#include \"graph\/MatchExecutor.h\"\n#include \"graph\/FindPathExecutor.h\"\n#include \"graph\/LimitExecutor.h\"\n\nnamespace nebula {\nnamespace graph {\n\nstd::unique_ptr<TraverseExecutor> TraverseExecutor::makeTraverseExecutor(Sentence *sentence) {\n return makeTraverseExecutor(sentence, ectx());\n}\n\n\n\/\/ static\nstd::unique_ptr<TraverseExecutor>\nTraverseExecutor::makeTraverseExecutor(Sentence *sentence, ExecutionContext *ectx) {\n auto kind = sentence->kind();\n std::unique_ptr<TraverseExecutor> executor;\n switch (kind) {\n case Sentence::Kind::kGo:\n executor = std::make_unique<GoExecutor>(sentence, ectx);\n break;\n case Sentence::Kind::kPipe:\n executor = std::make_unique<PipeExecutor>(sentence, ectx);\n break;\n case Sentence::Kind::kOrderBy:\n executor = std::make_unique<OrderByExecutor>(sentence, ectx);\n break;\n case Sentence::Kind::kFetchVertices:\n executor = std::make_unique<FetchVerticesExecutor>(sentence, ectx);\n break;\n case Sentence::Kind::kFetchEdges:\n executor = std::make_unique<FetchEdgesExecutor>(sentence, ectx);\n break;\n case Sentence::Kind::kSet:\n executor = std::make_unique<SetExecutor>(sentence, ectx);\n break;\n case Sentence::Kind::kMatch:\n executor = std::make_unique<MatchExecutor>(sentence, ectx);\n break;\n case Sentence::Kind::kFind:\n executor = std::make_unique<FindExecutor>(sentence, ectx);\n break;\n case Sentence::Kind::kFindPath:\n executor = std::make_unique<FindPathExecutor>(sentence, ectx);\n break;\n case Sentence::Kind::kLimit:\n executor = std::make_unique<LimitExecutor>(sentence, ectx);\n break;\n case Sentence::Kind::kUnknown:\n LOG(FATAL) << \"Sentence kind unknown\";\n break;\n default:\n LOG(FATAL) << \"Sentence kind illegal: \" << kind;\n break;\n }\n return executor;\n}\n\nvoid Collector::collect(VariantType &var, RowWriter *writer) const {\n switch (var.which()) {\n case VAR_INT64:\n (*writer) << boost::get<int64_t>(var);\n break;\n case VAR_DOUBLE:\n (*writer) << boost::get<double>(var);\n break;\n case VAR_BOOL:\n (*writer) << boost::get<bool>(var);\n break;\n case VAR_STR:\n (*writer) << boost::get<std::string>(var);\n break;\n default:\n LOG(FATAL) << \"Unknown VariantType: \" << var.which();\n }\n}\n\nVariantType Collector::getProp(const std::string &prop,\n const RowReader *reader) const {\n DCHECK(reader != nullptr);\n DCHECK(schema_ != nullptr);\n using nebula::cpp2::SupportedType;\n auto type = schema_->getFieldType(prop).type;\n switch (type) {\n case SupportedType::BOOL: {\n bool v;\n reader->getBool(prop, v);\n VLOG(3) << \"get prop: \" << prop << \", value: \" << v;\n return v;\n }\n case SupportedType::TIMESTAMP:\n case SupportedType::INT: {\n int64_t v;\n reader->getInt(prop, v);\n VLOG(3) << \"get prop: \" << prop << \", value: \" << v;\n return v;\n }\n case SupportedType::VID: {\n VertexID v;\n reader->getVid(prop, v);\n VLOG(3) << \"get prop: \" << prop << \", value: \" << v;\n return v;\n }\n case SupportedType::FLOAT: {\n float v;\n reader->getFloat(prop, v);\n VLOG(3) << \"get prop: \" << prop << \", value: \" << v;\n return static_cast<double>(v);\n }\n case SupportedType::DOUBLE: {\n double v;\n reader->getDouble(prop, v);\n VLOG(3) << \"get prop: 
\" << prop << \", value: \" << v;\n return v;\n }\n case SupportedType::STRING: {\n folly::StringPiece v;\n reader->getString(prop, v);\n VLOG(3) << \"get prop: \" << prop << \", value: \" << v;\n return v.toString();\n }\n default:\n LOG(FATAL) << \"Unknown type: \" << static_cast(type);\n return \"\";\n }\n}\n\n} \/\/ namespace graph\n} \/\/ namespace nebula\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":111} {"diff_hunk":"@@ -103,7 +103,7 @@ rtps::Time_t::Time_t(\n \n int64_t rtps::Time_t::to_ns() const\n {\n- int64_t nano = seconds_ * 1000000000ULL;\n+ int64_t nano = seconds_ * C_SECONDS;\n nano += nanosec_;\n return nano;\n }","source_code":"\/\/ Copyright 2019 Proyectos y Sistemas de Mantenimiento SL (eProsima).\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/**\n * @file Time_t.cpp\n *\/\n#include \n\nusing namespace eprosima::fastrtps;\n\nTime_t::Time_t()\n{\n seconds = 0;\n nanosec = 0;\n}\n\nTime_t::Time_t(\n int32_t sec,\n uint32_t nsec)\n{\n seconds = sec;\n nanosec = nsec;\n}\n\nTime_t::Time_t(\n long double sec)\n{\n seconds = static_cast(sec);\n nanosec = static_cast((sec - seconds) * 1000000000ULL);\n}\n\nvoid Time_t::fraction(\n uint32_t frac)\n{\n nanosec = (frac == 0xffffffff)\n ? 
0xffffffff\n : static_cast(std::lroundl(frac * rtps::FRACTION_TO_NANO));\n}\n\nuint32_t Time_t::fraction() const\n{\n if (nanosec == 0xffffffff)\n {\n return nanosec;\n }\n\n uint32_t fraction = static_cast(std::lroundl(nanosec * rtps::NANO_TO_FRACTION));\n uint32_t nano_check = static_cast(std::lroundl(fraction * rtps::FRACTION_TO_NANO));\n while (nano_check != nanosec)\n {\n nano_check = static_cast(std::lroundl(++fraction * rtps::FRACTION_TO_NANO));\n }\n\n return fraction;\n}\n\nint64_t Time_t::to_ns() const\n{\n int64_t nano = seconds * 1000000000ULL;\n nano += nanosec;\n return nano;\n}\n\nrtps::Time_t::Time_t()\n{\n seconds_ = 0;\n fraction_ = 0;\n nanosec_ = 0;\n}\n\nrtps::Time_t::Time_t(\n int32_t sec,\n uint32_t frac)\n{\n seconds_ = sec;\n set_fraction(frac);\n}\n\nrtps::Time_t::Time_t(\n long double sec)\n{\n seconds_ = static_cast(sec);\n set_fraction(static_cast((sec - seconds_) * 4294967296ULL));\n}\n\nrtps::Time_t::Time_t(\n const eprosima::fastrtps::Time_t& time)\n{\n seconds_ = time.seconds;\n set_nanosec(time.nanosec);\n}\n\nint64_t rtps::Time_t::to_ns() const\n{\n int64_t nano = seconds_ * 1000000000ULL;\n nano += nanosec_;\n return nano;\n}\n\nint32_t rtps::Time_t::seconds() const\n{\n return seconds_;\n}\n\nint32_t& rtps::Time_t::seconds()\n{\n return seconds_;\n}\n\nvoid rtps::Time_t::seconds(\n int32_t sec)\n{\n seconds_ = sec;\n}\n\nuint32_t rtps::Time_t::nanosec() const\n{\n return nanosec_;\n}\n\nvoid rtps::Time_t::nanosec(\n uint32_t nanos)\n{\n const uint32_t s_to_nano = 1000000000UL;\n if (nanos >= s_to_nano)\n {\n nanos %= s_to_nano; \/\/ Remove the seconds\n }\n set_nanosec(nanos);\n}\n\nuint32_t rtps::Time_t::fraction() const\n{\n return fraction_;\n}\n\nvoid rtps::Time_t::fraction(\n uint32_t frac)\n{\n set_fraction(frac);\n}\n\nDuration_t rtps::Time_t::to_duration_t() const\n{\n return Duration_t(seconds_, nanosec_);\n}\n\nvoid rtps::Time_t::from_duration_t(const Duration_t& duration)\n{\n seconds_ = duration.seconds;\n set_nanosec(duration.nanosec);\n}\n\nvoid rtps::Time_t::set_fraction(\n uint32_t frac)\n{\n fraction_ = frac;\n nanosec_ = (fraction_ == 0xffffffff)\n ? 0xffffffff\n : static_cast(std::lroundl(fraction_ * FRACTION_TO_NANO));\n}\n\nvoid rtps::Time_t::set_nanosec(\n uint32_t nanos)\n{\n nanosec_ = nanos;\n fraction_ = (nanosec_ == 0xffffffff)\n ? 
0xffffffff\n : static_cast<uint32_t>(std::lroundl(nanosec_ * NANO_TO_FRACTION));\n\n if (fraction_ != 0xffffffff)\n {\n uint32_t nano_check = static_cast<uint32_t>(std::lroundl(fraction_ * FRACTION_TO_NANO));\n while (nano_check != nanosec_)\n {\n nano_check = static_cast<uint32_t>(std::lroundl(++fraction_ * FRACTION_TO_NANO));\n }\n }\n}","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":112} {"diff_hunk":"@@ -38,19 +38,24 @@ int main(int argc, char const *argv[]) {\n .set_max_iteration_count(5)\n .set_accuracy_threshold(0.001);\n \n- const auto result_train = dal::train(kmeans_desc, x_train, initial_centroids);\n+ #ifdef MPI \n+ oneapi::dal::network::mpi::network net;\n+ #else\n+ oneapi::dal::network::empty_network net;\n+ #endif\n \n- std::cout << \"Iteration count: \" << result_train.get_iteration_count() << std::endl;\n- std::cout << \"Objective function value: \" << result_train.get_objective_function_value()\n+ const auto result_train = dal::train(kmeans_desc, x_train, initial_centroids, net);\n+\n+ std::cout << \"[\" << myRank << \"]\" << \"Iteration count: \" << result_train.get_iteration_count() << std::endl;\n+ std::cout << \"[\" << myRank << \"]\" << \"Objective function value: \" << result_train.get_objective_function_value()\n << std::endl;\n- std::cout << \"Labels:\" << std::endl << result_train.get_labels() << std::endl;\n- std::cout << \"Centroids:\" << std::endl << result_train.get_model().get_centroids() << std::endl;\n+ std::cout << \"[\" << myRank << \"]\" << \"Centroids:\" << std::endl << result_train.get_model().get_centroids() << std::endl;\n \n const auto result_test = dal::infer(kmeans_desc, result_train.get_model(), x_test);\n \n- std::cout << \"Infer result:\" << std::endl << result_test.get_labels() << std::endl;\n-\n- std::cout << \"Ground truth:\" << std::endl << y_test << std::endl;\n+ #ifdef MPI\n+ MPI_Finalize();\n+ #endif\n \n return 0;\n }","source_code":"\/*******************************************************************************\n* Copyright 2020 Intel Corporation\n*\n* Licensed under the Apache License, Version 2.0 (the \"License\");\n* you may not use this file except in compliance with the License.\n* You may obtain a copy of the License at\n*\n* http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n*\n* Unless required by applicable law or agreed to in writing, software\n* distributed under the License is distributed on an \"AS IS\" BASIS,\n* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n* See the License for the specific language governing permissions and\n* limitations under the License.\n*******************************************************************************\/\n\n#include \"example_util\/utils.hpp\"\n#include \"oneapi\/dal\/algo\/kmeans.hpp\"\n#include \"oneapi\/dal\/io\/csv.hpp\"\n\nusing namespace oneapi;\n\n\nint main(int argc, char const *argv[]) {\n const std::string train_data_file_name = get_data_path(\"kmeans_dense_train_data.csv\");\n const std::string initial_centroids_file_name = get_data_path(\"kmeans_dense_train_centroids.csv\");\n const std::string test_data_file_name = get_data_path(\"kmeans_dense_test_data.csv\");\n const std::string test_label_file_name = get_data_path(\"kmeans_dense_test_label.csv\");\n\n const auto x_train = dal::read<dal::table>(dal::csv::data_source{train_data_file_name});\n const auto initial_centroids = dal::read<dal::table>(dal::csv::data_source{initial_centroids_file_name});\n\n const auto x_test = dal::read<dal::table>(dal::csv::data_source{test_data_file_name});\n const auto y_test = 
dal::read<dal::table>(dal::csv::data_source{test_label_file_name});\n\n const auto kmeans_desc = dal::kmeans::descriptor<>()\n .set_cluster_count(20)\n .set_max_iteration_count(5)\n .set_accuracy_threshold(0.001);\n\n const auto result_train = dal::train(kmeans_desc, x_train, initial_centroids);\n\n std::cout << \"Iteration count: \" << result_train.get_iteration_count() << std::endl;\n std::cout << \"Objective function value: \" << result_train.get_objective_function_value()\n << std::endl;\n std::cout << \"Labels:\" << std::endl << result_train.get_labels() << std::endl;\n std::cout << \"Centroids:\" << std::endl << result_train.get_model().get_centroids() << std::endl;\n\n const auto result_test = dal::infer(kmeans_desc, result_train.get_model(), x_test);\n\n std::cout << \"Infer result:\" << std::endl << result_test.get_labels() << std::endl;\n\n std::cout << \"Ground truth:\" << std::endl << y_test << std::endl;\n\n return 0;\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":113} {"diff_hunk":"@@ -123,17 +123,11 @@ except ImportError:\n default_client = None\n wait = None\n \n- class dask_Array:\n+ class dask_Array: # type: ignore\n \"\"\"Dummy class for dask.array.Array.\"\"\"\n \n- pass\n-\n- class dask_DataFrame:\n+ class dask_DataFrame: # type: ignore\n \"\"\"Dummy class for dask.dataframe.DataFrame.\"\"\"\n \n- pass\n-\n- class dask_Series:\n+ class dask_Series: # type: ignore\n \"\"\"Dummy class for dask.dataframe.Series.\"\"\"\n-\n- pass","source_code":"# coding: utf-8\n\"\"\"Compatibility library.\"\"\"\n\n\"\"\"pandas\"\"\"\ntry:\n from pandas import DataFrame as pd_DataFrame\n from pandas import Series as pd_Series\n from pandas import concat\n from pandas.api.types import is_sparse as is_dtype_sparse\n PANDAS_INSTALLED = True\nexcept ImportError:\n PANDAS_INSTALLED = False\n\n class pd_Series:\n \"\"\"Dummy class for pandas.Series.\"\"\"\n\n pass\n\n class pd_DataFrame:\n \"\"\"Dummy class for pandas.DataFrame.\"\"\"\n\n pass\n\n concat = None\n is_dtype_sparse = None\n\n\"\"\"matplotlib\"\"\"\ntry:\n import matplotlib\n MATPLOTLIB_INSTALLED = True\nexcept ImportError:\n MATPLOTLIB_INSTALLED = False\n\n\"\"\"graphviz\"\"\"\ntry:\n import graphviz\n GRAPHVIZ_INSTALLED = True\nexcept ImportError:\n GRAPHVIZ_INSTALLED = False\n\n\"\"\"datatable\"\"\"\ntry:\n import datatable\n if hasattr(datatable, \"Frame\"):\n dt_DataTable = datatable.Frame\n else:\n dt_DataTable = datatable.DataTable\n DATATABLE_INSTALLED = True\nexcept ImportError:\n DATATABLE_INSTALLED = False\n\n class dt_DataTable:\n \"\"\"Dummy class for datatable.DataTable.\"\"\"\n\n pass\n\n\n\"\"\"sklearn\"\"\"\ntry:\n from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin\n from sklearn.preprocessing import LabelEncoder\n from sklearn.utils.class_weight import compute_sample_weight\n from sklearn.utils.multiclass import check_classification_targets\n from sklearn.utils.validation import assert_all_finite, check_array, check_X_y\n try:\n from sklearn.exceptions import NotFittedError\n from sklearn.model_selection import GroupKFold, StratifiedKFold\n except ImportError:\n from sklearn.cross_validation import GroupKFold, StratifiedKFold\n from sklearn.utils.validation import NotFittedError\n try:\n from sklearn.utils.validation import _check_sample_weight\n except ImportError:\n from sklearn.utils.validation import check_consistent_length\n\n # dummy function to support older version of scikit-learn\n def _check_sample_weight(sample_weight, X, dtype=None):\n check_consistent_length(sample_weight, 
X)\n return sample_weight\n\n SKLEARN_INSTALLED = True\n _LGBMModelBase = BaseEstimator\n _LGBMRegressorBase = RegressorMixin\n _LGBMClassifierBase = ClassifierMixin\n _LGBMLabelEncoder = LabelEncoder\n LGBMNotFittedError = NotFittedError\n _LGBMStratifiedKFold = StratifiedKFold\n _LGBMGroupKFold = GroupKFold\n _LGBMCheckXY = check_X_y\n _LGBMCheckArray = check_array\n _LGBMCheckSampleWeight = _check_sample_weight\n _LGBMAssertAllFinite = assert_all_finite\n _LGBMCheckClassificationTargets = check_classification_targets\n _LGBMComputeSampleWeight = compute_sample_weight\nexcept ImportError:\n SKLEARN_INSTALLED = False\n _LGBMModelBase = object\n _LGBMClassifierBase = object\n _LGBMRegressorBase = object\n _LGBMLabelEncoder = None\n LGBMNotFittedError = ValueError\n _LGBMStratifiedKFold = None\n _LGBMGroupKFold = None\n _LGBMCheckXY = None\n _LGBMCheckArray = None\n _LGBMCheckSampleWeight = None\n _LGBMAssertAllFinite = None\n _LGBMCheckClassificationTargets = None\n _LGBMComputeSampleWeight = None\n\n\"\"\"dask\"\"\"\ntry:\n from dask import delayed\n from dask.array import Array as dask_Array\n from dask.dataframe import DataFrame as dask_DataFrame\n from dask.dataframe import Series as dask_Series\n from dask.distributed import Client, default_client, wait\n DASK_INSTALLED = True\nexcept ImportError:\n DASK_INSTALLED = False\n delayed = None\n Client = object\n default_client = None\n wait = None\n\n class dask_Array:\n \"\"\"Dummy class for dask.array.Array.\"\"\"\n\n pass\n\n class dask_DataFrame:\n \"\"\"Dummy class for dask.dataframe.DataFrame.\"\"\"\n\n pass\n\n class dask_Series:\n \"\"\"Dummy class for dask.dataframe.Series.\"\"\"\n\n pass\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":114} {"diff_hunk":"@@ -146,7 +146,7 @@ def _HeightFirstSplit(cluster, n):\n if len(cluster) == n:\n return cluster.GetPoints()\n clusters = [cluster]\n- for i in range(n - 1):\n+ for _ in range(n - 1):\n nxtIdx = 0\n while nxtIdx < len(clusters) and len(clusters[nxtIdx]) == 1:\n nxtIdx += 1","source_code":"# $Id$\n#\n# Copyright (C) 2001-2008 greg Landrum\n#\n# @@ All Rights Reserved @@\n# This file is part of the RDKit.\n# The contents are covered by the terms of the BSD license\n# which is included in the file license.txt, found at the root\n# of the RDKit source tree.\n#\n\"\"\"utility functions for clustering\n\n\"\"\"\n\n\ndef GetNodeList(cluster):\n \"\"\"returns an ordered list of all nodes below cluster\n\n the ordering is done using the lengths of the child nodes\n\n **Arguments**\n\n - cluster: the cluster in question\n\n **Returns**\n\n - a list of the leaves below this cluster\n\n \"\"\"\n if len(cluster) == 1:\n return [cluster]\n else:\n children = cluster.GetChildren()\n children.sort(key=lambda x: len(x), reverse=True)\n res = []\n for child in children:\n res += GetNodeList(child)\n res += [cluster]\n return res\n\n\ndef GetNodesDownToCentroids(cluster, above=1):\n \"\"\"returns an ordered list of all nodes below cluster\n\n\n \"\"\"\n if hasattr(cluster, '_isCentroid'):\n cluster._aboveCentroid = 0\n above = -1\n else:\n cluster._aboveCentroid = above\n if len(cluster) == 1:\n return [cluster]\n else:\n res = []\n children = cluster.GetChildren()\n children.sort(lambda x, y: cmp(len(y), len(x)))\n for child in children:\n res = res + GetNodesDownToCentroids(child, above)\n res = res + [cluster]\n return res\n\n\ndef FindClusterCentroidFromDists(cluster, dists):\n \"\"\" find the point in a cluster which has the smallest summed \n Euclidean distance to all others\n\n 
**Arguments**\n\n - cluster: the cluster to work with\n\n - dists: the distance matrix to use for the points\n\n **Returns**\n\n - the index of the centroid point\n\n \"\"\"\n children = cluster.GetPoints()\n pts = [x.GetData() for x in children]\n\n best = 1e24\n bestIdx = -1\n for pt in pts:\n dAccum = 0.0\n # loop over others and add'em up\n for other in pts:\n if other != pt:\n if other > pt:\n row, col = pt, other\n else:\n row, col = other, pt\n dAccum += dists[col * (col - 1) \/ 2 + row]\n if dAccum >= best:\n # minor efficiency hack\n break\n if dAccum < best:\n best = dAccum\n bestIdx = pt\n for i in range(len(pts)):\n pt = pts[i]\n if pt != bestIdx:\n if pt > bestIdx:\n row, col = bestIdx, pt\n else:\n row, col = pt, bestIdx\n children[i]._distToCenter = dists[col * (col - 1) \/ 2 + row]\n else:\n children[i]._distToCenter = 0.0\n children[i]._clustCenter = bestIdx\n cluster._clustCenter = bestIdx\n cluster._distToCenter = 0.0\n\n return bestIdx\n\n\ndef _BreadthFirstSplit(cluster, n):\n \"\"\" *Internal Use Only*\n\n \"\"\"\n if len(cluster) < n:\n raise ValueError('Cannot split cluster of length %d into %d pieces' % (len(cluster), n))\n if len(cluster) == n:\n return cluster.GetPoints()\n clusters = [cluster]\n nxtIdx = 0\n for i in range(n - 1):\n while nxtIdx < len(clusters) and len(clusters[nxtIdx]) == 1:\n nxtIdx += 1\n assert nxtIdx < len(clusters)\n\n children = clusters[nxtIdx].GetChildren()\n children.sort(key=lambda x: x.GetMetric(), reverse=True)\n for child in children:\n clusters.append(child)\n del clusters[nxtIdx]\n return clusters\n\n\ndef _HeightFirstSplit(cluster, n):\n \"\"\" *Internal Use Only*\n\n \"\"\"\n if len(cluster) < n:\n raise ValueError('Cannot split cluster of length %d into %d pieces' % (len(cluster), n))\n if len(cluster) == n:\n return cluster.GetPoints()\n clusters = [cluster]\n for i in range(n - 1):\n nxtIdx = 0\n while nxtIdx < len(clusters) and len(clusters[nxtIdx]) == 1:\n nxtIdx += 1\n assert nxtIdx < len(clusters)\n\n children = clusters[nxtIdx].GetChildren()\n for child in children:\n clusters.append(child)\n del clusters[nxtIdx]\n clusters.sort(key=lambda x: x.GetMetric(), reverse=True)\n return clusters\n\n\ndef SplitIntoNClusters(cluster, n, breadthFirst=1):\n \"\"\" splits a cluster tree into a set of branches\n\n **Arguments**\n\n - cluster: the root of the cluster tree\n\n - n: the number of clusters to include in the split\n\n - breadthFirst: toggles breadth first (vs depth first) cleavage\n of the cluster tree.\n\n **Returns**\n\n - a list of sub clusters\n\n \"\"\"\n if breadthFirst:\n return _BreadthFirstSplit(cluster, n)\n else:\n return _HeightFirstSplit(cluster, n)\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":115} {"diff_hunk":"@@ -147,7 +147,7 @@ int main() {\n for (int t = 0; t < TEST_ITERS; t++) {\n hipEventRecord(start);\n for (int i = 0; i < DISPATCHES_PER_TEST; i++) {\n- hipLaunchKernelGGL(NearlyNull, dim3(NUM_GROUPS), dim3(GROUP_SIZE), 0, stream, Ad);\n+ hipExtLaunchKernelGGL((EmptyKernel), dim3(NUM_GROUPS), dim3(GROUP_SIZE), 0, stream, start, stop, 0);\n }\n stopTest(start, stop, \"StreamASyncDispatchNoWait\", DISPATCHES_PER_TEST);\n }","source_code":"\/*\nCopyright (c) 2015-present Advanced Micro Devices, Inc. 
All rights reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*\/\n\n#include \"hip\/hip_runtime.h\"\n#include \n#include \n#include \"ResultDatabase.h\"\n\n#define PRINT_PROGRESS 0\n\n#define check(cmd) \\\n { \\\n hipError_t status = cmd; \\\n if (status != hipSuccess) { \\\n printf(\"error: '%s'(%d) from %s at %s:%d\\n\", hipGetErrorString(status), status, #cmd, \\\n __FILE__, __LINE__); \\\n abort(); \\\n } \\\n }\n\n#define LEN 1024 * 1024\n\n#define NUM_GROUPS 1\n#define GROUP_SIZE 64\n#define TEST_ITERS 20\n#define DISPATCHES_PER_TEST 100\n\nconst unsigned p_tests = 0xfffffff;\n\n\n\/\/ HCC optimizes away fully NULL kernel calls, so run one that is nearly null:\n__global__ void NearlyNull(float* Ad) {\n if (Ad) {\n Ad[0] = 42;\n }\n}\n\n\nResultDatabase resultDB;\n\n\nvoid stopTest(hipEvent_t start, hipEvent_t stop, const char* msg, int iters) {\n float mS = 0;\n check(hipEventRecord(stop));\n check(hipDeviceSynchronize());\n check(hipEventElapsedTime(&mS, start, stop));\n resultDB.AddResult(std::string(msg), \"\", \"uS\", mS * 1000 \/ iters);\n if (PRINT_PROGRESS & 0x1) {\n std::cout << msg << \"\\t\\t\" << mS * 1000 \/ iters << \" uS\" << std::endl;\n }\n if (PRINT_PROGRESS & 0x2) {\n resultDB.DumpSummary(std::cout);\n }\n}\n\n\nint main() {\n hipError_t err;\n float* Ad;\n check(hipMalloc(&Ad, 4));\n\n\n hipStream_t stream;\n check(hipStreamCreate(&stream));\n\n\n hipEvent_t start, sync, stop;\n check(hipEventCreate(&start));\n check(hipEventCreateWithFlags(&sync, hipEventBlockingSync));\n check(hipEventCreate(&stop));\n\n\n hipStream_t stream0 = 0;\n\n\n if (p_tests & 0x1) {\n hipEventRecord(start);\n hipLaunchKernelGGL(NearlyNull, dim3(NUM_GROUPS), dim3(GROUP_SIZE), 0, stream0, Ad);\n stopTest(start, stop, \"FirstKernelLaunch\", 1);\n }\n\n\n if (p_tests & 0x2) {\n hipEventRecord(start);\n hipLaunchKernelGGL(NearlyNull, dim3(NUM_GROUPS), dim3(GROUP_SIZE), 0, stream0, Ad);\n stopTest(start, stop, \"SecondKernelLaunch\", 1);\n }\n\n\n if (p_tests & 0x4) {\n for (int t = 0; t < TEST_ITERS; t++) {\n hipEventRecord(start);\n for (int i = 0; i < DISPATCHES_PER_TEST; i++) {\n hipLaunchKernelGGL(NearlyNull, dim3(NUM_GROUPS), dim3(GROUP_SIZE), 0, stream0, Ad);\n hipEventRecord(sync);\n hipEventSynchronize(sync);\n }\n stopTest(start, stop, \"NullStreamASyncDispatchWait\", DISPATCHES_PER_TEST);\n }\n }\n\n\n if (p_tests & 0x10) {\n for (int t = 0; t < TEST_ITERS; t++) {\n hipEventRecord(start);\n for (int i = 0; i < DISPATCHES_PER_TEST; i++) {\n hipLaunchKernelGGL(NearlyNull, dim3(NUM_GROUPS), 
dim3(GROUP_SIZE), 0, stream, Ad);\n hipEventRecord(sync);\n hipEventSynchronize(sync);\n }\n stopTest(start, stop, \"StreamASyncDispatchWait\", DISPATCHES_PER_TEST);\n }\n }\n\n#if 1\n\n if (p_tests & 0x40) {\n for (int t = 0; t < TEST_ITERS; t++) {\n hipEventRecord(start);\n for (int i = 0; i < DISPATCHES_PER_TEST; i++) {\n hipLaunchKernelGGL(NearlyNull, dim3(NUM_GROUPS), dim3(GROUP_SIZE), 0, stream0, Ad);\n }\n stopTest(start, stop, \"NullStreamASyncDispatchNoWait\", DISPATCHES_PER_TEST);\n }\n }\n\n if (p_tests & 0x80) {\n for (int t = 0; t < TEST_ITERS; t++) {\n hipEventRecord(start);\n for (int i = 0; i < DISPATCHES_PER_TEST; i++) {\n hipLaunchKernelGGL(NearlyNull, dim3(NUM_GROUPS), dim3(GROUP_SIZE), 0, stream, Ad);\n }\n stopTest(start, stop, \"StreamASyncDispatchNoWait\", DISPATCHES_PER_TEST);\n }\n }\n#endif\n resultDB.DumpSummary(std::cout);\n\n\n check(hipEventDestroy(start));\n check(hipEventDestroy(sync));\n check(hipEventDestroy(stop));\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":116} {"diff_hunk":"@@ -163,7 +163,7 @@ class Canvas(CanvasBase):\n \n def addCanvasPolygon(self, ps, color=(0, 0, 0), fill=True, stroke=False, **kwargs):\n if not fill and not stroke:\n- return\n+ return \n dps = []\n for p in ps:\n dps.extend(p)","source_code":"#\n# Copyright (C) 2008 Greg Landrum\n#\n# @@ All Rights Reserved @@\n# This file is part of the RDKit.\n# The contents are covered by the terms of the BSD license\n# which is included in the file license.txt, found at the root\n# of the RDKit source tree.\n#\nimport os\nimport re\n\nfrom aggdraw import Brush, Pen\nfrom aggdraw import Draw\nfrom aggdraw import Font\nfrom rdkit import RDConfig\nfrom rdkit.Chem.Draw.canvasbase import CanvasBase\n\nfaceMap = {'sans': os.path.join(RDConfig.RDCodeDir, 'Chem', 'Draw', 'FreeSans.ttf')}\n\n\ndef convertColor(color):\n color = (int(color[0] * 255), int(color[1] * 255), int(color[2] * 255))\n return color\n\n\nclass Canvas(CanvasBase):\n # fonts appear smaller in aggdraw than with cairo\n # fix that here:\n fontScale = 1.2\n\n def __init__(self,\n img=None,\n imageType=None, # determines file type\n fileName=None, # if set determines output file name\n size=None, ):\n if img is None:\n try:\n import Image\n except ImportError:\n from PIL import Image\n if size is None:\n raise ValueError('please provide either an image or a size')\n img = Image.new('RGBA', size, \"white\")\n self.image = img\n self.draw = Draw(img)\n self.draw.setantialias(True)\n if size is None:\n self.size = self.draw.size\n else:\n self.size = size\n if imageType and imageType not in ('png', 'jpg'):\n raise ValueError('unsupported image type for agg canvas')\n self.drawType = imageType\n self.fileName = fileName\n\n def _doLine(self, p1, p2, pen, **kwargs):\n if kwargs.get('dash', (0, 0)) == (0, 0):\n self.draw.line((p1[0], p1[1], p2[0], p2[1]), pen)\n else:\n dash = kwargs['dash']\n pts = self._getLinePoints(p1, p2, dash)\n\n currDash = 0\n dashOn = True\n while currDash < (len(pts) - 1):\n if dashOn:\n p1 = pts[currDash]\n p2 = pts[currDash + 1]\n self.draw.line((p1[0], p1[1], p2[0], p2[1]), pen)\n currDash += 1\n dashOn = not dashOn\n\n def addCanvasLine(self, p1, p2, color=(0, 0, 0), color2=None, **kwargs):\n if color2 and color2 != color:\n mp = (p1[0] + p2[0]) \/ 2., (p1[1] + p2[1]) \/ 2.\n color = convertColor(color)\n self._doLine(p1, mp, Pen(color, kwargs.get('linewidth', 1)), **kwargs)\n color2 = convertColor(color2)\n self._doLine(mp, p2, Pen(color2, kwargs.get('linewidth', 1)), **kwargs)\n else:\n 
color = convertColor(color)\n self._doLine(p1, p2, Pen(color, kwargs.get('linewidth', 1)), **kwargs)\n\n def addCanvasText(self, text, pos, font, color=(0, 0, 0), **kwargs):\n orientation = kwargs.get('orientation', 'E')\n color = convertColor(color)\n aggFont = Font(color, faceMap[font.face], size=font.size * self.fontScale)\n\n blocks = list(re.finditer(r'\\<(.+?)\\>(.+?)\\<\/\\1\\>', text))\n w, h = 0, 0\n supH = 0\n subH = 0\n if not len(blocks):\n w, h = self.draw.textsize(text, aggFont)\n tw, th = w, h\n offset = w * pos[2]\n dPos = pos[0] - w \/ 2. + offset, pos[1] - h \/ 2.\n self.draw.text(dPos, text, aggFont)\n else:\n dblocks = []\n idx = 0\n for block in blocks:\n blockStart, blockEnd = block.span(0)\n if blockStart != idx:\n # untagged text:\n tblock = text[idx:blockStart]\n tw, th = self.draw.textsize(tblock, aggFont)\n w += tw\n h = max(h, th)\n dblocks.append((tblock, '', tw, th))\n fmt = block.groups()[0]\n tblock = block.groups()[1]\n if fmt in ('sub', 'sup'):\n lFont = Font(color, faceMap[font.face], size=0.8 * font.size * self.fontScale)\n else:\n lFont = aggFont\n tw, th = self.draw.textsize(tblock, lFont)\n w += tw\n if fmt == 'sub':\n subH = max(subH, th)\n elif fmt == 'sup':\n supH = max(supH, th)\n else:\n h = max(h, th)\n dblocks.append((tblock, fmt, tw, th))\n idx = blockEnd\n if idx != len(text):\n # untagged text:\n tblock = text[idx:]\n tw, th = self.draw.textsize(tblock, aggFont)\n w += tw\n h = max(h, th)\n dblocks.append((tblock, '', tw, th))\n\n supH *= 0.5\n subH *= 0.5\n h += supH + subH\n offset = w * pos[2]\n dPos = [pos[0] - w \/ 2. + offset, pos[1] - h \/ 2.]\n if orientation == 'W':\n dPos = [pos[0] - w + offset, pos[1] - h \/ 2.]\n elif orientation == 'E':\n dPos = [pos[0] + offset, pos[1] - h \/ 2.]\n else:\n dPos = [pos[0] - w \/ 2 + offset, pos[1] - h \/ 2.]\n\n if supH:\n dPos[1] += supH\n for txt, fmt, tw, th in dblocks:\n tPos = dPos[:]\n if fmt == 'sub':\n tPos[1] += subH\n elif fmt == 'sup':\n tPos[1] -= supH\n if fmt in ('sub', 'sup'):\n lFont = Font(color, faceMap[font.face], size=0.8 * font.size * self.fontScale)\n else:\n lFont = aggFont\n self.draw.text(tPos, txt, lFont)\n dPos[0] += tw\n return (tw + th * .4, th + th * .4, offset)\n\n def addCanvasPolygon(self, ps, color=(0, 0, 0), fill=True, stroke=False, **kwargs):\n if not fill and not stroke:\n return\n dps = []\n for p in ps:\n dps.extend(p)\n color = convertColor(color)\n brush = None\n pen = None\n if fill:\n brush = Brush(color)\n if stroke:\n pen = Pen(color)\n self.draw.polygon(dps, pen, brush)\n\n def addCanvasDashedWedge(self, p1, p2, p3, dash=(2, 2), color=(0, 0, 0), color2=None, **kwargs):\n pen = Pen(color, kwargs.get('linewidth', 1))\n dash = (3, 3)\n pts1 = self._getLinePoints(p1, p2, dash)\n pts2 = self._getLinePoints(p1, p3, dash)\n\n if len(pts2) < len(pts1):\n pts2, pts1 = pts1, pts2\n\n for i in range(len(pts1)):\n self.draw.line((pts1[i][0], pts1[i][1], pts2[i][0], pts2[i][1]), pen)\n\n def flush(self):\n self.draw.flush()\n if self.fileName:\n self.image.save(self.fileName)\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":117} {"diff_hunk":"@@ -52,11 +52,18 @@ class EditableMol : boost::noncopyable {\n PRECONDITION(atom, \"bad atom\");\n return dp_mol->addAtom(atom, true, false);\n };\n- void ReplaceAtom(unsigned int idx, Atom *atom) {\n+ void ReplaceAtom(unsigned int idx, Atom *atom,\n+ bool updateLabels, bool preserveProps) {\n PRECONDITION(dp_mol, \"no molecule\");\n PRECONDITION(atom, \"bad atom\");\n- dp_mol->replaceAtom(idx, atom);\n+ 
dp_mol->replaceAtom(idx, atom, updateLabels, preserveProps);\n };\n+ void ReplaceBond(unsigned int idx, Bond *bond, bool preserveProps) {\n+ PRECONDITION(dp_mol, \"no molecule\");\n+ PRECONDITION(bond, \"bad bond\");\n+ dp_mol->replaceBond(idx, bond, preserveProps);\n+ };\n+ \n ROMol *GetMol() const {\n PRECONDITION(dp_mol, \"no molecule\");\n ROMol *res = new ROMol(*dp_mol);","source_code":"\/\/ $Id$\n\/\/\n\/\/ Copyright (C) 2007 Greg Landrum\n\/\/\n\/\/ @@ All Rights Reserved @@\n\/\/ This file is part of the RDKit.\n\/\/ The contents are covered by the terms of the BSD license\n\/\/ which is included in the file license.txt, found at the root\n\/\/ of the RDKit source tree.\n\/\/\n\/\/ there's a compiler bug in some versions of g++ that causes this file to not\n\/\/ compile unless\n\/\/ we skip the docstrings:\n\/\/#define BOOST_PYTHON_NO_PY_SIGNATURES\n\n#define NO_IMPORT_ARRAY\n#include \n#include \n\n#include \"rdchem.h\"\n\/\/ ours\n#include \n\nnamespace python = boost::python;\n\nnamespace RDKit {\n\nnamespace {\nclass EditableMol : boost::noncopyable {\n public:\n EditableMol(const ROMol &m) { dp_mol = new RWMol(m); };\n ~EditableMol() {\n PRECONDITION(dp_mol, \"no molecule\");\n delete dp_mol;\n };\n\n void RemoveAtom(unsigned int idx) {\n PRECONDITION(dp_mol, \"no molecule\");\n dp_mol->removeAtom(idx);\n };\n void RemoveBond(unsigned int idx1, unsigned int idx2) {\n PRECONDITION(dp_mol, \"no molecule\");\n dp_mol->removeBond(idx1, idx2);\n };\n int AddBond(unsigned int begAtomIdx, unsigned int endAtomIdx,\n Bond::BondType order = Bond::UNSPECIFIED) {\n PRECONDITION(dp_mol, \"no molecule\");\n return dp_mol->addBond(begAtomIdx, endAtomIdx, order);\n };\n int AddAtom(Atom *atom) {\n PRECONDITION(dp_mol, \"no molecule\");\n PRECONDITION(atom, \"bad atom\");\n return dp_mol->addAtom(atom, true, false);\n };\n void ReplaceAtom(unsigned int idx, Atom *atom) {\n PRECONDITION(dp_mol, \"no molecule\");\n PRECONDITION(atom, \"bad atom\");\n dp_mol->replaceAtom(idx, atom);\n };\n ROMol *GetMol() const {\n PRECONDITION(dp_mol, \"no molecule\");\n ROMol *res = new ROMol(*dp_mol);\n return res;\n };\n\n private:\n RWMol *dp_mol;\n};\n}\n\nstruct EditableMol_wrapper {\n static void wrap() {\n std::string molClassDoc =\n \"The EditableMol class.\\n\\n\\\n This class can be used to add\/remove bonds and atoms to\\n\\\n a molecule.\\n\\\n In order to use it, you need to first construct an EditableMol\\n\\\n from a standard Mol:\\n\\\n >>> m = Chem.MolFromSmiles('CCC')\\n\\\n >>> em = Chem.EditableMol(m)\\n\\\n >>> em.AddAtom(Chem.Atom(8))\\n\\\n >>> em.AddBond(0,3,Chem.BondType.SINGLE)\\n\\\n >>> m2 = em.GetMol()\\n\\\n >>> Chem.SanitizeMol(m2)\\n\\\n >>> Chem.MolToSmiles(m2)\\n\\\n 'CCCO'\\n\\\n\\n\\\n *Note*: It is very, very easy to shoot yourself in the foot with\\n\\\n this class by constructing an unreasonable molecule.\\n\\\n\";\n python::class_(\n \"EditableMol\", \"an editable molecule class\",\n python::init(\"Construct from a Mol\"))\n .def(\"RemoveAtom\", &EditableMol::RemoveAtom,\n \"Remove the specified atom from the molecule\")\n .def(\"RemoveBond\", &EditableMol::RemoveBond,\n \"Remove the specified bond from the molecule\")\n\n .def(\"AddBond\", &EditableMol::AddBond,\n (python::arg(\"mol\"), python::arg(\"beginAtomIdx\"),\n python::arg(\"endAtomIdx\"),\n python::arg(\"order\") = Bond::UNSPECIFIED),\n \"add a bond, returns the index of the newly added bond\")\n\n .def(\"AddAtom\", &EditableMol::AddAtom,\n (python::arg(\"mol\"), python::arg(\"atom\")),\n \"add an atom, returns 
the index of the newly added atom\")\n .def(\"ReplaceAtom\", &EditableMol::ReplaceAtom,\n (python::arg(\"mol\"), python::arg(\"index\"), python::arg(\"newAtom\")),\n \"replaces the specified atom with the provided one\")\n .def(\"GetMol\", &EditableMol::GetMol,\n \"Returns a Mol (a normal molecule)\",\n python::return_value_policy());\n };\n};\n\n} \/\/ end of namespace\nvoid wrap_EditableMol() { RDKit::EditableMol_wrapper::wrap(); }\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":118} {"diff_hunk":"@@ -167,13 +167,18 @@ void SYCLInternal::initialize(const sycl::queue& q) {\n void SYCLInternal::finalize() {\n SYCL().fence();\n was_finalized = true;\n- if (nullptr != m_scratchSpace || nullptr != m_scratchFlags) {\n- \/\/ FIXME_SYCL\n- std::abort();\n- }\n \n- using RecordSYCL =\n- Kokkos::Impl::SharedAllocationRecord;\n+ using RecordSYCL = Kokkos::Impl::SharedAllocationRecord;\n+ if (nullptr != m_scratchSpace)\n+ RecordSYCL::decrement(RecordSYCL::get_record(m_scratchSpace));\n+ if (nullptr != m_scratchFlags)\n+ RecordSYCL::decrement(RecordSYCL::get_record(m_scratchFlags));\n+ m_syclDev = -1;\n+ m_scratchSpaceCount = 0;\n+ m_scratchSpace = nullptr;\n+ m_scratchFlagsCount = 0;\n+ m_scratchFlags = nullptr;\n+\n RecordSYCL::decrement(RecordSYCL::get_record(m_scratchConcurrentBitset));\n m_scratchConcurrentBitset = nullptr;\n ","source_code":"\/*\n\/\/@HEADER\n\/\/ ************************************************************************\n\/\/\n\/\/ Kokkos v. 3.0\n\/\/ Copyright (2020) National Technology & Engineering\n\/\/ Solutions of Sandia, LLC (NTESS).\n\/\/\n\/\/ Under the terms of Contract DE-NA0003525 with NTESS,\n\/\/ the U.S. Government retains certain rights in this software.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are\n\/\/ met:\n\/\/\n\/\/ 1. Redistributions of source code must retain the above copyright\n\/\/ notice, this list of conditions and the following disclaimer.\n\/\/\n\/\/ 2. Redistributions in binary form must reproduce the above copyright\n\/\/ notice, this list of conditions and the following disclaimer in the\n\/\/ documentation and\/or other materials provided with the distribution.\n\/\/\n\/\/ 3. Neither the name of the Corporation nor the names of the\n\/\/ contributors may be used to endorse or promote products derived from\n\/\/ this software without specific prior written permission.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY NTESS \"AS IS\" AND ANY\n\/\/ EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n\/\/ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n\/\/ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE\n\/\/ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n\/\/ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n\/\/ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n\/\/ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n\/\/ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n\/\/ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\/\/\n\/\/ Questions? Contact Christian R. 
Trott (crtrott@sandia.gov)\n\/\/\n\/\/ ************************************************************************\n\/\/@HEADER\n*\/\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace Kokkos {\nnamespace Experimental {\nnamespace Impl {\n\nstd::vector*> SYCLInternal::all_queues;\nstd::mutex SYCLInternal::mutex;\n\nSYCLInternal::~SYCLInternal() {\n if (!was_finalized || m_scratchSpace || m_scratchFlags ||\n m_scratchConcurrentBitset) {\n std::cerr << \"Kokkos::Experimental::SYCL ERROR: Failed to call \"\n \"Kokkos::Experimental::SYCL::finalize()\"\n << std::endl;\n std::cerr.flush();\n }\n}\n\nint SYCLInternal::verify_is_initialized(const char* const label) const {\n if (!is_initialized()) {\n std::cerr << \"Kokkos::Experimental::SYCL::\" << label\n << \" : ERROR device not initialized\" << std::endl;\n }\n return is_initialized();\n}\nSYCLInternal& SYCLInternal::singleton() {\n static SYCLInternal self;\n return self;\n}\n\nvoid SYCLInternal::initialize(const sycl::device& d) {\n auto exception_handler = [](sycl::exception_list exceptions) {\n bool asynchronous_error = false;\n for (std::exception_ptr const& e : exceptions) {\n try {\n std::rethrow_exception(e);\n } catch (sycl::exception const& e) {\n std::cerr << e.what() << '\\n';\n asynchronous_error = true;\n }\n }\n if (asynchronous_error)\n Kokkos::Impl::throw_runtime_exception(\n \"There was an asynchronous SYCL error!\\n\");\n };\n initialize(sycl::queue{d, exception_handler});\n}\n\n\/\/ FIXME_SYCL\nvoid SYCLInternal::initialize(const sycl::queue& q) {\n if (was_finalized)\n Kokkos::abort(\"Calling SYCL::initialize after SYCL::finalize is illegal\\n\");\n\n if (is_initialized()) return;\n\n if (!HostSpace::execution_space::impl_is_initialized()) {\n const std::string msg(\n \"SYCL::initialize ERROR : HostSpace::execution_space is not \"\n \"initialized\");\n Kokkos::Impl::throw_runtime_exception(msg);\n }\n\n const bool ok_init = nullptr == m_scratchSpace || nullptr == m_scratchFlags;\n const bool ok_dev = true;\n if (ok_init && ok_dev) {\n m_queue = q;\n \/\/ guard pushing to all_queues\n {\n std::lock_guard lock(mutex);\n all_queues.push_back(&m_queue);\n }\n const sycl::device& d = m_queue->get_device();\n std::cout << SYCL::SYCLDevice(d) << '\\n';\n\n m_maxWorkgroupSize =\n d.template get_info();\n \/\/ FIXME_SYCL this should give the correct value for NVIDIA GPUs\n m_maxConcurrency =\n m_maxWorkgroupSize * 2 *\n d.template get_info();\n\n \/\/ Setup concurent bitset for obtaining unique tokens from within an\n \/\/ executing kernel.\n {\n const int32_t buffer_bound =\n Kokkos::Impl::concurrent_bitset::buffer_bound(m_maxConcurrency);\n using Record = Kokkos::Impl::SharedAllocationRecord<\n Kokkos::Experimental::SYCLDeviceUSMSpace, void>;\n Record* const r =\n Record::allocate(Kokkos::Experimental::SYCLDeviceUSMSpace(),\n \"Kokkos::SYCL::InternalScratchBitset\",\n sizeof(uint32_t) * buffer_bound);\n Record::increment(r);\n m_scratchConcurrentBitset = reinterpret_cast(r->data());\n auto event = m_queue->memset(m_scratchConcurrentBitset, 0,\n sizeof(uint32_t) * buffer_bound);\n fence(event);\n }\n\n m_maxShmemPerBlock =\n d.template get_info();\n m_indirectKernelMem.reset(*m_queue);\n m_indirectReducerMem.reset(*m_queue);\n } else {\n std::ostringstream msg;\n msg << \"Kokkos::Experimental::SYCL::initialize(...) 
FAILED\";\n\n if (!ok_init) {\n msg << \" : Already initialized\";\n }\n Kokkos::Impl::throw_runtime_exception(msg.str());\n }\n}\n\nvoid SYCLInternal::finalize() {\n SYCL().fence();\n was_finalized = true;\n if (nullptr != m_scratchSpace || nullptr != m_scratchFlags) {\n \/\/ FIXME_SYCL\n std::abort();\n }\n\n using RecordSYCL =\n Kokkos::Impl::SharedAllocationRecord;\n RecordSYCL::decrement(RecordSYCL::get_record(m_scratchConcurrentBitset));\n m_scratchConcurrentBitset = nullptr;\n\n m_indirectKernelMem.reset();\n m_indirectReducerMem.reset();\n \/\/ guard erasing from all_queues\n {\n std::lock_guard lock(mutex);\n all_queues.erase(std::find(all_queues.begin(), all_queues.end(), &m_queue));\n }\n m_queue.reset();\n}\n\n} \/\/ namespace Impl\n} \/\/ namespace Experimental\n} \/\/ namespace Kokkos\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":119} {"diff_hunk":"@@ -91,8 +91,21 @@ void StorageHttpIngestHandler::onError(ProxygenError error) noexcept {\n << proxygen::getErrorString(error);\n }\n \n-bool StorageHttpIngestHandler::ingestSSTFiles(GraphSpaceID space) {\n- auto code = kvstore_->ingest(space);\n+bool StorageHttpIngestHandler::ingestSSTFiles() {\n+ kvstore::ResultCode code;\n+ if (edge_.has_value()) {\n+ LOG(INFO) << folly::stringPrintf(\n+ \"ingest space %d edge %d\", spaceID_, edge_.value());\n+ code = kvstore_->ingestEdge(spaceID_, edge_.value());\n+ } else if (tag_.has_value()) {\n+ LOG(INFO) << folly::stringPrintf(\n+ \"ingest space %d tag %d\", spaceID_, tag_.value());\n+ code = kvstore_->ingestTag(spaceID_, tag_.value());\n+ } else {\n+ LOG(INFO) << folly::stringPrintf(\n+ \"ingest space %d\", spaceID_);\n+ code = kvstore_->ingest(spaceID_);\n+ }\n if (code == kvstore::ResultCode::SUCCEEDED) {\n return true;\n } else {","source_code":"\/* Copyright (c) 2019 vesoft inc. 
All rights reserved.\n *\n * This source code is licensed under Apache 2.0 License,\n * attached with Common Clause Condition 1.0, found in the LICENSES directory.\n *\/\n\n#include \"storage\/http\/StorageHttpIngestHandler.h\"\n#include \n#include \n#include \n\nnamespace nebula {\nnamespace storage {\n\nusing proxygen::HTTPMessage;\nusing proxygen::HTTPMethod;\nusing proxygen::ProxygenError;\nusing proxygen::UpgradeProtocol;\nusing proxygen::ResponseBuilder;\n\nvoid StorageHttpIngestHandler::init(nebula::kvstore::KVStore *kvstore) {\n kvstore_ = kvstore;\n CHECK_NOTNULL(kvstore_);\n}\n\nvoid StorageHttpIngestHandler::onRequest(std::unique_ptr headers) noexcept {\n if (headers->getMethod().value() != HTTPMethod::GET) {\n \/\/ Unsupported method\n err_ = HttpCode::E_UNSUPPORTED_METHOD;\n return;\n }\n\n if (!headers->hasQueryParam(\"space\")) {\n err_ = HttpCode::E_ILLEGAL_ARGUMENT;\n return;\n }\n\n space_ = headers->getIntQueryParam(\"space\");\n}\n\nvoid StorageHttpIngestHandler::onBody(std::unique_ptr) noexcept {\n \/\/ Do nothing, we only support GET\n}\n\nvoid StorageHttpIngestHandler::onEOM() noexcept {\n switch (err_) {\n case HttpCode::E_UNSUPPORTED_METHOD:\n ResponseBuilder(downstream_)\n .status(WebServiceUtils::to(HttpStatusCode::METHOD_NOT_ALLOWED),\n WebServiceUtils::toString(HttpStatusCode::METHOD_NOT_ALLOWED))\n .sendWithEOM();\n return;\n case HttpCode::E_ILLEGAL_ARGUMENT:\n ResponseBuilder(downstream_)\n .status(WebServiceUtils::to(HttpStatusCode::BAD_REQUEST),\n WebServiceUtils::toString(HttpStatusCode::BAD_REQUEST))\n .sendWithEOM();\n return;\n default:\n break;\n }\n\n if (ingestSSTFiles(space_)) {\n LOG(ERROR) << \"SSTFile ingest successfully \";\n ResponseBuilder(downstream_)\n .status(WebServiceUtils::to(HttpStatusCode::OK),\n WebServiceUtils::toString(HttpStatusCode::OK))\n .body(\"SSTFile ingest successfully\")\n .sendWithEOM();\n } else {\n LOG(ERROR) << \"SSTFile ingest failed\";\n ResponseBuilder(downstream_)\n .status(WebServiceUtils::to(HttpStatusCode::FORBIDDEN),\n WebServiceUtils::toString(HttpStatusCode::FORBIDDEN))\n .body(\"SSTFile ingest failed\")\n .sendWithEOM();\n }\n}\n\nvoid StorageHttpIngestHandler::onUpgrade(UpgradeProtocol) noexcept {\n \/\/ Do nothing\n}\n\n\nvoid StorageHttpIngestHandler::requestComplete() noexcept {\n delete this;\n}\n\nvoid StorageHttpIngestHandler::onError(ProxygenError error) noexcept {\n LOG(ERROR) << \"Web Service MetaHttpIngestHandler Failed: \"\n << proxygen::getErrorString(error);\n}\n\nbool StorageHttpIngestHandler::ingestSSTFiles(GraphSpaceID space) {\n auto code = kvstore_->ingest(space);\n if (code == kvstore::ResultCode::SUCCEEDED) {\n return true;\n } else {\n LOG(ERROR) << \"SSTFile Ingest Failed: \" << code;\n return false;\n }\n}\n\n} \/\/ namespace storage\n} \/\/ namespace nebula\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":120} {"diff_hunk":"@@ -92,7 +92,7 @@ StatusOr> ListHostsProcessor::allHostsWithStatus(\n if (kvRet != kvstore::ResultCode::SUCCEEDED) {\n LOG(ERROR) << \"List Hosts Failed: No partitions\";\n resp_.set_code(cpp2::ErrorCode::E_NOT_FOUND);\n- return Status::Error(\"Cant't find any partitions\");\n+ return Status::Error(\"Can't find any partitions\");\n }\n while (iter->valid()) {\n PartitionID partId = MetaServiceUtils::parsePartKeyPartId(iter->key());","source_code":"\/* Copyright (c) 2018 vesoft inc. 
All rights reserved.\n *\n * This source code is licensed under Apache 2.0 License,\n * attached with Common Clause Condition 1.0, found in the LICENSES directory.\n *\/\n\n#include \"meta\/processors\/partsMan\/ListHostsProcessor.h\"\n#include \"meta\/ActiveHostsMan.h\"\n#include \"meta\/processors\/admin\/AdminClient.h\"\n\nDECLARE_int32(expired_threshold_sec);\nDEFINE_int32(removed_threshold_sec, 24 * 60 * 60,\n \"Hosts will be removed in this time if no heartbeat received\");\n\nnamespace nebula {\nnamespace meta {\n\nvoid ListHostsProcessor::process(const cpp2::ListHostsReq& req) {\n UNUSED(req);\n std::unordered_map spaceIdNameMap;\n std::vector hostItems;\n {\n folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock());\n auto status = allHostsWithStatus(spaceIdNameMap);\n if (!status.ok()) {\n onFinished();\n return;\n }\n hostItems = std::move(status.value());\n }\n getLeaderDist(hostItems, spaceIdNameMap);\n resp_.set_hosts(std::move(hostItems));\n onFinished();\n}\n\nStatusOr> ListHostsProcessor::allHostsWithStatus(\n std::unordered_map& spaceIdNameMap) {\n std::vector hostItems;\n\n const auto& hostPrefix = MetaServiceUtils::hostPrefix();\n std::unique_ptr iter;\n auto kvRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, hostPrefix, &iter);\n if (kvRet != kvstore::ResultCode::SUCCEEDED) {\n LOG(ERROR) << \"List Hosts Failed: No hosts\";\n resp_.set_code(cpp2::ErrorCode::E_NO_HOSTS);\n return Status::Error(\"Can't access kvstore, ret = %d\", static_cast(kvRet));\n }\n\n auto now = time::WallClock::fastNowInMilliSec();\n std::vector removeHostsKey;\n while (iter->valid()) {\n cpp2::HostItem item;\n auto host = MetaServiceUtils::parseHostKey(iter->key());\n item.set_hostAddr(std::move(host));\n HostInfo info = HostInfo::decode(iter->val());\n if (now - info.lastHBTimeInMilliSec_ < FLAGS_removed_threshold_sec * 1000) {\n if (now - info.lastHBTimeInMilliSec_ < FLAGS_expired_threshold_sec * 1000) {\n item.set_status(cpp2::HostStatus::ONLINE);\n } else {\n item.set_status(cpp2::HostStatus::OFFLINE);\n }\n hostItems.emplace_back(item);\n } else {\n removeHostsKey.emplace_back(iter->key());\n }\n iter->next();\n }\n\n \/\/ Get all spaces\n std::vector spaces;\n const auto& spacePrefix = MetaServiceUtils::spacePrefix();\n kvRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, spacePrefix, &iter);\n if (kvRet != kvstore::ResultCode::SUCCEEDED) {\n return hostItems;\n }\n while (iter->valid()) {\n auto spaceId = MetaServiceUtils::spaceId(iter->key());\n spaces.emplace_back(spaceId);\n spaceIdNameMap.emplace(spaceId, MetaServiceUtils::spaceName(iter->val()));\n iter->next();\n }\n\n std::unordered_map>> allParts;\n for (const auto& spaceId : spaces) {\n \/\/ get space name by space id\n auto spaceName = spaceIdNameMap[spaceId];\n\n std::unordered_map> hostParts;\n const auto& partPrefix = MetaServiceUtils::partPrefix(spaceId);\n kvRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, partPrefix, &iter);\n if (kvRet != kvstore::ResultCode::SUCCEEDED) {\n LOG(ERROR) << \"List Hosts Failed: No partitions\";\n resp_.set_code(cpp2::ErrorCode::E_NOT_FOUND);\n return Status::Error(\"Cant't find any partitions\");\n }\n while (iter->valid()) {\n PartitionID partId = MetaServiceUtils::parsePartKeyPartId(iter->key());\n auto partHosts = MetaServiceUtils::parsePartVal(iter->val());\n for (auto& host : partHosts) {\n hostParts[HostAddr(host.ip, host.port)].emplace_back(partId);\n }\n iter->next();\n }\n\n for (const auto& hostEntry : hostParts) {\n allParts[hostEntry.first][spaceName] = 
std::move(hostEntry.second);\n }\n }\n\n for (const auto& hostEntry : allParts) {\n auto hostAddr = toThriftHost(hostEntry.first);\n auto it = std::find_if(hostItems.begin(), hostItems.end(), [&](const auto& item) {\n return item.get_hostAddr() == hostAddr;\n });\n if (it != hostItems.end()) {\n \/\/ set default leader parts of all space to empty\n std::unordered_map> leaderParts;\n for (auto& spaceEntry : hostEntry.second) {\n leaderParts[spaceEntry.first] = {};\n }\n it->set_leader_parts(std::move(leaderParts));\n it->set_all_parts(std::move(hostEntry.second));\n }\n }\n\n \/\/ Remove hosts that long time at OFFLINE status\n if (!removeHostsKey.empty()) {\n kvstore_->asyncMultiRemove(kDefaultSpaceId,\n kDefaultPartId,\n std::move(removeHostsKey),\n [] (kvstore::ResultCode code) {\n if (code != kvstore::ResultCode::SUCCEEDED) {\n LOG(ERROR) << \"Async remove long time offline hosts failed: \" << code;\n }\n });\n }\n return hostItems;\n}\n\nvoid ListHostsProcessor::getLeaderDist(\n std::vector& hostItems,\n std::unordered_map& spaceIdNameMap) {\n if (adminClient_ == nullptr) {\n return;\n }\n HostLeaderMap hostLeaderMap;\n auto ret = adminClient_->getLeaderDist(&hostLeaderMap).get();\n if (!ret.ok()) {\n LOG(ERROR) << \"Get leader distribution failed\";\n return;\n }\n for (auto& hostEntry : hostLeaderMap) {\n auto hostAddr = toThriftHost(hostEntry.first);\n auto it = std::find_if(hostItems.begin(), hostItems.end(), [&](const auto& item) {\n return item.get_hostAddr() == hostAddr;\n });\n\n if (it != hostItems.end()) {\n for (auto& leaderEntry : hostEntry.second) {\n \/\/ get space name by space id\n auto spaceId = leaderEntry.first;\n auto spaceIter = spaceIdNameMap.find(spaceId);\n if (spaceIter == spaceIdNameMap.end()) {\n continue;\n }\n auto spaceName = spaceIter->second;\n\n it->leader_parts[spaceName] = std::move(leaderEntry.second);\n }\n }\n }\n}\n\n} \/\/ namespace meta\n} \/\/ namespace nebula\n\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":121} {"diff_hunk":"@@ -61,6 +61,10 @@ double convertInt64ToDouble(const Int64 &src)\n return (double) src;\n }\n \n+double convertUInt64ToDouble(const UInt64 &src)\n+{\n+ return (double) src;\n+}\n \n Int64 uint32ArrayToInt64(const UInt32 array[2])\n {","source_code":"\/**********************************************************************\n\/\/ @@@ START COPYRIGHT @@@\n\/\/\n\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. 
See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\/\/\n\/\/ @@@ END COPYRIGHT @@@\n**********************************************************************\/\n\/* -*-C++-*-\n**************************************************************************\n*\n* File: Int64.C\n* Description: 64-bit integer\n* Created: 3\/5\/96\n* Language: C++\n*\n*\n*\n*\n**************************************************************************\n*\/\n\n\n#include \"Int64.h\"\n#include \"NABoolean.h\"\n#include \"str.h\"\n#include \"NAStdlib.h\"\n\nInt64 uint32ToInt64(UInt32 value)\n{\n return (Int64) value;\n}\n\nInt32 int64ToInt32(Int64 value)\n{\n UInt32 val32u;\n Int32 val32;\n\n val32u = (UInt32) value;\n val32 = (Int32)val32u;\n\n return val32;\n}\n\ndouble convertInt64ToDouble(const Int64 &src)\n{\n return (double) src;\n}\n\n\nInt64 uint32ArrayToInt64(const UInt32 array[2])\n{\n Int64 result = uint32ToInt64(array[0]);\n Int64 array1 = uint32ToInt64(array[1]);\n Int64 shift = INT_MAX;\t\/\/ 2^31 - 1\n shift += 1;\t\t\t\/\/ 2^31\n result *= shift;\n result *= 2;\t\t\t\/\/ 2*32, so result now has array[0] in high word\n result += array1;\t\t\/\/ and array[1] in low word\n return result;\n}\n\nInt32 aToInt32(const char* src)\n{\n NABoolean isNeg = FALSE;\n if (*src == '-')\n {\n isNeg = TRUE;\n src++;\n }\n\n Int32 tgt = 0;\n while ((*src >= '0') && (*src <= '9')) {\n tgt = tgt * 10 + (*src - '0');\n src++;\n }\n \n if (isNeg)\n return -tgt;\n else\n return tgt;\n}\n\nInt64 atoInt64(const char* src)\n{\n NABoolean isNeg = FALSE;\n if (*src == '-')\n {\n isNeg = TRUE;\n src++;\n }\n\n Int64 tgt = 0;\n while ((*src >= '0') && (*src <= '9')) {\n tgt = tgt * 10 + (*src - '0');\n src++;\n }\n \n if (isNeg)\n return -tgt;\n else\n return tgt;\n\n \n}\n\nvoid convertInt64ToAscii(const Int64 &src, char* tgt)\n{\n Int64 temp = src; \/\/ (src >= 0) ? 
src : - src;\n char buffer[21];\n char *s = &buffer[21];\n *--s = '\\0';\n do {\n char c = (char) (temp % 10);\n if (c < 0)\n c = -c;\n *--s = (char)(c + '0');\n temp \/= 10;\n } while (temp != 0);\n if (src < 0)\n *--s = '-';\n strcpy(tgt, s);\n}\n\nvoid convertInt64ToUInt32Array(const Int64 &src, UInt32 *tgt)\n{\n Lng32 *tPtr = (Lng32 *) &src;\n#ifdef NA_LITTLE_ENDIAN\n tgt[0] = tPtr[1];\n tgt[1] = tPtr[0];\n#else\n tgt[0] = tPtr[0];\n tgt[1] = tPtr[1];\n#endif\n}\n\n\/\/\n\/\/ End of File\n\/\/\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":122} {"diff_hunk":"@@ -114,10 +114,10 @@ template \n void im2col_nd_cpu(const Dtype* data_im, const int num_spatial_axes,\n const int* im_shape, const int* col_shape,\n const int* kernel_shape, const int* pad, const int* stride,\n- Dtype* data_col) {\n+ const int* dilation, Dtype* data_col) {\n const bool kIm2Col = true;\n im2col_nd_core_cpu(data_im, kIm2Col, num_spatial_axes, im_shape, col_shape,\n- kernel_shape, pad, stride, data_col);\n+ kernel_shape, pad, stride, dilation, data_col);\n }\n \n \/\/ Explicit instantiation","source_code":"#include \n\n#include \"caffe\/util\/im2col.hpp\"\n#include \"caffe\/util\/math_functions.hpp\"\n\nnamespace caffe {\n\ntemplate \nvoid im2col_cpu(const Dtype* data_im, const int channels,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n Dtype* data_col) {\n const int height_col = (height + 2 * pad_h - kernel_h) \/ stride_h + 1;\n const int width_col = (width + 2 * pad_w - kernel_w) \/ stride_w + 1;\n const int channels_col = channels * kernel_h * kernel_w;\n for (int c_col = 0; c_col < channels_col; ++c_col) {\n int w_offset = c_col % kernel_w;\n int h_offset = (c_col \/ kernel_w) % kernel_h;\n int c_im = c_col \/ kernel_h \/ kernel_w;\n for (int h_col = 0; h_col < height_col; ++h_col) {\n for (int w_col = 0; w_col < width_col; ++w_col) {\n int h_im = h_col * stride_h - pad_h + h_offset;\n int w_im = w_col * stride_w - pad_w + w_offset;\n data_col[(c_col * height_col + h_col) * width_col + w_col] =\n (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?\n data_im[(c_im * height + h_im) * width + w_im] : 0;\n }\n }\n }\n}\n\n\/\/ Explicit instantiation\ntemplate void im2col_cpu(const float* data_im, const int channels,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w, const int stride_h,\n const int stride_w, float* data_col);\ntemplate void im2col_cpu(const double* data_im, const int channels,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w, const int stride_h,\n const int stride_w, double* data_col);\n\ntemplate \ninline void im2col_nd_core_cpu(const Dtype* data_input, const bool im2col,\n const int num_spatial_axes, const int* im_shape, const int* col_shape,\n const int* kernel_shape, const int* pad, const int* stride,\n Dtype* data_output) {\n if (!im2col) {\n int im_size = im_shape[0];\n for (int i = 0; i < num_spatial_axes; ++i) {\n im_size *= im_shape[1 + i];\n }\n caffe_set(im_size, Dtype(0), data_output);\n }\n int kernel_size = 1;\n for (int i = 0; i < num_spatial_axes; ++i) {\n kernel_size *= kernel_shape[i];\n }\n const int channels_col = col_shape[0];\n vector d_offset(num_spatial_axes, 0);\n vector d_iter(num_spatial_axes, 0);\n for (int c_col = 0; c_col < channels_col; ++c_col) {\n \/\/ Loop over spatial axes in reverse order to compute a per-axis 
offset.\n int offset = c_col;\n for (int d_i = num_spatial_axes - 1; d_i >= 0; --d_i) {\n if (d_i < num_spatial_axes - 1) {\n offset \/= kernel_shape[d_i + 1];\n }\n d_offset[d_i] = offset % kernel_shape[d_i];\n }\n for (bool incremented = true; incremented; ) {\n \/\/ Loop over spatial axes in forward order to compute the indices in the\n \/\/ image and column, and whether the index lies in the padding.\n int index_col = c_col;\n int index_im = c_col \/ kernel_size;\n bool is_padding = false;\n for (int d_i = 0; d_i < num_spatial_axes; ++d_i) {\n const int d = d_iter[d_i];\n const int d_im = d * stride[d_i] - pad[d_i] + d_offset[d_i];\n is_padding |= d_im < 0 || d_im >= im_shape[d_i + 1];\n index_col *= col_shape[d_i + 1];\n index_col += d;\n index_im *= im_shape[d_i + 1];\n index_im += d_im;\n }\n if (im2col) {\n if (is_padding) {\n data_output[index_col] = 0;\n } else {\n data_output[index_col] = data_input[index_im];\n }\n } else if (!is_padding) { \/\/ col2im\n data_output[index_im] += data_input[index_col];\n }\n \/\/ Loop over spatial axes in reverse order to choose an index,\n \/\/ like counting.\n incremented = false;\n for (int d_i = num_spatial_axes - 1; d_i >= 0; --d_i) {\n const int d_max = col_shape[d_i + 1];\n DCHECK_LT(d_iter[d_i], d_max);\n if (d_iter[d_i] == d_max - 1) {\n d_iter[d_i] = 0;\n } else { \/\/ d_iter[d_i] < d_max - 1\n ++d_iter[d_i];\n incremented = true;\n break;\n }\n }\n } \/\/ while(incremented) {\n } \/\/ for (int c = 0; c < channels_col; ++c) {\n}\n\ntemplate \nvoid im2col_nd_cpu(const Dtype* data_im, const int num_spatial_axes,\n const int* im_shape, const int* col_shape,\n const int* kernel_shape, const int* pad, const int* stride,\n Dtype* data_col) {\n const bool kIm2Col = true;\n im2col_nd_core_cpu(data_im, kIm2Col, num_spatial_axes, im_shape, col_shape,\n kernel_shape, pad, stride, data_col);\n}\n\n\/\/ Explicit instantiation\ntemplate void im2col_nd_cpu(const float* data_im,\n const int num_spatial_axes,\n const int* im_shape, const int* col_shape,\n const int* kernel_shape, const int* pad, const int* stride,\n float* data_col);\ntemplate void im2col_nd_cpu(const double* data_im,\n const int num_spatial_axes,\n const int* im_shape, const int* col_shape,\n const int* kernel_shape, const int* pad, const int* stride,\n double* data_col);\n\ntemplate \nvoid col2im_cpu(const Dtype* data_col, const int channels,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n Dtype* data_im) {\n caffe_set(height * width * channels, Dtype(0), data_im);\n const int height_col = (height + 2 * pad_h - kernel_h) \/ stride_h + 1;\n const int width_col = (width + 2 * pad_w - kernel_w) \/ stride_w + 1;\n const int channels_col = channels * kernel_h * kernel_w;\n for (int c_col = 0; c_col < channels_col; ++c_col) {\n int w_offset = c_col % kernel_w;\n int h_offset = (c_col \/ kernel_w) % kernel_h;\n int c_im = c_col \/ kernel_h \/ kernel_w;\n for (int h_col = 0; h_col < height_col; ++h_col) {\n for (int w_col = 0; w_col < width_col; ++w_col) {\n int h_im = h_col * stride_h - pad_h + h_offset;\n int w_im = w_col * stride_w - pad_w + w_offset;\n if (h_im >= 0 && h_im < height && w_im >= 0 && w_im < width)\n data_im[(c_im * height + h_im) * width + w_im] +=\n data_col[(c_col * height_col + h_col) * width_col + w_col];\n }\n }\n }\n}\n\n\/\/ Explicit instantiation\ntemplate void col2im_cpu(const float* data_col, const int channels,\n const int height, const int width, const 
int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w, const int stride_h,\n const int stride_w, float* data_im);\ntemplate void col2im_cpu(const double* data_col, const int channels,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w, const int stride_h,\n const int stride_w, double* data_im);\n\ntemplate \nvoid col2im_nd_cpu(const Dtype* data_col, const int num_spatial_axes,\n const int* im_shape, const int* col_shape,\n const int* kernel_shape, const int* pad, const int* stride,\n Dtype* data_im) {\n const bool kIm2Col = false;\n im2col_nd_core_cpu(data_col, kIm2Col, num_spatial_axes, im_shape, col_shape,\n kernel_shape, pad, stride, data_im);\n}\n\n\/\/ Explicit instantiation\ntemplate void col2im_nd_cpu(const float* data_col,\n const int num_spatial_axes,\n const int* im_shape, const int* col_shape,\n const int* kernel_shape, const int* pad, const int* stride,\n float* data_im);\ntemplate void col2im_nd_cpu(const double* data_col,\n const int num_spatial_axes,\n const int* im_shape, const int* col_shape,\n const int* kernel_shape, const int* pad, const int* stride,\n double* data_im);\n\n\n} \/\/ namespace caffe\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":123} {"diff_hunk":"@@ -104,7 +104,8 @@ Status TablePlugin::HandleRequest(const RoutingAlgorithmsInterface &algorithms,\n {\n const auto &table_index = row * num_destinations + column;\n BOOST_ASSERT(table_index < result_tables_pair.first.size());\n- if (result_tables_pair.first[table_index] == MAXIMAL_EDGE_DURATION)\n+ if (params.fallback_speed > 0 &&\n+ result_tables_pair.first[table_index] == MAXIMAL_EDGE_DURATION)\n {\n const auto &source =\n snapped_phantoms[params.sources.empty() ? row : params.sources[row]];","source_code":"#include \"engine\/plugins\/table.hpp\"\n\n#include \"engine\/api\/table_api.hpp\"\n#include \"engine\/api\/table_parameters.hpp\"\n#include \"engine\/routing_algorithms\/many_to_many.hpp\"\n#include \"engine\/search_engine_data.hpp\"\n#include \"util\/coordinate_calculation.hpp\"\n#include \"util\/json_container.hpp\"\n#include \"util\/string_util.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n\n#include \n\nnamespace osrm\n{\nnamespace engine\n{\nnamespace plugins\n{\n\nTablePlugin::TablePlugin(const int max_locations_distance_table)\n : max_locations_distance_table(max_locations_distance_table)\n{\n}\n\nStatus TablePlugin::HandleRequest(const RoutingAlgorithmsInterface &algorithms,\n const api::TableParameters ¶ms,\n util::json::Object &result) const\n{\n if (!algorithms.HasManyToManySearch())\n {\n return Error(\"NotImplemented\",\n \"Many to many search is not implemented for the chosen search algorithm.\",\n result);\n }\n\n BOOST_ASSERT(params.IsValid());\n\n if (!CheckAllCoordinates(params.coordinates))\n {\n return Error(\"InvalidOptions\", \"Coordinates are invalid\", result);\n }\n\n if (params.bearings.size() > 0 && params.coordinates.size() != params.bearings.size())\n {\n return Error(\n \"InvalidOptions\", \"Number of bearings does not match number of coordinates\", result);\n }\n\n \/\/ Empty sources or destinations means the user wants all of them included, respectively\n \/\/ The ManyToMany routing algorithm we dispatch to below already handles this perfectly.\n const auto num_sources =\n params.sources.empty() ? params.coordinates.size() : params.sources.size();\n const auto num_destinations =\n params.destinations.empty() ? 
params.coordinates.size() : params.destinations.size();\n\n if (max_locations_distance_table > 0 &&\n ((num_sources * num_destinations) >\n static_cast(max_locations_distance_table * max_locations_distance_table)))\n {\n return Error(\"TooBig\", \"Too many table coordinates\", result);\n }\n\n if (!CheckAlgorithms(params, algorithms, result))\n return Status::Error;\n\n const auto &facade = algorithms.GetFacade();\n auto phantom_nodes = GetPhantomNodes(facade, params);\n\n if (phantom_nodes.size() != params.coordinates.size())\n {\n return Error(\"NoSegment\",\n std::string(\"Could not find a matching segment for coordinate \") +\n std::to_string(phantom_nodes.size()),\n result);\n }\n\n auto snapped_phantoms = SnapPhantomNodes(phantom_nodes);\n\n bool request_distance = params.annotations & api::TableParameters::AnnotationsType::Distance;\n bool request_duration = params.annotations & api::TableParameters::AnnotationsType::Duration;\n\n auto result_tables_pair = algorithms.ManyToManySearch(\n snapped_phantoms, params.sources, params.destinations, request_distance);\n\n if ((request_duration && result_tables_pair.first.empty()) ||\n (request_distance && result_tables_pair.second.empty()))\n {\n return Error(\"NoTable\", \"No table found\", result);\n }\n\n \/\/ Scan table for null results - if any exist, replace with distance estimates\n if (params.fallback_speed > 0)\n {\n for (std::size_t row = 0; row < num_sources; row++)\n {\n for (std::size_t column = 0; column < num_destinations; column++)\n {\n const auto &table_index = row * num_destinations + column;\n BOOST_ASSERT(table_index < result_tables_pair.first.size());\n if (result_tables_pair.first[table_index] == MAXIMAL_EDGE_DURATION)\n {\n const auto &source =\n snapped_phantoms[params.sources.empty() ? row : params.sources[row]];\n const auto &destination =\n snapped_phantoms[params.destinations.empty() ? column\n : params.destinations[column]];\n\n auto distance_estimate =\n params.fallback_coordinate_type ==\n api::TableParameters::FallbackCoordinateType::Input\n ? 
util::coordinate_calculation::fccApproximateDistance(\n source.input_location, destination.input_location)\n : util::coordinate_calculation::fccApproximateDistance(\n source.location, destination.location);\n\n result_tables_pair.first[table_index] =\n distance_estimate \/ (double)params.fallback_speed;\n if (!result_tables_pair.second.empty())\n {\n result_tables_pair.second[table_index] = distance_estimate;\n }\n }\n }\n }\n }\n\n api::TableAPI table_api{facade, params};\n table_api.MakeResponse(result_tables_pair, snapped_phantoms, result);\n\n return Status::Ok;\n}\n}\n}\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":124} {"diff_hunk":"@@ -55,6 +55,23 @@ void AddListenerProcessor::process(const cpp2::AddListenerReq& req) {\n data.emplace_back(MetaKeyUtils::listenerKey(space, parts[i], type),\n MetaKeyUtils::serializeHostAddr(hosts[i % hosts.size()]));\n }\n+\n+ nebula::cpp2::ErrorCode code = nebula::cpp2::ErrorCode::SUCCEEDED;\n+ for (auto& host : hosts) {\n+ auto machineKey = MetaKeyUtils::machineKey(host.host, host.port);\n+ if (machineExist(machineKey) == nebula::cpp2::ErrorCode::SUCCEEDED) {\n+ LOG(ERROR) << \"The host \" << host << \" have existed!\";\n+ code = nebula::cpp2::ErrorCode::E_EXISTED;\n+ break;\n+ }\n+ data.emplace_back(machineKey, \"\");\n+ }\n+\n+ if (code != nebula::cpp2::ErrorCode::SUCCEEDED) {\n+ handleErrorCode(code);\n+ onFinished();\n+ return;\n+ }\n doSyncPutAndUpdate(std::move(data));\n }\n ","source_code":"\/* Copyright (c) 2020 vesoft inc. All rights reserved.\n *\n * This source code is licensed under Apache 2.0 License.\n *\/\n\n#include \"meta\/processors\/listener\/ListenerProcessor.h\"\n\n#include \"meta\/ActiveHostsMan.h\"\n\nDECLARE_int32(heartbeat_interval_secs);\nDECLARE_uint32(expired_time_factor);\n\nnamespace nebula {\nnamespace meta {\n\nvoid AddListenerProcessor::process(const cpp2::AddListenerReq& req) {\n auto space = req.get_space_id();\n CHECK_SPACE_ID_AND_RETURN(space);\n auto type = req.get_type();\n const auto& hosts = req.get_hosts();\n auto ret = listenerExist(space, type);\n if (ret != nebula::cpp2::ErrorCode::E_LISTENER_NOT_FOUND) {\n if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) {\n LOG(ERROR) << \"Add listener failed, listener already exists.\";\n ret = nebula::cpp2::ErrorCode::E_EXISTED;\n } else {\n LOG(ERROR) << \"Add listener failed, error: \" << apache::thrift::util::enumNameSafe(ret);\n }\n handleErrorCode(ret);\n onFinished();\n return;\n }\n\n \/\/ TODO : (sky) if type is elasticsearch, need check text search service.\n folly::SharedMutex::WriteHolder wHolder(LockUtils::listenerLock());\n folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock());\n const auto& prefix = MetaKeyUtils::partPrefix(space);\n auto iterRet = doPrefix(prefix);\n if (!nebula::ok(iterRet)) {\n auto retCode = nebula::error(iterRet);\n LOG(ERROR) << \"List parts failed, error: \" << apache::thrift::util::enumNameSafe(retCode);\n handleErrorCode(retCode);\n onFinished();\n return;\n }\n\n std::vector parts;\n auto iter = nebula::value(iterRet).get();\n while (iter->valid()) {\n parts.emplace_back(MetaKeyUtils::parsePartKeyPartId(iter->key()));\n iter->next();\n }\n std::vector data;\n for (size_t i = 0; i < parts.size(); i++) {\n data.emplace_back(MetaKeyUtils::listenerKey(space, parts[i], type),\n MetaKeyUtils::serializeHostAddr(hosts[i % hosts.size()]));\n }\n doSyncPutAndUpdate(std::move(data));\n}\n\nvoid RemoveListenerProcessor::process(const cpp2::RemoveListenerReq& req) {\n auto space = req.get_space_id();\n 
CHECK_SPACE_ID_AND_RETURN(space);\n auto type = req.get_type();\n auto ret = listenerExist(space, type);\n if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) {\n if (ret == nebula::cpp2::ErrorCode::E_LISTENER_NOT_FOUND) {\n LOG(ERROR) << \"Remove listener failed, listener not exists.\";\n } else {\n LOG(ERROR) << \"Remove listener failed, error: \" << apache::thrift::util::enumNameSafe(ret);\n }\n handleErrorCode(ret);\n onFinished();\n return;\n }\n\n folly::SharedMutex::WriteHolder wHolder(LockUtils::listenerLock());\n std::vector keys;\n const auto& prefix = MetaKeyUtils::listenerPrefix(space, type);\n auto iterRet = doPrefix(prefix);\n if (!nebula::ok(iterRet)) {\n auto retCode = nebula::error(iterRet);\n LOG(ERROR) << \"Remove listener failed, error: \" << apache::thrift::util::enumNameSafe(retCode);\n handleErrorCode(retCode);\n onFinished();\n return;\n }\n\n auto iter = nebula::value(iterRet).get();\n while (iter->valid()) {\n keys.emplace_back(iter->key());\n iter->next();\n }\n doSyncMultiRemoveAndUpdate(std::move(keys));\n}\n\nvoid ListListenerProcessor::process(const cpp2::ListListenerReq& req) {\n auto space = req.get_space_id();\n CHECK_SPACE_ID_AND_RETURN(space);\n folly::SharedMutex::ReadHolder rHolder(LockUtils::listenerLock());\n const auto& prefix = MetaKeyUtils::listenerPrefix(space);\n auto iterRet = doPrefix(prefix);\n if (!nebula::ok(iterRet)) {\n auto retCode = nebula::error(iterRet);\n LOG(ERROR) << \"List listener failed, error: \" << apache::thrift::util::enumNameSafe(retCode);\n handleErrorCode(retCode);\n onFinished();\n return;\n }\n\n auto activeHostsRet =\n ActiveHostsMan::getActiveHosts(kvstore_,\n FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor,\n cpp2::HostRole::LISTENER);\n if (!nebula::ok(activeHostsRet)) {\n handleErrorCode(nebula::error(activeHostsRet));\n onFinished();\n return;\n }\n\n std::vector listeners;\n auto activeHosts = std::move(nebula::value(activeHostsRet));\n auto iter = nebula::value(iterRet).get();\n while (iter->valid()) {\n cpp2::ListenerInfo listener;\n listener.set_type(MetaKeyUtils::parseListenerType(iter->key()));\n listener.set_host(MetaKeyUtils::deserializeHostAddr(iter->val()));\n listener.set_part_id(MetaKeyUtils::parseListenerPart(iter->key()));\n if (std::find(activeHosts.begin(), activeHosts.end(), *listener.host_ref()) !=\n activeHosts.end()) {\n listener.set_status(cpp2::HostStatus::ONLINE);\n } else {\n listener.set_status(cpp2::HostStatus::OFFLINE);\n }\n listeners.emplace_back(std::move(listener));\n iter->next();\n }\n resp_.set_listeners(std::move(listeners));\n handleErrorCode(nebula::cpp2::ErrorCode::SUCCEEDED);\n onFinished();\n}\n\n} \/\/ namespace meta\n} \/\/ namespace nebula\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":125} {"diff_hunk":"@@ -65,8 +65,7 @@ def EStateIndices(mol, force=True):\n tmp = (Is[i] - Is[j]) \/ (p * p)\n accum[i] += tmp\n accum[j] -= tmp\n-\n- res = accum + Is\n+ res = numpy.add(accum, Is, dtype='float')\n mol._eStateIndices = res\n return res\n ","source_code":"# $Id$\n#\n# Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC\n#\n# @@ All Rights Reserved @@\n# This file is part of the RDKit.\n# The contents are covered by the terms of the BSD license\n# which is included in the file license.txt, found at the root\n# of the RDKit source tree.\n#\n\"\"\" Basic EState definitions\n\n\"\"\"\n\nimport numpy\nfrom rdkit import Chem\n\n\ndef GetPrincipleQuantumNumber(atNum):\n \"\"\" Get principal quantum number for atom number \"\"\"\n if atNum <= 2:\n 
return 1\n elif atNum <= 10:\n return 2\n elif atNum <= 18:\n return 3\n elif atNum <= 36:\n return 4\n elif atNum <= 54:\n return 5\n elif atNum <= 86:\n return 6\n else:\n return 7\n\n\ndef EStateIndices(mol, force=True):\n \"\"\" returns a tuple of EState indices for the molecule\n\n Reference: Hall, Mohney and Kier. JCICS _31_ 76-81 (1991)\n\n \"\"\"\n if not force and hasattr(mol, '_eStateIndices'):\n return mol._eStateIndices\n\n tbl = Chem.GetPeriodicTable()\n nAtoms = mol.GetNumAtoms()\n Is = numpy.zeros(nAtoms, numpy.float)\n for i in range(nAtoms):\n at = mol.GetAtomWithIdx(i)\n atNum = at.GetAtomicNum()\n d = at.GetDegree()\n if d > 0:\n h = at.GetTotalNumHs()\n dv = tbl.GetNOuterElecs(atNum) - h\n N = GetPrincipleQuantumNumber(atNum)\n Is[i] = (4. \/ (N * N) * dv + 1) \/ d\n dists = Chem.GetDistanceMatrix(mol, useBO=0, useAtomWts=0)\n dists += 1\n accum = numpy.zeros(nAtoms, numpy.float)\n for i in range(nAtoms):\n for j in range(i + 1, nAtoms):\n p = dists[i, j]\n if p < 1e6:\n tmp = (Is[i] - Is[j]) \/ (p * p)\n accum[i] += tmp\n accum[j] -= tmp\n\n res = accum + Is\n mol._eStateIndices = res\n return res\n\n\nEStateIndices.version = '1.0.0'\n\n\ndef MaxEStateIndex(mol, force=1):\n return max(EStateIndices(mol, force))\n\n\nMaxEStateIndex.version = \"1.0.0\"\n\n\ndef MinEStateIndex(mol, force=1):\n return min(EStateIndices(mol, force))\n\n\nMinEStateIndex.version = \"1.0.0\"\n\n\ndef MaxAbsEStateIndex(mol, force=1):\n return max([abs(x) for x in EStateIndices(mol, force)])\n\n\nMaxAbsEStateIndex.version = \"1.0.0\"\n\n\ndef MinAbsEStateIndex(mol, force=1):\n return min([abs(x) for x in EStateIndices(mol, force)])\n\n\nMinAbsEStateIndex.version = \"1.0.0\"\n\n\ndef _exampleCode():\n \"\"\" Example code for calculating E-state indices \"\"\"\n smis = ['CCCC', 'CCCCC', 'CCCCCC', 'CC(N)C(=O)O', 'CC(N)C(=O)[O-].[Na+]']\n for smi in smis:\n m = Chem.MolFromSmiles(smi)\n print(smi)\n inds = EStateIndices(m)\n print('\\t', inds)\n\n\nif __name__ == '__main__': # pragma: nocover\n _exampleCode()\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":126} {"diff_hunk":"@@ -117,16 +117,18 @@ void ShowExecutor::showSpaces() {\n onFinish_();\n };\n \n- auto error = [this] (auto &&e) {\n- LOG(ERROR) << \"Exception caught: \" << e.what();\n- DCHECK(onError_);\n- onError_(Status::Error(\"Internal error\"));\n- return;\n- };\n-\n+ LOG_AND_PROCESS_ERROR();\n std::move(future).via(runner).thenValue(cb).thenError(error);\n }\n \n+void ShowExecutor::showTags() {\n+ \/\/ TODO(darion) support show tags via MetaClient\n+}\n+\n+void ShowExecutor::showEdges() {\n+ \/\/ TODO(darion) support show edges via MetaClient\n+}\n+\n void ShowExecutor::setupResponse(cpp2::ExecutionResponse &resp) {\n resp = std::move(*resp_);\n }","source_code":"\/* Copyright (c) 2018 - present, VE Software Inc. 
All rights reserved\n *\n * This source code is licensed under Apache 2.0 License\n * (found in the LICENSE.Apache file in the root directory)\n *\/\n\n#include \"graph\/ShowExecutor.h\"\n#include \"network\/NetworkUtils.h\"\n\nnamespace nebula {\nnamespace graph {\n\nusing nebula::network::NetworkUtils;\n\nShowExecutor::ShowExecutor(Sentence *sentence,\n ExecutionContext *ectx) : Executor(ectx) {\n sentence_ = static_cast(sentence);\n}\n\n\nStatus ShowExecutor::prepare() {\n return Status::OK();\n}\n\n\nvoid ShowExecutor::execute() {\n auto showType = sentence_->showType();\n switch (showType) {\n case ShowSentence::ShowType::kShowHosts:\n showHosts();\n break;\n case ShowSentence::ShowType::kShowSpaces:\n showSpaces();\n break;\n case ShowSentence::ShowType::kUnknown:\n onError_(Status::Error(\"Type unknown\"));\n break;\n \/\/ intentionally no `default'\n }\n}\n\n\nvoid ShowExecutor::showHosts() {\n auto future = ectx()->getMetaClient()->listHosts();\n auto *runner = ectx()->rctx()->runner();\n\n auto cb = [this] (auto &&resp) {\n if (!resp.ok()) {\n DCHECK(onError_);\n onError_(std::move(resp).status());\n return;\n }\n\n auto retShowHosts = std::move(resp).value();\n std::vector rows;\n std::vector header;\n resp_ = std::make_unique();\n\n header.push_back(\"Ip\");\n header.push_back(\"Port\");\n resp_->set_column_names(std::move(header));\n\n for (auto &host : retShowHosts) {\n std::vector row;\n row.resize(2);\n row[0].set_str(NetworkUtils::ipFromHostAddr(host));\n row[1].set_str(folly::to(NetworkUtils::portFromHostAddr(host)));\n rows.emplace_back();\n rows.back().set_columns(std::move(row));\n }\n resp_->set_rows(std::move(rows));\n\n DCHECK(onFinish_);\n onFinish_();\n };\n\n auto error = [this] (auto &&e) {\n LOG(ERROR) << \"Exception caught: \" << e.what();\n DCHECK(onError_);\n onError_(Status::Error(\"Internal error\"));\n return;\n };\n\n std::move(future).via(runner).thenValue(cb).thenError(error);\n}\n\n\nvoid ShowExecutor::showSpaces() {\n auto future = ectx()->getMetaClient()->listSpaces();\n auto *runner = ectx()->rctx()->runner();\n\n auto cb = [this] (auto &&resp) {\n if (!resp.ok()) {\n DCHECK(onError_);\n onError_(std::move(resp).status());\n return;\n }\n\n auto retShowSpaces = std::move(resp).value();\n std::vector rows;\n std::vector header;\n resp_ = std::make_unique();\n\n header.push_back(\"Name\");\n resp_->set_column_names(std::move(header));\n\n for (auto &space : retShowSpaces) {\n std::vector row;\n row.emplace_back();\n row.back().set_str(std::move(space.second));\n rows.emplace_back();\n rows.back().set_columns(std::move(row));\n }\n resp_->set_rows(std::move(rows));\n\n DCHECK(onFinish_);\n onFinish_();\n };\n\n auto error = [this] (auto &&e) {\n LOG(ERROR) << \"Exception caught: \" << e.what();\n DCHECK(onError_);\n onError_(Status::Error(\"Internal error\"));\n return;\n };\n\n std::move(future).via(runner).thenValue(cb).thenError(error);\n}\n\nvoid ShowExecutor::setupResponse(cpp2::ExecutionResponse &resp) {\n resp = std::move(*resp_);\n}\n\n} \/\/ namespace graph\n} \/\/ namespace nebula\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":127} {"diff_hunk":"@@ -19,6 +19,7 @@\n \n #include \n #include \n+#include \n \n #include \n ","source_code":"\/\/ Copyright 2020 Proyectos y Sistemas de Mantenimiento SL (eProsima).\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ 
http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/**\n * @file EDPServerListener2.cpp\n *\n *\/\n\n#include \n#include \n\n#include \n\n#include \".\/EDPServerListeners2.hpp\"\n#include \".\/EDPServer2.hpp\"\n#include \"..\/participant\/PDPServer2.hpp\"\n\nnamespace eprosima {\nnamespace fastdds {\nnamespace rtps {\n\nusing namespace eprosima::fastrtps::rtps;\n\nEDPServerPUBListener2::EDPServerPUBListener2(\n EDPServer2* sedp)\n : EDPBasePUBListener(sedp->mp_RTPSParticipant->getAttributes().allocation.locators,\n sedp->mp_RTPSParticipant->getAttributes().allocation.data_limits)\n , sedp_(sedp)\n{\n}\n\nvoid EDPServerPUBListener2::onNewCacheChangeAdded(\n RTPSReader* reader,\n const CacheChange_t* const change_in)\n{\n (void)reader;\n (void)change_in;\n \/\/ TODO DISCOVERY SERVER VERSION 2\n}\n\nvoid EDPServerPUBListener2::onWriterChangeReceivedByAll(\n RTPSWriter* writer,\n CacheChange_t* change)\n{\n (void)writer;\n\n if (ChangeKind_t::NOT_ALIVE_DISPOSED_UNREGISTERED == change->kind)\n {\n WriterHistory* writer_history =\n sedp_->publications_writer_.second;\n\n writer_history->remove_change(change);\n }\n}\n\nEDPServerSUBListener2::EDPServerSUBListener2(\n EDPServer2* sedp)\n : EDPBaseSUBListener(sedp->mp_RTPSParticipant->getAttributes().allocation.locators,\n sedp->mp_RTPSParticipant->getAttributes().allocation.data_limits)\n , sedp_(sedp)\n{\n}\n\nvoid EDPServerSUBListener2::onNewCacheChangeAdded(\n RTPSReader* reader,\n const CacheChange_t* const change_in)\n{\n (void)reader;\n (void)change_in;\n \/\/ TODO DISCOVERY SERVER VERSION 2\n}\n\nvoid EDPServerSUBListener2::onWriterChangeReceivedByAll(\n RTPSWriter* writer,\n CacheChange_t* change)\n{\n (void)writer;\n\n if (ChangeKind_t::NOT_ALIVE_DISPOSED_UNREGISTERED == change->kind)\n {\n WriterHistory* writer_history =\n sedp_->subscriptions_writer_.second;\n\n writer_history->remove_change(change);\n }\n\n}\n\n} \/* namespace rtps *\/\n} \/\/ namespace fastdds\n} \/* namespace eprosima *\/\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":128} {"diff_hunk":"@@ -82,8 +82,8 @@ shaderc_shader_kind MapShadercType(VkShaderStageFlagBits vkShader) {\n \n \/\/ Compile a given string containing GLSL into SPIR-V\n \/\/ Return value of false means an error was encountered\n-bool VkTestFramework::GLSLtoSPV(const VkShaderStageFlagBits shader_type, const char *pshader, std::vector &spirv,\n- bool debug, uint32_t spirv_minor_version) {\n+bool VkTestFramework::GLSLtoSPV(VkPhysicalDeviceLimits const *const device_limits, const VkShaderStageFlagBits shader_type,\n+ const char *pshader, std::vector &spirv, bool debug, uint32_t spirv_minor_version) {\n \/\/ On Android, use shaderc instead.\n shaderc::Compiler compiler;\n shaderc::CompileOptions options;","source_code":"\/\/ VK tests\n\/\/\n\/\/ Copyright (c) 2015-2019 The Khronos Group Inc.\n\/\/ Copyright (c) 2015-2019 Valve Corporation\n\/\/ Copyright (c) 2015-2019 LunarG, Inc.\n\/\/ Copyright (c) 2015-2019 Google, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ 
http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n#include \"vktestframeworkandroid.h\"\n#include \"shaderc\/shaderc.hpp\"\n#include \n\nVkTestFramework::VkTestFramework() {}\nVkTestFramework::~VkTestFramework() {}\n\n\/\/ Define static elements\nbool VkTestFramework::m_devsim_layer = false;\nANativeWindow *VkTestFramework::window = nullptr;\n\nVkFormat VkTestFramework::GetFormat(VkInstance instance, vk_testing::Device *device) {\n VkFormatProperties format_props;\n vk::GetPhysicalDeviceFormatProperties(device->phy().handle(), VK_FORMAT_B8G8R8A8_UNORM, &format_props);\n if (format_props.linearTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT ||\n format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) {\n return VK_FORMAT_B8G8R8A8_UNORM;\n }\n vk::GetPhysicalDeviceFormatProperties(device->phy().handle(), VK_FORMAT_R8G8B8A8_UNORM, &format_props);\n if (format_props.linearTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT ||\n format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) {\n return VK_FORMAT_R8G8B8A8_UNORM;\n }\n printf(\"Error - device does not support VK_FORMAT_B8G8R8A8_UNORM nor VK_FORMAT_R8G8B8A8_UNORM - exiting\\n\");\n exit(0);\n}\n\nvoid VkTestFramework::InitArgs(int *argc, char *argv[]) {}\nvoid VkTestFramework::Finish() {}\n\nvoid TestEnvironment::SetUp() {\n vk_testing::set_error_callback(test_error_callback);\n\n vk::InitDispatchTable();\n}\n\nvoid TestEnvironment::TearDown() {}\n\n\/\/ Android specific helper functions for shaderc.\nstruct shader_type_mapping {\n VkShaderStageFlagBits vkshader_type;\n shaderc_shader_kind shaderc_type;\n};\n\nstatic const shader_type_mapping shader_map_table[] = {\n {VK_SHADER_STAGE_VERTEX_BIT, shaderc_glsl_vertex_shader},\n {VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, shaderc_glsl_tess_control_shader},\n {VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, shaderc_glsl_tess_evaluation_shader},\n {VK_SHADER_STAGE_GEOMETRY_BIT, shaderc_glsl_geometry_shader},\n {VK_SHADER_STAGE_FRAGMENT_BIT, shaderc_glsl_fragment_shader},\n {VK_SHADER_STAGE_COMPUTE_BIT, shaderc_glsl_compute_shader},\n};\n\nshaderc_shader_kind MapShadercType(VkShaderStageFlagBits vkShader) {\n for (auto shader : shader_map_table) {\n if (shader.vkshader_type == vkShader) {\n return shader.shaderc_type;\n }\n }\n assert(false);\n return shaderc_glsl_infer_from_source;\n}\n\n\/\/ Compile a given string containing GLSL into SPIR-V\n\/\/ Return value of false means an error was encountered\nbool VkTestFramework::GLSLtoSPV(const VkShaderStageFlagBits shader_type, const char *pshader, std::vector &spirv,\n bool debug, uint32_t spirv_minor_version) {\n \/\/ On Android, use shaderc instead.\n shaderc::Compiler compiler;\n shaderc::CompileOptions options;\n if (debug) {\n options.SetOptimizationLevel(shaderc_optimization_level_zero);\n options.SetGenerateDebugInfo();\n }\n\n switch (spirv_minor_version) {\n default:\n case 0:\n options.SetTargetSpirv(shaderc_spirv_version_1_0);\n break;\n case 1:\n options.SetTargetSpirv(shaderc_spirv_version_1_1);\n break;\n case 2:\n options.SetTargetSpirv(shaderc_spirv_version_1_2);\n break;\n case 3:\n options.SetTargetSpirv(shaderc_spirv_version_1_3);\n break;\n 
case 4:\n options.SetTargetSpirv(shaderc_spirv_version_1_4);\n break;\n }\n\n shaderc::SpvCompilationResult result =\n compiler.CompileGlslToSpv(pshader, strlen(pshader), MapShadercType(shader_type), \"shader\", options);\n if (result.GetCompilationStatus() != shaderc_compilation_status_success) {\n __android_log_print(ANDROID_LOG_ERROR, \"VulkanLayerValidationTests\", \"GLSLtoSPV compilation failed: %s\",\n result.GetErrorMessage().c_str());\n return false;\n }\n\n for (auto iter = result.begin(); iter != result.end(); iter++) {\n spirv.push_back(*iter);\n }\n\n return true;\n}\n\n\/\/\n\/\/ Compile a given string containing SPIR-V assembly into SPV for use by VK\n\/\/ Return value of false means an error was encountered.\n\/\/\nbool VkTestFramework::ASMtoSPV(const spv_target_env target_env, const uint32_t options, const char *pasm,\n std::vector &spv) {\n spv_binary binary;\n spv_diagnostic diagnostic = nullptr;\n spv_context context = spvContextCreate(target_env);\n spv_result_t error = spvTextToBinaryWithOptions(context, pasm, strlen(pasm), options, &binary, &diagnostic);\n spvContextDestroy(context);\n if (error) {\n __android_log_print(ANDROID_LOG_ERROR, \"VkLayerValidationTest\", \"ASMtoSPV compilation failed\");\n spvDiagnosticDestroy(diagnostic);\n return false;\n }\n spv.insert(spv.end(), binary->code, binary->code + binary->wordCount);\n spvBinaryDestroy(binary);\n\n return true;\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":129} {"diff_hunk":"@@ -120,21 +120,26 @@ void embedding_layer::bp_compute() {\n auto& opt = *m_weights[0]->get_optimizer();\n \n \/\/ Local data\n- const auto& local_input = get_local_prev_activations();\n- auto& local_dict_grad = m_dictionary_gradient.Matrix();\n- const auto& local_output_grad = get_local_prev_error_signals();\n+ const auto& local_input = dynamic_cast(get_local_prev_activations());\n+ auto& local_dict_grad = dynamic_cast(m_dictionary_gradient.Matrix());\n+ const auto& local_output_grad = dynamic_cast(get_local_prev_error_signals());\n const auto& local_width = local_input.Width();\n const auto& c = static_cast(this->m_model->get_execution_context());\n const auto& mini_batch_size = c.get_effective_mini_batch_size();\n \n \/\/ Update appropriate columns of gradient w.r.t. dictionary\n+ \/\/ Note: Don't update gradient for padding index\n El::Zero(local_dict_grad);\n CPUMat dict_grad_v, output_grad_v;\n for (El::Int col = 0; col < local_width; ++ col) {\n- const El::Int ind = static_cast(local_input(0, col));\n- El::View(dict_grad_v, local_dict_grad, El::ALL, El::IR(ind));\n- El::LockedView(output_grad_v, local_output_grad, El::ALL, El::IR(col));\n- El::Axpy(DataType{1}, output_grad_v, dict_grad_v);\n+ const El::Int ind = static_cast(std::floor(local_input(0, col)));\n+ if (0 <= ind\n+ && ind < static_cast(m_num_embeddings)\n+ && ind != m_padding_idx) {\n+ El::View(dict_grad_v, local_dict_grad, El::ALL, El::IR(ind));\n+ El::LockedView(output_grad_v, local_output_grad, El::ALL, El::IR(col));\n+ El::Axpy(DataType{1}, output_grad_v, dict_grad_v);\n+ }\n }\n opt.add_to_gradient(m_dictionary_gradient,\n DataType{1} \/ mini_batch_size,","source_code":"\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.\n\/\/ Produced at the Lawrence Livermore National Laboratory.\n\/\/ Written by the LBANN Research Team (B. Van Essen, et al.) 
listed in\n\/\/ the CONTRIBUTORS file. \n\/\/\n\/\/ LLNL-CODE-697807.\n\/\/ All rights reserved.\n\/\/\n\/\/ This file is part of LBANN: Livermore Big Artificial Neural Network\n\/\/ Toolkit. For details, see http:\/\/software.llnl.gov\/LBANN or\n\/\/ https:\/\/github.com\/LLNL\/LBANN.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"Licensee\"); you\n\/\/ may not use this file except in compliance with the License. You may\n\/\/ obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the license.\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n#define LBANN_EMBEDDING_LAYER_INSTANTIATE\n#include \"lbann\/layers\/learning\/embedding.hpp\"\n#include \"lbann\/models\/model.hpp\"\n#include \"lbann\/execution_contexts\/sgd_execution_context.hpp\"\n\nnamespace lbann {\n\ntemplate <>\nvoid embedding_layer<data_layout::DATA_PARALLEL, El::Device::CPU>::setup_matrices(const El::Grid& grid) {\n Layer::setup_matrices(grid);\n m_dictionary_gradient = StarMat<El::Device::CPU>(grid);\n}\n\ntemplate <>\nvoid embedding_layer<data_layout::DATA_PARALLEL, El::Device::CPU>::setup_dims() {\n Layer::setup_dims();\n\n \/\/ Make sure input dimensions are valid\n if (this->get_input_size() != 1) {\n const auto& input_dims = this->get_input_dims();\n std::ostringstream err;\n err << get_type() << \" layer \\\"\" << get_name() << \"\\\" \"\n << \"received an input tensor with invalid dimensions \"\n << \"(expected 1, got \";\n for (size_t i = 0; i < input_dims.size(); ++i) {\n err << (i > 0 ? \"x\" : \"\") << input_dims[i];\n }\n err << \")\";\n LBANN_ERROR(err.str());\n }\n\n \/\/ Output is size of embedding vector\n this->set_output_dims({static_cast<int>(m_embedding_size)});\n\n}\n\ntemplate <>\nvoid embedding_layer<data_layout::DATA_PARALLEL, El::Device::CPU>::setup_data() {\n Layer::setup_data();\n\n \/\/ Make sure layer has weights for dictionary\n if (this->m_weights.size() != 1) {\n std::ostringstream err;\n err << \"attempted to setup \"\n << this->get_type() << \" layer \\\"\" << this->get_name() << \"\\\" \"\n << \"with an invalid number of weights \"\n << \"(expected 1, \"\n << \"found \" << this->m_weights.size() << \")\";\n LBANN_ERROR(err.str());\n }\n\n \/\/ Initialize dictionary\n auto& dict = *m_weights[0];\n auto matrix_dist = get_prev_activations().DistData();\n matrix_dist.colDist = El::STAR;\n matrix_dist.rowDist = El::STAR;\n dict.set_dims({static_cast<int>(m_embedding_size)},\n {static_cast<int>(m_dictionary_size)});\n dict.set_matrix_distribution(matrix_dist);\n\n \/\/ Initialize gradient w.r.t. 
dictionary\n m_dictionary_gradient.Resize(m_embedding_size, m_dictionary_size);\n\n}\n\ntemplate <>\nvoid embedding_layer::fp_compute() {\n\n \/\/ Local data\n const auto& local_dict = m_weights[0]->get_values().LockedMatrix();\n const auto& local_input = get_local_prev_activations();\n auto& local_output = get_local_activations();\n const auto& local_width = local_input.Width();\n\n \/\/ Populate output matrix with appropriate columns of dictionary\n CPUMat dict_v, output_v;\n for (El::Int col = 0; col < local_width; ++ col) {\n const El::Int ind = static_cast(local_input(0, col));\n El::LockedView(dict_v, local_dict, El::ALL, El::IR(ind));\n El::View(output_v, local_output, El::ALL, El::IR(col));\n El::Copy(dict_v, output_v);\n }\n\n}\n\ntemplate <>\nvoid embedding_layer::bp_compute() {\n\n \/\/ Embedding layer is not differentiable w.r.t. inputs\n El::Zero(get_error_signals());\n\n \/\/ Nothing to be done if dictionary is not being optimized\n if (m_weights[0]->get_optimizer() == nullptr) { return; }\n auto& opt = *m_weights[0]->get_optimizer();\n\n \/\/ Local data\n const auto& local_input = get_local_prev_activations();\n auto& local_dict_grad = m_dictionary_gradient.Matrix();\n const auto& local_output_grad = get_local_prev_error_signals();\n const auto& local_width = local_input.Width();\n const auto& c = static_cast(this->m_model->get_execution_context());\n const auto& mini_batch_size = c.get_effective_mini_batch_size();\n\n \/\/ Update appropriate columns of gradient w.r.t. dictionary\n El::Zero(local_dict_grad);\n CPUMat dict_grad_v, output_grad_v;\n for (El::Int col = 0; col < local_width; ++ col) {\n const El::Int ind = static_cast(local_input(0, col));\n El::View(dict_grad_v, local_dict_grad, El::ALL, El::IR(ind));\n El::LockedView(output_grad_v, local_output_grad, El::ALL, El::IR(col));\n El::Axpy(DataType{1}, output_grad_v, dict_grad_v);\n }\n opt.add_to_gradient(m_dictionary_gradient,\n DataType{1} \/ mini_batch_size,\n true);\n\n}\n\n\/\/ Explicit instantiation\ntemplate class embedding_layer;\n\n} \/\/ namespace lbann\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":130} {"diff_hunk":"@@ -141,6 +141,28 @@ struct compute_kernel_gpu {\n const input_t& input) const {\n return compute(ctx, desc, input);\n }\n+\n+#ifdef ONEDAL_DATA_PARALLEL\n+ void operator()(const context_gpu& ctx,\n+ const descriptor_t& desc,\n+ const table& x,\n+ const table& y,\n+ homogen_table& res) {\n+ ONEDAL_ASSERT(x.get_row_count() == res.get_row_count());\n+ ONEDAL_ASSERT(y.get_row_count() == res.get_column_count());\n+ ONEDAL_ASSERT(x.get_column_count() == y.get_column_count());\n+\n+ auto& queue = ctx.get_queue();\n+ const auto x_nd = pr::table2ndarray(queue, x, sycl::usm::alloc::device);\n+ const auto y_nd = pr::table2ndarray(queue, y, sycl::usm::alloc::device);\n+\n+ auto res_ptr = res.get_data();\n+ auto res_nd = pr::ndarray::wrap(const_cast(res_ptr),\n+ { res.get_row_count(), res.get_column_count() });\n+\n+ compute_rbf(queue, x_nd, y_nd, res_nd, desc.get_sigma());\n+ }\n+#endif\n };\n \n template struct compute_kernel_gpu;","source_code":"\/*******************************************************************************\n* Copyright 2020-2021 Intel Corporation\n*\n* Licensed under the Apache License, Version 2.0 (the \"License\");\n* you may not use this file except in compliance with the License.\n* You may obtain a copy of the License at\n*\n* http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n*\n* Unless required by applicable law or agreed to in writing, software\n* distributed 
under the License is distributed on an \"AS IS\" BASIS,\n* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n* See the License for the specific language governing permissions and\n* limitations under the License.\n*******************************************************************************\/\n\n#include \"oneapi\/dal\/algo\/rbf_kernel\/backend\/gpu\/compute_kernel.hpp\"\n#include \"oneapi\/dal\/backend\/primitives\/reduction.hpp\"\n#include \"oneapi\/dal\/backend\/primitives\/blas.hpp\"\n#include \"oneapi\/dal\/backend\/math.hpp\"\n#include \"oneapi\/dal\/backend\/primitives\/utils.hpp\"\n\nnamespace oneapi::dal::rbf_kernel::backend {\n\nusing dal::backend::context_gpu;\nusing input_t = compute_input;\nusing result_t = compute_result;\nusing descriptor_t = detail::descriptor_base;\n\nnamespace pr = dal::backend::primitives;\n\ntemplate \ninline auto compute_exponents(sycl::queue& queue,\n const pr::ndview& sqr_x_nd,\n const pr::ndview& sqr_y_nd,\n pr::ndview& res_nd,\n double sigma,\n const dal::backend::event_vector& deps = {}) {\n const std::int64_t x_row_count = sqr_x_nd.get_dimension(0);\n const std::int64_t y_row_count = sqr_y_nd.get_dimension(0);\n ONEDAL_ASSERT(res_nd.get_count() == x_row_count * y_row_count);\n\n const Float coeff = static_cast(-0.5 \/ (sigma * sigma));\n\n const Float* sqr_x_ptr = sqr_x_nd.get_data();\n const Float* sqr_y_ptr = sqr_y_nd.get_data();\n Float* res_ptr = res_nd.get_mutable_data();\n\n const Float threshold = dal::backend::exp_low_threshold();\n\n const auto wg_size = dal::backend::propose_wg_size(queue);\n const auto range =\n dal::backend::make_multiple_nd_range_2d({ x_row_count, y_row_count }, { wg_size, 1 });\n\n auto compute_rbf_event = queue.submit([&](sycl::handler& cgh) {\n cgh.depends_on(deps);\n const std::size_t ld = y_row_count;\n\n cgh.parallel_for(range, [=](sycl::nd_item<2> item) {\n const std::size_t i = item.get_global_id(0);\n const std::size_t j = item.get_global_id(1);\n const Float sqr_x_i = sqr_x_ptr[i];\n const Float sqr_y_j = sqr_y_ptr[j];\n const Float res_rbf_ij = res_ptr[i * ld + j];\n const Float arg = sycl::fmax((sqr_x_i + sqr_y_j + res_rbf_ij) * coeff, threshold);\n\n res_ptr[i * ld + j] = sycl::exp(arg);\n });\n });\n\n return compute_rbf_event;\n}\n\ntemplate \ninline auto compute_rbf(sycl::queue& queue,\n const pr::ndview& x_nd,\n const pr::ndview& y_nd,\n pr::ndview& res_nd,\n double sigma,\n const dal::backend::event_vector& deps = {}) {\n const std::int64_t x_row_count = x_nd.get_dimension(0);\n const std::int64_t y_row_count = y_nd.get_dimension(0);\n\n auto sqr_x_nd = pr::ndarray::empty(queue, { x_row_count }, sycl::usm::alloc::device);\n auto sqr_y_nd = pr::ndarray::empty(queue, { y_row_count }, sycl::usm::alloc::device);\n\n auto reduce_x_event =\n pr::reduce_by_rows(queue, x_nd, sqr_x_nd, pr::sum{}, pr::square{}, deps);\n auto reduce_y_event =\n pr::reduce_by_rows(queue, y_nd, sqr_y_nd, pr::sum{}, pr::square{}, deps);\n\n constexpr Float alpha = -2.0;\n constexpr Float beta = 0.0;\n auto gemm_event = pr::gemm(queue, x_nd, y_nd.t(), res_nd, alpha, beta);\n\n auto compute_exponents_event =\n compute_exponents(queue,\n sqr_x_nd,\n sqr_y_nd,\n res_nd,\n sigma,\n { reduce_x_event, reduce_y_event, gemm_event });\n\n auto smart_event =\n dal::backend::smart_event{ compute_exponents_event }.attach(sqr_x_nd).attach(sqr_y_nd);\n\n return smart_event;\n}\n\ntemplate \nstatic result_t compute(const context_gpu& ctx, const descriptor_t& desc, const input_t& input) {\n const auto x = 
input.get_x();\n const auto y = input.get_y();\n\n auto& queue = ctx.get_queue();\n\n const std::int64_t x_row_count = x.get_row_count();\n const std::int64_t y_row_count = y.get_row_count();\n\n ONEDAL_ASSERT(x.get_column_count() == y.get_column_count());\n dal::detail::check_mul_overflow(x_row_count, y_row_count);\n\n const auto x_nd = pr::table2ndarray(queue, x, sycl::usm::alloc::device);\n const auto y_nd = pr::table2ndarray(queue, y, sycl::usm::alloc::device);\n\n auto res_nd =\n pr::ndarray::empty(queue, { x_row_count, y_row_count }, sycl::usm::alloc::device);\n\n auto compute_rbf_event = compute_rbf(queue, x_nd, y_nd, res_nd, desc.get_sigma());\n\n const auto res_array = res_nd.flatten(queue, { compute_rbf_event });\n auto res_table = homogen_table::wrap(res_array, x_row_count, y_row_count);\n\n return result_t{}.set_values(res_table);\n}\n\ntemplate \nstruct compute_kernel_gpu {\n result_t operator()(const context_gpu& ctx,\n const descriptor_t& desc,\n const input_t& input) const {\n return compute(ctx, desc, input);\n }\n};\n\ntemplate struct compute_kernel_gpu;\ntemplate struct compute_kernel_gpu;\n\n} \/\/ namespace oneapi::dal::rbf_kernel::backend\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":131} {"diff_hunk":"@@ -55,7 +55,7 @@ def plot_importance(booster, ax=None, height=0.2,\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n- raise ImportError('You must install matplotlib for plotting library')\n+ raise ImportError('You must install matplotlib to plot importance.')\n \n if isinstance(booster, LGBMModel):\n importance = booster.booster_.feature_importance(importance_type=importance_type)","source_code":"# coding: utf-8\n# pylint: disable = C0103\n\"\"\"Plotting Library.\"\"\"\nfrom __future__ import absolute_import\n\nimport numpy as np\n\nfrom .basic import Booster, is_numpy_1d_array\nfrom .sklearn import LGBMModel\n\n\ndef plot_importance(booster, ax=None, height=0.2,\n xlim=None, ylim=None, title='Feature importance',\n xlabel='Feature importance', ylabel='Features',\n importance_type='split', max_num_features=None,\n ignore_zero=True, grid=True, **kwargs):\n \"\"\"Plot model feature importances.\n\n Parameters\n ----------\n booster : Booster, LGBMModel or array\n Booster or LGBMModel instance, or array of feature importances\n ax : matplotlib Axes\n Target axes instance. If None, new figure and axes will be created.\n height : float\n Bar height, passed to ax.barh()\n xlim : tuple\n Tuple passed to axes.xlim()\n ylim : tuple\n Tuple passed to axes.ylim()\n title : str\n Axes title. Pass None to disable.\n xlabel : str\n X axis title label. Pass None to disable.\n ylabel : str\n Y axis title label. 
Pass None to disable.\n importance_type : str\n How the importance is calculated: \"split\" or \"gain\"\n \"split\" is the number of times a feature is used in a model\n \"gain\" is the total gain of splits which use the feature\n max_num_features : int\n Max number of top features displayed on plot.\n If None or smaller than 1, all features will be displayed.\n ignore_zero : bool\n Ignore features with zero importance\n grid : bool\n Whether add grid for axes\n **kwargs :\n Other keywords passed to ax.barh()\n\n Returns\n -------\n ax : matplotlib Axes\n \"\"\"\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise ImportError('You must install matplotlib for plotting library')\n\n if isinstance(booster, LGBMModel):\n importance = booster.booster_.feature_importance(importance_type=importance_type)\n elif isinstance(booster, Booster):\n importance = booster.feature_importance(importance_type=importance_type)\n elif is_numpy_1d_array(booster) or isinstance(booster, list):\n importance = booster\n else:\n raise ValueError('booster must be Booster or array instance')\n\n if not len(importance):\n raise ValueError('Booster feature_importances are empty')\n\n tuples = sorted(enumerate(importance), key=lambda x: x[1])\n if ignore_zero:\n tuples = [x for x in tuples if x[1] > 0]\n if max_num_features is not None and max_num_features > 0:\n tuples = tuples[-max_num_features:]\n labels, values = zip(*tuples)\n\n if ax is None:\n _, ax = plt.subplots(1, 1)\n\n ylocs = np.arange(len(values))\n ax.barh(ylocs, values, align='center', height=height, **kwargs)\n\n for x, y in zip(values, ylocs):\n ax.text(x + 1, y, x, va='center')\n\n ax.set_yticks(ylocs)\n ax.set_yticklabels(labels)\n\n if xlim is not None:\n if not isinstance(xlim, tuple) or len(xlim) != 2:\n raise ValueError('xlim must be a tuple of 2 elements')\n else:\n xlim = (0, max(values) * 1.1)\n ax.set_xlim(xlim)\n\n if ylim is not None:\n if not isinstance(ylim, tuple) or len(ylim) != 2:\n raise ValueError('ylim must be a tuple of 2 elements')\n else:\n ylim = (-1, len(values))\n ax.set_ylim(ylim)\n\n if title is not None:\n ax.set_title(title)\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n ax.grid(grid)\n return ax\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":132} {"diff_hunk":"@@ -21,7 +21,7 @@ void AddEdgesProcessor::process(const cpp2::AddEdgesRequest& req) {\n std::vector data;\n std::for_each(partEdges.second.begin(), partEdges.second.end(), [&](auto& edge){\n auto key = KeyUtils::edgeKey(partId, edge.key.src, edge.key.edge_type,\n- edge.key.ranking, edge.key.dst, now);\n+ edge.key.ranking, edge.key.dst, version);\n data.emplace_back(std::move(key), std::move(edge.get_props()));\n });\n doPut(spaceId, partId, std::move(data));","source_code":"\/* Copyright (c) 2018 - present, VE Software Inc. 
All rights reserved\n *\n * This source code is licensed under Apache 2.0 License\n * (found in the LICENSE.Apache file in the root directory)\n *\/\n#include \"storage\/AddEdgesProcessor.h\"\n#include \n#include \"time\/TimeUtils.h\"\n#include \"storage\/KeyUtils.h\"\n\nnamespace nebula {\nnamespace storage {\n\nvoid AddEdgesProcessor::process(const cpp2::AddEdgesRequest& req) {\n auto spaceId = req.get_space_id();\n auto now = time::TimeUtils::nowInMSeconds();\n callingNum_ = req.parts.size();\n CHECK_NOTNULL(kvstore_);\n std::for_each(req.parts.begin(), req.parts.end(), [&](auto& partEdges){\n auto partId = partEdges.first;\n std::vector data;\n std::for_each(partEdges.second.begin(), partEdges.second.end(), [&](auto& edge){\n auto key = KeyUtils::edgeKey(partId, edge.key.src, edge.key.edge_type,\n edge.key.ranking, edge.key.dst, now);\n data.emplace_back(std::move(key), std::move(edge.get_props()));\n });\n doPut(spaceId, partId, std::move(data));\n });\n}\n\n} \/\/ namespace storage\n} \/\/ namespace nebula\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":133} {"diff_hunk":"@@ -128,6 +128,7 @@ void WANZmq::Write(const char *buffer, size_t size)\n \", in call to WANZmq write\\n\");\n }\n }\n+ *\/\n }\n \n void WANZmq::Flush() {}","source_code":"\/*\n * Distributed under the OSI-approved Apache License, Version 2.0. See\n * accompanying file Copyright.txt for details.\n *\n * WANZmq.cpp\n *\n * Created on: May 26, 2017\n * Author: Jason Wang wangr1@ornl.gov\n *\/\n\n#include \"WANZmq.h\"\n\n#include \n\nnamespace adios2\n{\nnamespace transport\n{\n\nWANZmq::WANZmq(const std::string ipAddress, const std::string port,\n MPI_Comm mpiComm, const bool debugMode)\n: Transport(\"wan\", \"zmq\", mpiComm, debugMode), m_IPAddress(ipAddress),\n m_Port(port)\n{\n\n if (m_DebugMode)\n {\n \/\/ TODO verify port is unsigned int\n }\n}\n\nWANZmq::~WANZmq()\n{\n if (m_Socket)\n {\n zmq_close(m_Socket);\n }\n}\n\nvoid WANZmq::Open(const std::string &name, const OpenMode openMode)\n{\n m_Name = name;\n m_OpenMode = openMode;\n\n if (m_OpenMode == OpenMode::Write)\n {\n if (m_Profiler.IsActive)\n {\n m_Profiler.Timers.at(\"open\").Resume();\n }\n\n m_Socket = zmq_socket(m_Context, ZMQ_REQ);\n const std::string fullIP(\"tcp:\/\/\" + m_IPAddress + \":\" + m_Port);\n zmq_connect(m_Socket, fullIP.c_str());\n\n if (m_Profiler.IsActive)\n {\n m_Profiler.Timers.at(\"open\").Pause();\n }\n }\n else if (m_OpenMode == OpenMode::Append)\n {\n if (m_DebugMode)\n {\n throw std::invalid_argument(\n \"ERROR: WAN transport \" + m_Name +\n \" only supports \"\n \"OpenMode:w (write\/sender) and \"\n \"OpenMode:r (read\/receiver), in call to Open\\n\");\n }\n }\n else if (m_OpenMode == OpenMode::Read)\n {\n if (m_Profiler.IsActive)\n {\n m_Profiler.Timers.at(\"open\").Resume();\n }\n\n m_Socket = zmq_socket(m_Context, ZMQ_REP);\n const std::string fullIP(\"tcp:\/\/\" + m_IPAddress + \":\" + m_Port);\n zmq_bind(m_Socket, fullIP.c_str());\n\n if (m_Profiler.IsActive)\n {\n m_Profiler.Timers.at(\"open\").Pause();\n }\n }\n\n if (m_DebugMode)\n {\n if (m_Socket == NULL) \/\/ something goes wrong\n {\n throw std::ios_base::failure(\n \"ERROR: couldn't open socket for address \" + m_Name +\n \", in call to WANZmq Open\\n\");\n }\n }\n m_IsOpen = true;\n}\n\nvoid WANZmq::SetBuffer(char *buffer, size_t size) {}\n\nvoid WANZmq::Write(const char *buffer, size_t size)\n{\n\n if (m_Profiler.IsActive)\n {\n m_Profiler.Timers.at(\"write\").Resume();\n }\n\n int status = zmq_send(m_Socket, buffer, size, 0);\n char ret[10];\n 
zmq_recv(m_Socket, ret, 10, 0);\n\n if (m_Profiler.IsActive)\n {\n m_Profiler.Timers.at(\"write\").Pause();\n }\n\n if (m_DebugMode)\n {\n const std::string retString(ret);\n\n if (status == -1 || retString != \"OK\") \/\/ TODO : verify this\n {\n throw std::ios_base::failure(\"ERROR: couldn't send message \" +\n m_Name +\n \", in call to WANZmq write\\n\");\n }\n }\n}\n\nvoid WANZmq::Flush() {}\n\nvoid WANZmq::Close()\n{\n if (m_Socket)\n {\n zmq_close(m_Socket);\n }\n}\n\n} \/\/ end namespace transport\n} \/\/ end namespace adios\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":134} {"diff_hunk":"@@ -68,10 +68,21 @@ bool generateDataStoreOptions(const int argc,\n \/\/ declare a group of options that will be allowed both on command line\n \/\/ as well as in a config file\n boost::program_options::options_description config_options(\"Configuration\");\n- config_options.add_options()(\"max-wait\",\n- boost::program_options::value<int>(&max_wait)->default_value(-1),\n- \"Maximum number of seconds to wait on a running data update \"\n- \"before acquiring the lock by force.\");\n+ config_options.add_options() \/\/\n+ (\"max-wait\",\n+ boost::program_options::value<int>(&max_wait)->default_value(-1),\n+ \"Maximum number of seconds to wait on a running data update \"\n+ \"before acquiring the lock by force.\") \/\/\n+ (\"dataset-name\",\n+ boost::program_options::value<std::string>(&dataset_name)->default_value(\"\"),\n+ \"Name of the dataset to load into memory. This allows having multiple datasets in memory \"\n+ \"at the same time.\") \/\/\n+ (\"list\",\n+ boost::program_options::value<bool>(&list_datasets)\n+ ->default_value(false)\n+ ->implicit_value(true),\n+ \"Name of the dataset to load into memory. This allows having multiple datasets in memory \"\n+ \"at the same time.\");\n \n \/\/ hidden options, will be allowed on command line but will not be shown to the user\n boost::program_options::options_description hidden_options(\"Hidden options\");","source_code":"#include \"storage\/shared_memory.hpp\"\n#include \"storage\/shared_monitor.hpp\"\n#include \"storage\/storage.hpp\"\n#include \"osrm\/exception.hpp\"\n#include \"util\/log.hpp\"\n#include \"util\/meminfo.hpp\"\n#include \"util\/typedefs.hpp\"\n#include \"util\/version.hpp\"\n\n#include \n#include \n\n#include \n#include \n\nusing namespace osrm;\n\nvoid removeLocks() { storage::SharedMonitor::remove(); }\n\nvoid deleteRegion(const storage::SharedDataType region)\n{\n if (storage::SharedMemory::RegionExists(region) && !storage::SharedMemory::Remove(region))\n {\n util::Log(logWARNING) << \"could not delete shared memory region \"\n << storage::regionToString(region);\n }\n}\n\nvoid springClean()\n{\n osrm::util::Log() << \"Releasing all locks\";\n osrm::util::Log() << \"ATTENTION! BE CAREFUL!\";\n osrm::util::Log() << \"----------------------\";\n osrm::util::Log() << \"This tool may put osrm-routed into an undefined state!\";\n osrm::util::Log() << \"Type 'Y' to acknowledge that you know what you are doing.\";\n osrm::util::Log() << \"\\n\\nDo you want to purge all shared memory allocated \"\n << \"by osrm-datastore? 
[type 'Y' to confirm]\";\n\n const auto letter = getchar();\n if (letter != 'Y')\n {\n osrm::util::Log() << \"aborted.\";\n }\n else\n {\n deleteRegion(storage::REGION_1);\n deleteRegion(storage::REGION_2);\n removeLocks();\n }\n}\n\n\/\/ generate boost::program_options object for the routing part\nbool generateDataStoreOptions(const int argc,\n const char *argv[],\n std::string &verbosity,\n boost::filesystem::path &base_path,\n int &max_wait)\n{\n \/\/ declare a group of options that will be allowed only on command line\n boost::program_options::options_description generic_options(\"Options\");\n generic_options.add_options()(\"version,v\", \"Show version\")(\"help,h\", \"Show this help message\")(\n \"verbosity,l\",\n boost::program_options::value<std::string>(&verbosity)->default_value(\"INFO\"),\n std::string(\"Log verbosity level: \" + util::LogPolicy::GetLevels()).c_str())(\n \"remove-locks,r\", \"Remove locks\")(\"spring-clean,s\",\n \"Spring-cleaning all shared memory regions\");\n\n \/\/ declare a group of options that will be allowed both on command line\n \/\/ as well as in a config file\n boost::program_options::options_description config_options(\"Configuration\");\n config_options.add_options()(\"max-wait\",\n boost::program_options::value<int>(&max_wait)->default_value(-1),\n \"Maximum number of seconds to wait on a running data update \"\n \"before acquiring the lock by force.\");\n\n \/\/ hidden options, will be allowed on command line but will not be shown to the user\n boost::program_options::options_description hidden_options(\"Hidden options\");\n hidden_options.add_options()(\"base,b\",\n boost::program_options::value<boost::filesystem::path>(&base_path),\n \"base path to .osrm file\");\n\n \/\/ positional option\n boost::program_options::positional_options_description positional_options;\n positional_options.add(\"base\", 1);\n\n \/\/ combine above options for parsing\n boost::program_options::options_description cmdline_options;\n cmdline_options.add(generic_options).add(config_options).add(hidden_options);\n\n const auto *executable = argv[0];\n boost::program_options::options_description visible_options(\n boost::filesystem::path(executable).filename().string() + \" [] \");\n visible_options.add(generic_options).add(config_options);\n\n \/\/ print help options if no infile is specified\n if (argc < 2)\n {\n util::Log() << visible_options;\n return false;\n }\n\n \/\/ parse command line options\n boost::program_options::variables_map option_variables;\n\n try\n {\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv)\n .options(cmdline_options)\n .positional(positional_options)\n .run(),\n option_variables);\n }\n catch (const boost::program_options::error &e)\n {\n util::Log(logERROR) << e.what();\n return false;\n }\n\n if (option_variables.count(\"version\"))\n {\n util::Log() << OSRM_VERSION;\n return false;\n }\n\n if (option_variables.count(\"help\"))\n {\n util::Log() << visible_options;\n return false;\n }\n\n if (option_variables.count(\"remove-locks\"))\n {\n removeLocks();\n return false;\n }\n\n if (option_variables.count(\"spring-clean\"))\n {\n springClean();\n return false;\n }\n\n boost::program_options::notify(option_variables);\n\n return true;\n}\n\n[[noreturn]] void CleanupSharedBarriers(int signum)\n{ \/\/ Here the lock state of named mutexes is unknown, make a hard cleanup\n removeLocks();\n std::_Exit(128 + signum);\n}\n\nint main(const int argc, const char *argv[]) try\n{\n int signals[] = {SIGTERM, SIGSEGV, SIGINT, SIGILL, SIGABRT, SIGFPE};\n for 
(auto sig : signals)\n {\n std::signal(sig, CleanupSharedBarriers);\n }\n\n util::LogPolicy::GetInstance().Unmute();\n\n std::string verbosity;\n boost::filesystem::path base_path;\n int max_wait = -1;\n if (!generateDataStoreOptions(argc, argv, verbosity, base_path, max_wait))\n {\n return EXIT_SUCCESS;\n }\n\n util::LogPolicy::GetInstance().SetLevel(verbosity);\n\n storage::StorageConfig config(base_path);\n if (!config.IsValid())\n {\n util::Log(logERROR) << \"Config contains invalid file paths. Exiting!\";\n return EXIT_FAILURE;\n }\n storage::Storage storage(std::move(config));\n\n return storage.Run(max_wait);\n}\ncatch (const osrm::RuntimeError &e)\n{\n util::Log(logERROR) << e.what();\n return e.GetCode();\n}\ncatch (const std::bad_alloc &e)\n{\n util::DumpMemoryStats();\n util::Log(logERROR) << \"[exception] \" << e.what();\n util::Log(logERROR) << \"Please provide more memory or disable locking the virtual \"\n \"address space (note: this makes OSRM swap, i.e. slow)\";\n return EXIT_FAILURE;\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":135} {"diff_hunk":"@@ -119,7 +119,20 @@ struct train_kernel_gpu {\n }\n };\n \n+template \n+struct train_kernel_gpu {\n+ train_result operator()(\n+ const dal::backend::context_gpu& ctx,\n+ const detail::descriptor_base& params,\n+ const train_input& input) const {\n+ throw unimplemented(\n+ dal::detail::error_messages::nu_svm_thunder_method_is_not_implemented_for_gpu());\n+ }\n+};\n+\n template struct train_kernel_gpu;\n template struct train_kernel_gpu;\n+template struct train_kernel_gpu;\n+template struct train_kernel_gpu;\n \n } \/\/ namespace oneapi::dal::svm::backend","source_code":"\/*******************************************************************************\n* Copyright 2020-2021 Intel Corporation\n*\n* Licensed under the Apache License, Version 2.0 (the \"License\");\n* you may not use this file except in compliance with the License.\n* You may obtain a copy of the License at\n*\n* http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n*\n* Unless required by applicable law or agreed to in writing, software\n* distributed under the License is distributed on an \"AS IS\" BASIS,\n* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n* See the License for the specific language governing permissions and\n* limitations under the License.\n*******************************************************************************\/\n\n#include \"oneapi\/dal\/algo\/svm\/backend\/gpu\/train_kernel.hpp\"\n#include \"oneapi\/dal\/algo\/svm\/backend\/model_interop.hpp\"\n#include \"oneapi\/dal\/algo\/svm\/backend\/model_conversion.hpp\"\n#include \"oneapi\/dal\/algo\/svm\/backend\/kernel_function_impl.hpp\"\n#include \"oneapi\/dal\/algo\/svm\/backend\/utils.hpp\"\n\n#include \"oneapi\/dal\/backend\/interop\/common_dpc.hpp\"\n#include \"oneapi\/dal\/backend\/interop\/error_converter.hpp\"\n#include \"oneapi\/dal\/backend\/interop\/table_conversion.hpp\"\n\n#include \"oneapi\/dal\/table\/row_accessor.hpp\"\n\n#include \n\nnamespace oneapi::dal::svm::backend {\n\nusing dal::backend::context_gpu;\nusing model_t = model;\nusing input_t = train_input;\nusing result_t = train_result;\nusing descriptor_t = detail::descriptor_base;\n\nnamespace daal_svm = daal::algorithms::svm;\nnamespace daal_kernel_function = daal::algorithms::kernel_function;\nnamespace interop = dal::backend::interop;\n\ntemplate \nusing daal_svm_thunder_kernel_t =\n daal_svm::training::internal::SVMTrainOneAPI;\n\ntemplate \nstatic result_t 
call_daal_kernel(const context_gpu& ctx,\n const descriptor_t& desc,\n const table& data,\n const table& labels) {\n auto& queue = ctx.get_queue();\n interop::execution_context_guard guard(queue);\n\n const std::uint64_t class_count = desc.get_class_count();\n if (class_count > 2) {\n throw unimplemented(dal::detail::error_messages::svm_multiclass_not_implemented_for_gpu());\n }\n\n const std::int64_t row_count = data.get_row_count();\n const std::int64_t column_count = data.get_column_count();\n\n auto arr_label = row_accessor{ labels }.pull(queue);\n\n binary_label_t unique_label;\n auto arr_new_label =\n convert_labels(queue, arr_label, { Float(-1.0), Float(1.0) }, unique_label);\n\n const auto daal_data = interop::convert_to_daal_table(queue, data);\n const auto daal_labels = interop::convert_to_daal_table(queue, arr_new_label, row_count, 1);\n\n auto kernel_impl = detail::get_kernel_function_impl(desc);\n if (!kernel_impl) {\n throw internal_error{ dal::detail::error_messages::unknown_kernel_function_type() };\n }\n const auto daal_kernel = kernel_impl->get_daal_kernel_function();\n\n const std::uint64_t cache_megabyte = static_cast(desc.get_cache_size());\n constexpr std::uint64_t megabyte = 1024 * 1024;\n dal::detail::check_mul_overflow(cache_megabyte, megabyte);\n const std::uint64_t cache_byte = cache_megabyte * megabyte;\n\n daal_svm::training::internal::KernelParameter daal_svm_parameter;\n daal_svm_parameter.kernel = daal_kernel;\n daal_svm_parameter.C = desc.get_c();\n daal_svm_parameter.accuracyThreshold = desc.get_accuracy_threshold();\n daal_svm_parameter.tau = desc.get_tau();\n daal_svm_parameter.maxIterations =\n dal::detail::integral_cast(desc.get_max_iteration_count());\n daal_svm_parameter.doShrinking = desc.get_shrinking();\n daal_svm_parameter.cacheSize = cache_byte;\n\n auto daal_model = daal_svm::Model::create(column_count);\n interop::status_to_exception(daal_svm_thunder_kernel_t().compute(daal_data,\n *daal_labels,\n daal_model.get(),\n daal_svm_parameter));\n auto table_support_indices =\n interop::convert_from_daal_homogen_table(daal_model->getSupportIndices());\n\n auto trained_model = convert_from_daal_model(*daal_model)\n .set_first_class_label(unique_label.first)\n .set_second_class_label(unique_label.second);\n\n return result_t().set_model(trained_model).set_support_indices(table_support_indices);\n}\n\ntemplate \nstatic result_t train(const context_gpu& ctx, const descriptor_t& desc, const input_t& input) {\n return call_daal_kernel(ctx, desc, input.get_data(), input.get_labels());\n}\n\ntemplate \nstruct train_kernel_gpu {\n result_t operator()(const context_gpu& ctx,\n const descriptor_t& desc,\n const input_t& input) const {\n return train(ctx, desc, input);\n }\n};\n\ntemplate struct train_kernel_gpu;\ntemplate struct train_kernel_gpu;\n\n} \/\/ namespace oneapi::dal::svm::backend\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":136} {"diff_hunk":"@@ -57,15 +57,10 @@ TEST(StatisticsQosTests, StatisticsDataWriterQosTest)\n *\/\n TEST(StatisticsQosTests, StatisticsDataReaderQosTest)\n {\n- \/\/ TODO(jlbueno) Remove this guards after implementation. 
Here to prevent failures in current CI.\n-#ifdef FASTDDS_STATISTICS\n- logError(STATISTICS_QOS_TEST, \"This test is going to fail because API is not yet implemented.\")\n-\n EXPECT_TRUE(STATISTICS_DATAREADER_QOS.reliability().kind == eprosima::fastdds::dds::RELIABLE_RELIABILITY_QOS);\n EXPECT_TRUE(STATISTICS_DATAREADER_QOS.durability().kind == eprosima::fastdds::dds::TRANSIENT_LOCAL_DURABILITY_QOS);\n EXPECT_TRUE(STATISTICS_DATAREADER_QOS.history().kind == eprosima::fastdds::dds::KEEP_LAST_HISTORY_QOS);\n EXPECT_TRUE(STATISTICS_DATAREADER_QOS.history().depth == 100);\n-#endif \/\/ FASTDDS_STATISTICS\n }\n \n } \/\/ namespace dds","source_code":"\/\/ Copyright 2021 Proyectos y Sistemas de Mantenimiento SL (eProsima).\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n#include \n\n#include \n#include \n#include \n#include \n\nnamespace eprosima {\nnamespace fastdds {\nnamespace statistics {\nnamespace dds {\n\n\/*\n * This test checks that STATISTICS_DATAWRITER_QOS correctly sets the expected QoS.\n * 1. Reliability RELIABLE\n * 2. Durability TRANSIENT LOCAL\n * 3. Pull mode enabled\n * 4. Publication mode ASYNCHRONOUS\n * 5. History kind KEEP LAST\n * 6. History depth 100\n *\/\nTEST(StatisticsQosTests, StatisticsDataWriterQosTest)\n{\n \/\/ TODO(jlbueno) Remove this guards after implementation. Here to prevent failures in current CI.\n#ifdef FASTDDS_STATISTICS\n logError(STATISTICS_QOS_TEST, \"This test is going to fail because API is not yet implemented.\")\n\n EXPECT_TRUE(STATISTICS_DATAWRITER_QOS.reliability().kind == eprosima::fastdds::dds::RELIABLE_RELIABILITY_QOS);\n EXPECT_TRUE(STATISTICS_DATAWRITER_QOS.durability().kind == eprosima::fastdds::dds::TRANSIENT_LOCAL_DURABILITY_QOS);\n \/\/ TODO(jlbueno) Pull mode is not yet exposed in DDS API\n EXPECT_TRUE(STATISTICS_DATAWRITER_QOS.publish_mode().kind == eprosima::fastdds::dds::ASYNCHRONOUS_PUBLISH_MODE);\n EXPECT_TRUE(STATISTICS_DATAWRITER_QOS.history().kind == eprosima::fastdds::dds::KEEP_LAST_HISTORY_QOS);\n EXPECT_TRUE(STATISTICS_DATAWRITER_QOS.history().depth == 100);\n#endif \/\/ FASTDDS_STATISTICS\n}\n\n\/*\n * This test checks that STATISTICS_DATAREADER_QOS correctly sets the expected QoS.\n * 1. Reliability RELIABLE\n * 2. Durability TRANSIENT LOCAL\n * 3. History kind KEEP LAST\n * 4. History depth 100\n *\/\nTEST(StatisticsQosTests, StatisticsDataReaderQosTest)\n{\n \/\/ TODO(jlbueno) Remove this guards after implementation. 
Here to prevent failures in current CI.\n#ifdef FASTDDS_STATISTICS\n logError(STATISTICS_QOS_TEST, \"This test is going to fail because API is not yet implemented.\")\n\n EXPECT_TRUE(STATISTICS_DATAREADER_QOS.reliability().kind == eprosima::fastdds::dds::RELIABLE_RELIABILITY_QOS);\n EXPECT_TRUE(STATISTICS_DATAREADER_QOS.durability().kind == eprosima::fastdds::dds::TRANSIENT_LOCAL_DURABILITY_QOS);\n EXPECT_TRUE(STATISTICS_DATAREADER_QOS.history().kind == eprosima::fastdds::dds::KEEP_LAST_HISTORY_QOS);\n EXPECT_TRUE(STATISTICS_DATAREADER_QOS.history().depth == 100);\n#endif \/\/ FASTDDS_STATISTICS\n}\n\n} \/\/ namespace dds\n} \/\/ namespace statistics\n} \/\/ namespace fastdds\n} \/\/ namespace eprosima\n\nint main(\n int argc,\n char** argv)\n{\n eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Error);\n\n testing::InitGoogleTest(&argc, argv);\n int ret = RUN_ALL_TESTS();\n\n eprosima::fastdds::dds::Log::KillThread();\n return ret;\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":137} {"diff_hunk":"@@ -80,6 +80,13 @@ void ScriptingEnvironment::init_lua_state(lua_State *lua_state)\n luabind::def(\"print\", LUA_print),\n luabind::def(\"durationIsValid\", durationIsValid),\n luabind::def(\"parseDuration\", parseDuration),\n+ luabind::class_(\"sources\")\n+ .def(luabind::constructor<>())\n+ .def(\"load\", &SourceContainer::loadRasterSource)\n+ .def(\"query\", &SourceContainer::getRasterDataFromSource)\n+ .def(\"interpolate\", &SourceContainer::getRasterInterpolateFromSource),\n+ luabind::class_(\"constants\")\n+ .enum_(\"enums\")[luabind::value(\"precision\", COORDINATE_PRECISION)],\n \n luabind::class_>(\"vector\")\n .def(\"Add\", static_cast::*)(const std::string &)>(","source_code":"\/*\n\nCopyright (c) 2015, Project OSRM contributors\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\nRedistributions of source code must retain the above copyright notice, this list\nof conditions and the following disclaimer.\nRedistributions in binary form must reproduce the above copyright notice, this\nlist of conditions and the following disclaimer in the documentation and\/or\nother materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n*\/\n\n#include \"scripting_environment.hpp\"\n\n#include \"extraction_helper_functions.hpp\"\n#include \"extraction_node.hpp\"\n#include \"extraction_way.hpp\"\n#include \"..\/data_structures\/external_memory_node.hpp\"\n#include \"..\/util\/lua_util.hpp\"\n#include \"..\/util\/osrm_exception.hpp\"\n#include \"..\/util\/simple_logger.hpp\"\n#include \"..\/typedefs.h\"\n\n#include \n\n#include \n\n#include \nnamespace\n{\n\/\/ wrapper method as luabind doesn't automatically overload funcs w\/ default parameters\ntemplate \nauto get_value_by_key(T const &object, const char *key) -> decltype(object.get_value_by_key(key))\n{\n return object.get_value_by_key(key, \"\");\n}\n\nint lua_error_callback(lua_State *L) \/\/ This is so I can use my own function as an\n\/\/ exception handler, pcall_log()\n{\n std::string error_msg = lua_tostring(L, -1);\n std::ostringstream error_stream;\n error_stream << error_msg;\n throw osrm::exception(\"ERROR occured in profile script:\\n\" + error_stream.str());\n}\n}\n\nScriptingEnvironment::ScriptingEnvironment(const std::string &file_name) : file_name(file_name)\n{\n SimpleLogger().Write() << \"Using script \" << file_name;\n}\n\nvoid ScriptingEnvironment::init_lua_state(lua_State *lua_state)\n{\n typedef double (osmium::Location::*location_member_ptr_type)() const;\n\n luabind::open(lua_state);\n \/\/ open utility libraries string library;\n luaL_openlibs(lua_state);\n\n luaAddScriptFolderToLoadPath(lua_state, file_name.c_str());\n\n \/\/ Add our function to the state's global scope\n luabind::module(lua_state)[\n luabind::def(\"print\", LUA_print),\n luabind::def(\"durationIsValid\", durationIsValid),\n luabind::def(\"parseDuration\", parseDuration),\n\n luabind::class_>(\"vector\")\n .def(\"Add\", static_cast::*)(const std::string &)>(\n &std::vector::push_back)),\n\n luabind::class_(\"Location\")\n .def(\"lat\", &osmium::Location::lat)\n .def(\"lon\", &osmium::Location::lon),\n\n luabind::class_(\"Node\")\n \/\/ .def(\"tags\", &osmium::Node::tags)\n .def(\"location\", &osmium::Node::location)\n .def(\"get_value_by_key\", &osmium::Node::get_value_by_key)\n .def(\"get_value_by_key\", &get_value_by_key)\n .def(\"id\", &osmium::Node::id),\n\n luabind::class_(\"ResultNode\")\n .def_readwrite(\"traffic_lights\", &ExtractionNode::traffic_lights)\n .def_readwrite(\"barrier\", &ExtractionNode::barrier),\n\n luabind::class_(\"ResultWay\")\n \/\/ .def(luabind::constructor<>())\n .def_readwrite(\"forward_speed\", &ExtractionWay::forward_speed)\n .def_readwrite(\"backward_speed\", &ExtractionWay::backward_speed)\n .def_readwrite(\"name\", &ExtractionWay::name)\n .def_readwrite(\"roundabout\", &ExtractionWay::roundabout)\n .def_readwrite(\"is_access_restricted\", &ExtractionWay::is_access_restricted)\n .def_readwrite(\"duration\", &ExtractionWay::duration)\n .property(\"forward_mode\", &ExtractionWay::get_forward_mode,\n &ExtractionWay::set_forward_mode)\n .property(\"backward_mode\", &ExtractionWay::get_backward_mode,\n 
&ExtractionWay::set_backward_mode)\n .enum_(\"constants\")[\n luabind::value(\"notSure\", 0),\n luabind::value(\"oneway\", 1),\n luabind::value(\"bidirectional\", 2),\n luabind::value(\"opposite\", 3)\n ],\n luabind::class_<osmium::Way>(\"Way\")\n .def(\"get_value_by_key\", &osmium::Way::get_value_by_key)\n .def(\"get_value_by_key\", &get_value_by_key)\n .def(\"id\", &osmium::Way::id)\n ];\n\n if (0 != luaL_dofile(lua_state, file_name.c_str()))\n {\n luabind::object error_msg(luabind::from_stack(lua_state, -1));\n std::ostringstream error_stream;\n error_stream << error_msg;\n throw osrm::exception(\"ERROR occurred in profile script:\\n\" + error_stream.str());\n }\n}\n\nlua_State *ScriptingEnvironment::get_lua_state()\n{\n std::lock_guard<std::mutex> lock(init_mutex);\n bool initialized = false;\n auto &ref = script_contexts.local(initialized);\n if (!initialized)\n {\n std::shared_ptr<lua_State> state(luaL_newstate(), lua_close);\n ref = state;\n init_lua_state(ref.get());\n }\n luabind::set_pcall_callback(&lua_error_callback);\n\n return ref.get();\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":138} {"diff_hunk":"@@ -7,6 +7,7 @@\n #include \"base\/Base.h\"\n #include <gtest\/gtest.h>\n #include \"time\/Duration.h\"\n+#include \n \n using nebula::time::Duration;\n ","source_code":"\/* Copyright (c) 2018 vesoft inc. All rights reserved.\n *\n * This source code is licensed under Apache 2.0 License,\n * attached with Common Clause Condition 1.0, found in the LICENSES directory.\n *\/\n\n#include \"base\/Base.h\"\n#include <gtest\/gtest.h>\n#include \"time\/Duration.h\"\n\nusing nebula::time::Duration;\n\nTEST(Duration, elapsedInSeconds) {\n for (int i = 0; i < 5; i++) {\n Duration dur;\n auto start = std::chrono::steady_clock::now();\n sleep(2);\n auto diff = std::chrono::steady_clock::now() - start;\n dur.pause();\n\n ASSERT_EQ(std::chrono::duration_cast<std::chrono::seconds>(diff).count(),\n dur.elapsedInSec()) << \"Inaccuracy in iteration \" << i;\n }\n}\n\n\nTEST(Duration, elapsedInMilliSeconds) {\n Duration dur;\n for (int i = 0; i < 200; i++) {\n dur.reset();\n auto start = std::chrono::steady_clock::now();\n usleep(5000); \/\/ Sleep for 5 ms\n auto diff = std::chrono::steady_clock::now() - start;\n dur.pause();\n\n \/\/ Allow 1ms difference\n ASSERT_LE(std::chrono::duration_cast<std::chrono::milliseconds>(diff).count(),\n dur.elapsedInMSec()) << \"Inaccuracy in iteration \" << i;\n ASSERT_GE(std::chrono::duration_cast<std::chrono::milliseconds>(diff).count() + 1,\n dur.elapsedInMSec()) << \"Inaccuracy in iteration \" << i;\n }\n}\n\n\nint main(int argc, char** argv) {\n testing::InitGoogleTest(&argc, argv);\n folly::init(&argc, &argv, true);\n google::SetStderrLogging(google::INFO);\n\n return RUN_ALL_TESTS();\n}\n\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":139} {"diff_hunk":"@@ -17,15 +17,19 @@\n namespace nebula {\n namespace meta {\n \n using Status = cpp2::JobStatus;\n+using AdminCmd = nebula::cpp2::AdminCmd;\n+\n+int32_t JobDescription::minDataVer_ = 1;\n+int32_t JobDescription::currDataVer_ = 1;\n \n JobDescription::JobDescription(int32_t id,\n- std::string cmd,\n+ nebula::cpp2::AdminCmd cmd,\n std::vector<std::string> paras,\n Status status,\n int64_t startTime,\n int64_t stopTime)\n : id_(id),\n- cmd_(std::move(cmd)),\n+ cmd_(cmd),\n paras_(std::move(paras)),\n status_(status),\n startTime_(startTime),","source_code":"\/* Copyright (c) 2019 vesoft inc. 
All rights reserved.\n *\n * This source code is licensed under Apache 2.0 License,\n * attached with Common Clause Condition 1.0, found in the LICENSES directory.\n *\/\n\n#include \n#include \n#include \n#include \n#include \n#include \"meta\/processors\/jobMan\/JobUtils.h\"\n#include \"meta\/processors\/jobMan\/JobDescription.h\"\n\n#include \"kvstore\/KVIterator.h\"\nnamespace nebula {\nnamespace meta {\n\nusing Status = cpp2::JobStatus;\n\nJobDescription::JobDescription(int32_t id,\n std::string cmd,\n std::vector<std::string> paras,\n Status status,\n int64_t startTime,\n int64_t stopTime)\n : id_(id),\n cmd_(std::move(cmd)),\n paras_(std::move(paras)),\n status_(status),\n startTime_(startTime),\n stopTime_(stopTime) {}\n\nfolly::Optional<JobDescription>\nJobDescription::makeJobDescription(folly::StringPiece rawkey,\n folly::StringPiece rawval) {\n try {\n if (!isJobKey(rawkey)) {\n return folly::none;\n }\n auto key = parseKey(rawkey);\n auto tup = parseVal(rawval);\n\n auto cmd = std::get<0>(tup);\n auto paras = std::get<1>(tup);\n for (auto p : paras) {\n LOG(INFO) << \"p = \" << p;\n }\n auto status = std::get<2>(tup);\n auto startTime = std::get<3>(tup);\n auto stopTime = std::get<4>(tup);\n return JobDescription(key, cmd, paras, status, startTime, stopTime);\n } catch(std::exception& ex) {\n LOG(ERROR) << ex.what();\n }\n return folly::none;\n}\n\nstd::string JobDescription::jobKey() const {\n return makeJobKey(id_);\n}\n\nstd::string JobDescription::makeJobKey(int32_t iJob) {\n std::string str;\n str.reserve(32);\n str.append(reinterpret_cast<const char*>(JobUtil::jobPrefix().data()),\n JobUtil::jobPrefix().size());\n str.append(reinterpret_cast<const char*>(&iJob), sizeof(int32_t));\n return str;\n}\n\nint32_t JobDescription::parseKey(const folly::StringPiece& rawKey) {\n auto offset = JobUtil::jobPrefix().size();\n return *reinterpret_cast<const int32_t*>(rawKey.begin() + offset);\n}\n\nstd::string JobDescription::jobVal() const {\n std::string str;\n auto cmdLen = cmd_.length();\n auto paraSize = paras_.size();\n str.reserve(256);\n str.append(reinterpret_cast<const char*>(&cmdLen), sizeof(size_t));\n str.append(reinterpret_cast<const char*>(cmd_.data()), cmd_.length());\n str.append(reinterpret_cast<const char*>(&paraSize), sizeof(size_t));\n for (auto& para : paras_) {\n auto len = para.length();\n str.append(reinterpret_cast<const char*>(&len), sizeof(len));\n str.append(reinterpret_cast<const char*>(&para[0]), len);\n }\n str.append(reinterpret_cast<const char*>(&status_), sizeof(Status));\n str.append(reinterpret_cast<const char*>(&startTime_), sizeof(int64_t));\n str.append(reinterpret_cast<const char*>(&stopTime_), sizeof(int64_t));\n return str;\n}\n\nstd::tuple<std::string,\n std::vector<std::string>,\n Status,\n int64_t,\n int64_t>\nJobDescription::parseVal(const folly::StringPiece& rawVal) {\n size_t offset = 0;\n\n std::string cmd = JobUtil::parseString(rawVal, offset);\n offset += sizeof(size_t) + cmd.length();\n\n std::vector<std::string> paras = JobUtil::parseStrVector(rawVal, &offset);\n\n auto status = JobUtil::parseFixedVal<Status>(rawVal, offset);\n offset += sizeof(Status);\n\n auto tStart = JobUtil::parseFixedVal<int64_t>(rawVal, offset);\n offset += sizeof(int64_t);\n\n auto tStop = JobUtil::parseFixedVal<int64_t>(rawVal, offset);\n\n return std::make_tuple(cmd, paras, status, tStart, tStop);\n}\n\ncpp2::JobDesc JobDescription::toJobDesc() {\n cpp2::JobDesc ret;\n ret.set_id(id_);\n ret.set_cmd(cmd_);\n ret.set_paras(paras_);\n ret.set_status(status_);\n ret.set_start_time(startTime_);\n ret.set_stop_time(stopTime_);\n return ret;\n}\n\nstd::string JobDescription::archiveKey() {\n std::string str;\n str.reserve(32);\n str.append(reinterpret_cast<const char*>(JobUtil::archivePrefix().data()),\n 
JobUtil::archivePrefix().size());\n str.append(reinterpret_cast<const char*>(&id_), sizeof(id_));\n return str;\n}\n\nbool JobDescription::setStatus(Status newStatus) {\n if (JobStatus::laterThan(status_, newStatus)) {\n return false;\n }\n status_ = newStatus;\n if (newStatus == Status::RUNNING) {\n startTime_ = std::time(nullptr);\n }\n if (JobStatus::laterThan(newStatus, Status::RUNNING)) {\n stopTime_ = std::time(nullptr);\n }\n return true;\n}\n\nbool JobDescription::isJobKey(const folly::StringPiece& rawKey) {\n return rawKey.size() == JobUtil::jobPrefix().length() + sizeof(int32_t);\n}\n\nfolly::Optional<JobDescription>\nJobDescription::loadJobDescription(int32_t iJob, nebula::kvstore::KVStore* kv) {\n auto key = makeJobKey(iJob);\n std::string val;\n auto rc = kv->get(0, 0, key, &val);\n if (rc != nebula::kvstore::SUCCEEDED) {\n return folly::none;\n }\n return makeJobDescription(key, val);\n}\n\n} \/\/ namespace meta\n} \/\/ namespace nebula\n\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":140} {"diff_hunk":"@@ -21,10 +21,9 @@ typedef boost::tokenizer<boost::char_separator<char>> tokenizer;\n namespace RDKit {\n namespace {\n \n-MolStandardize::TautomerTransform* getTautomer(const std::string& name,\n- const std::string& smarts,\n- const std::string& bond_str,\n- const std::string& charge_str) {\n+std::unique_ptr<MolStandardize::TautomerTransform> getTautomer(\n+ const std::string& name, const std::string& smarts,\n+ const std::string& bond_str, const std::string& charge_str) {\n std::vector<Bond::BondType> bond_types =\n MolStandardize::stringToBondType(bond_str);\n std::vector<int> charges = MolStandardize::stringToCharge(charge_str);","source_code":"\/\/\n\/\/ Copyright (C) 2018-2021 Susan H. Leung and other RDKit contributors\n\/\/\n\/\/ @@ All Rights Reserved @@\n\/\/ This file is part of the RDKit.\n\/\/ The contents are covered by the terms of the BSD license\n\/\/ which is included in the file license.txt, found at the root\n\/\/ of the RDKit source tree.\n\/\/\n#include \"TautomerCatalogUtils.h\"\n#include \n#include \n#include \n#include \n#include \n#include \ntypedef boost::tokenizer<boost::char_separator<char>> tokenizer;\n#include \n#include \n\nnamespace RDKit {\nnamespace {\n\nMolStandardize::TautomerTransform* getTautomer(const std::string& name,\n const std::string& smarts,\n const std::string& bond_str,\n const std::string& charge_str) {\n std::vector<Bond::BondType> bond_types =\n MolStandardize::stringToBondType(bond_str);\n std::vector<int> charges = MolStandardize::stringToCharge(charge_str);\n\n ROMol* tautomer = SmartsToMol(smarts);\n if (!tautomer) {\n throw ValueErrorException(\"cannot parse tautomer SMARTS: \" + smarts);\n }\n tautomer->setProp(common_properties::_Name, name);\n return new MolStandardize::TautomerTransform(tautomer, bond_types, charges);\n}\n\nMolStandardize::TautomerTransform* getTautomer(const std::string& tmpStr) {\n if (tmpStr.length() == 0 || tmpStr.substr(0, 2) == \"\/\/\") {\n \/\/ empty or comment line\n return nullptr;\n }\n boost::char_separator<char> tabSep(\"\\t\");\n tokenizer tokens(tmpStr, tabSep);\n std::vector<std::string> result(tokens.begin(), tokens.end());\n\n \/\/ tautomer information to collect from each line\n std::string name;\n std::string smarts;\n std::string bond_str;\n std::string charge_str;\n\n \/\/ line must have at least two tab separated values\n if (result.size() < 2) {\n BOOST_LOG(rdWarningLog) << \"Invalid line: \" << tmpStr << std::endl;\n return nullptr;\n }\n \/\/ line only has name and smarts\n if (result.size() == 2) {\n name = result[0];\n smarts = result[1];\n }\n \/\/ line has name, smarts, bonds\n if (result.size() == 3) {\n name = result[0];\n smarts = 
result[1];\n bond_str = result[2];\n }\n \/\/ line has name, smarts, bonds, charges\n if (result.size() == 4) {\n name = result[0];\n smarts = result[1];\n bond_str = result[2];\n charge_str = result[3];\n }\n\n boost::erase_all(smarts, \" \");\n boost::erase_all(name, \" \");\n boost::erase_all(bond_str, \" \");\n boost::erase_all(charge_str, \" \");\n\n return getTautomer(name, smarts, bond_str, charge_str);\n}\n} \/\/ namespace\n\nnamespace MolStandardize {\n\nstd::vector stringToBondType(std::string bond_str) {\n std::vector bonds;\n for (const auto& c : bond_str) {\n switch (c) {\n case '-':\n bonds.push_back(Bond::SINGLE);\n break;\n case '=':\n bonds.push_back(Bond::DOUBLE);\n break;\n case '#':\n bonds.push_back(Bond::TRIPLE);\n break;\n case ':':\n bonds.push_back(Bond::AROMATIC);\n break;\n }\n }\n return bonds;\n}\n\nstd::vector stringToCharge(std::string charge_str) {\n std::vector charges;\n for (const auto& c : charge_str) {\n switch (c) {\n case '+':\n charges.push_back(1);\n break;\n case '0':\n charges.push_back(0);\n break;\n case '-':\n charges.push_back(-1);\n break;\n default:\n throw ValueErrorException(\"Charge symbol not recognised.\");\n }\n }\n return charges;\n}\n\nstd::vector readTautomers(std::string fileName) {\n std::ifstream inStream(fileName.c_str());\n if ((!inStream) || (inStream.bad())) {\n std::ostringstream errout;\n errout << \"Bad input file \" << fileName;\n throw BadFileException(errout.str());\n }\n std::vector tautomers = readTautomers(inStream);\n return tautomers;\n}\n\nstd::vector readTautomers(std::istream& inStream,\n int nToRead) {\n if (inStream.bad()) {\n throw BadFileException(\"Bad stream contents.\");\n }\n std::vector tautomers;\n if (nToRead > 0) {\n tautomers.reserve(nToRead);\n }\n const int MAX_LINE_LEN = 512;\n char inLine[MAX_LINE_LEN];\n std::string tmpstr;\n int nRead = 0;\n while (!inStream.eof() && !inStream.fail() &&\n (nToRead < 0 || nRead < nToRead)) {\n inStream.getline(inLine, MAX_LINE_LEN, '\\n');\n tmpstr = inLine;\n \/\/ parse the tautomer on this line (if there is one)\n TautomerTransform* transform = getTautomer(tmpstr);\n if (transform) {\n tautomers.emplace_back(*transform);\n delete transform;\n nRead++;\n }\n }\n\n return tautomers;\n}\n\nstd::vector readTautomers(\n const std::vector<\n std::tuple>& data) {\n std::vector tautomers;\n for (const auto& tpl : data) {\n auto transform = getTautomer(std::get<0>(tpl), std::get<1>(tpl),\n std::get<2>(tpl), std::get<3>(tpl));\n if (transform) {\n tautomers.emplace_back(*transform);\n }\n }\n return tautomers;\n}\n\n} \/\/ namespace MolStandardize\n} \/\/ namespace RDKit\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":141} {"diff_hunk":"@@ -108,7 +108,7 @@ int main(int argc, char *argv[]) {\n }\n \n } catch (exception& e) {\n- if (options::get()->has_bool(\"stack_trace_to_file\")) {\n+ if (options::get()->get_bool(\"stack_trace_to_file\")) {\n std::ostringstream ss(\"stack_trace\");\n const auto& rank = get_rank_in_world();\n if (rank >= 0) {","source_code":"\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.\n\/\/ Produced at the Lawrence Livermore National Laboratory.\n\/\/ Written by the LBANN Research Team (B. Van Essen, et al.) listed in\n\/\/ the CONTRIBUTORS file. 
\n\/\/\n\/\/ LLNL-CODE-697807.\n\/\/ All rights reserved.\n\/\/\n\/\/ This file is part of LBANN: Livermore Big Artificial Neural Network\n\/\/ Toolkit. For details, see http:\/\/software.llnl.gov\/LBANN or\n\/\/ https:\/\/github.com\/LLNL\/LBANN.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"Licensee\"); you\n\/\/ may not use this file except in compliance with the License. You may\n\/\/ obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the license.\n\/\/\n\/\/ lbann_proto.cpp - prototext application\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n#include \"lbann\/lbann.hpp\"\n#include \"lbann\/proto\/proto_common.hpp\"\n#include \"lbann\/utils\/protobuf_utils.hpp\"\n#include \"lbann\/data_store\/generic_data_store.hpp\"\n#include \n\n\nusing namespace lbann;\n\nint main(int argc, char *argv[]) {\n int random_seed = lbann_default_random_seed;\n world_comm_ptr comm = initialize(argc, argv, random_seed);\n const bool master = comm->am_world_master();\n\n if (master) {\n std::cout << \"\\n\\n==============================================================\\n\"\n << \"STARTING lbann with this command line:\\n\";\n for (int j=0; jinit(argc, argv);\n if (opts->has_string(\"h\") or opts->has_string(\"help\") or argc == 1) {\n print_help(*comm);\n return EXIT_SUCCESS;\n }\n\n \/\/this must be called after call to opts->init();\n if (!opts->has_bool(\"disable_signal_handler\")) {\n std::string file_base = (opts->has_bool(\"stack_trace_to_file\") ?\n \"stack_trace\" : \"\");\n stack_trace::register_signal_handler(file_base);\n }\n\n \/\/to activate, must specify --st_on on cmd line\n stack_profiler::get()->activate(comm->get_rank_in_world());\n\n \/\/ Initalize a global I\/O thread pool\n std::shared_ptr io_thread_pool = construct_io_thread_pool(comm.get());\n\n auto pbs = protobuf_utils::load_prototext(master, argc, argv);\n lbann_data::LbannPB pb = *(pbs[0]);\n\n lbann_data::Model *pb_model = pb.mutable_model();\n\n auto model = build_model_from_prototext(argc, argv, pb,\n comm.get(), io_thread_pool, true);\n\n if (opts->has_string(\"create_tarball\")) {\n return EXIT_SUCCESS;\n }\n\n if (! 
(opts->has_bool(\"exit_after_setup\") && opts->get_bool(\"exit_after_setup\"))) {\n\n \/\/ Train model\n model->train(pb_model->num_epochs());\n\n \/\/ Evaluate model on test set\n model->evaluate(execution_mode::testing);\n\n \/\/has no affect unless option: --st_on was given\n stack_profiler::get()->print();\n\n } else {\n if (comm->am_world_master()) {\n std::cout <<\n \"--------------------------------------------------------------------------------\\n\"\n \"ALERT: model has been setup; we are now exiting due to command\\n\"\n \" line option: --exit_after_setup\\n\"\n \"--------------------------------------------------------------------------------\\n\";\n }\n\n \/\/has no affect unless option: --st_on was given\n stack_profiler::get()->print();\n }\n\n } catch (exception& e) {\n if (options::get()->has_bool(\"stack_trace_to_file\")) {\n std::ostringstream ss(\"stack_trace\");\n const auto& rank = get_rank_in_world();\n if (rank >= 0) {\n ss << \"_rank\" << rank;\n }\n ss << \".txt\";\n std::ofstream fs(ss.str());\n e.print_report(fs);\n }\n El::ReportException(e);\n return EXIT_FAILURE;\n } catch (std::exception& e) {\n El::ReportException(e);\n return EXIT_FAILURE;\n }\n\n return EXIT_SUCCESS;\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":142} {"diff_hunk":"@@ -61,7 +61,8 @@ ReceiverResource::ReceiverResource(ReceiverResource&& rValueResource)\n max_message_size_ = rValueResource.max_message_size_;\n }\n \n-bool ReceiverResource::SupportsLocator(const Locator_t& localLocator)\n+bool ReceiverResource::SupportsLocator(\n+ const Locator_t& localLocator)\n {\n if (LocatorMapsToManagedChannel)\n {","source_code":"\/\/ Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n#include \n#include \n#include \n#include \n\n#define IDSTRING \"(ID:\" << std::this_thread::get_id() <<\") \"<<\n\nusing namespace std;\nusing namespace eprosima::fastdds::rtps;\n\nnamespace eprosima{\nnamespace fastrtps{\nnamespace rtps{\n\nReceiverResource::ReceiverResource(\n\t\t\tTransportInterface& transport,\n\t\t\tconst Locator_t& locator,\n\t\t\tuint32_t max_recv_buffer_size)\n : Cleanup(nullptr)\n , LocatorMapsToManagedChannel(nullptr)\n , mValid(false)\n , mtx()\n , receiver(nullptr)\n , max_message_size_(max_recv_buffer_size)\n{\n \/\/ Internal channel is opened and assigned to this resource.\n mValid = transport.OpenInputChannel(locator, this, max_message_size_);\n if (!mValid)\n {\n return; \/\/ Invalid resource to be discarded by the factory.\n }\n\n \/\/ Implementation functions are bound to the right transport parameters\n Cleanup = [&transport, locator]() { transport.CloseInputChannel(locator); };\n LocatorMapsToManagedChannel = [&transport, locator](const Locator_t& locatorToCheck) -> bool\n { return transport.DoInputLocatorsMatch(locator, locatorToCheck); };\n}\n\nReceiverResource::ReceiverResource(ReceiverResource&& rValueResource)\n{\n Cleanup.swap(rValueResource.Cleanup);\n 
LocatorMapsToManagedChannel.swap(rValueResource.LocatorMapsToManagedChannel);\n receiver = rValueResource.receiver;\n rValueResource.receiver = nullptr;\n mValid = rValueResource.mValid;\n rValueResource.mValid = false;\n max_message_size_ = rValueResource.max_message_size_;\n}\n\nbool ReceiverResource::SupportsLocator(const Locator_t& localLocator)\n{\n if (LocatorMapsToManagedChannel)\n {\n return LocatorMapsToManagedChannel(localLocator);\n }\n return false;\n}\n\nvoid ReceiverResource::RegisterReceiver(MessageReceiver* rcv)\n{\n std::unique_lock lock(mtx);\n if (receiver == nullptr)\n receiver = rcv;\n}\n\nvoid ReceiverResource::UnregisterReceiver(MessageReceiver* rcv)\n{\n std::unique_lock lock(mtx);\n if (receiver == rcv)\n receiver = nullptr;\n}\n\nvoid ReceiverResource::OnDataReceived(const octet * data, const uint32_t size,\n const Locator_t & localLocator, const Locator_t & remoteLocator)\n{\n (void)localLocator;\n\n std::unique_lock lock(mtx);\n MessageReceiver* rcv = receiver;\n\n if (rcv != nullptr)\n {\n CDRMessage_t msg(0);\n msg.wraps = true;\n msg.buffer = const_cast(data);\n msg.length = size;\n msg.max_size = size;\n msg.reserved_size = size;\n\n \/\/ TODO: Should we unlock in case UnregisterReceiver is called from callback ?\n rcv->processCDRMsg(remoteLocator, &msg);\n }\n\n}\n\nvoid ReceiverResource::disable()\n{\n if (Cleanup)\n {\n Cleanup();\n }\n}\n\nReceiverResource::~ReceiverResource()\n{\n}\n\n} \/\/ namespace rtps\n} \/\/ namespace fastrtps\n} \/\/ namespace eprosima\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":143} {"diff_hunk":"@@ -23,6 +23,8 @@ DEFINE_string(peers, \"\", \"It is a list of IPs split by comma,\"\n \"the ips number equals replica number.\"\n \"If empty, it means replica is 1\");\n DEFINE_string(local_ip, \"\", \"Local ip speicified for NetworkUtils::getLocalIP\");\n+DEFINE_int32(num_workers, 4, \"Number of worker threads\");\n+DEFINE_int32(num_io_threads, 16, \"Number of IO threads\");\n DECLARE_string(part_man_type);\n \n DEFINE_string(pid_file, \"pids\/nebula-metad.pid\", \"File to hold the process id\");","source_code":"\/* Copyright (c) 2018 vesoft inc. 
All rights reserved.\n *\n * This source code is licensed under Apache 2.0 License,\n * attached with Common Clause Condition 1.0, found in the LICENSES directory.\n *\/\n\n#include \"base\/Base.h\"\n#include \n#include \"meta\/MetaServiceHandler.h\"\n#include \"meta\/MetaHttpHandler.h\"\n#include \"webservice\/WebService.h\"\n#include \"network\/NetworkUtils.h\"\n#include \"process\/ProcessUtils.h\"\n#include \"kvstore\/PartManager.h\"\n\nusing nebula::ProcessUtils;\nusing nebula::Status;\n\nDEFINE_int32(port, 45500, \"Meta daemon listening port\");\nDEFINE_bool(reuse_port, true, \"Whether to turn on the SO_REUSEPORT option\");\nDEFINE_string(data_path, \"\", \"Root data path\");\nDEFINE_string(peers, \"\", \"It is a list of IPs split by comma,\"\n \"the ips number equals replica number.\"\n \"If empty, it means replica is 1\");\nDEFINE_string(local_ip, \"\", \"Local ip speicified for NetworkUtils::getLocalIP\");\nDECLARE_string(part_man_type);\n\nDEFINE_string(pid_file, \"pids\/nebula-metad.pid\", \"File to hold the process id\");\nDEFINE_bool(daemonize, true, \"Whether run as a daemon process\");\n\nstatic std::unique_ptr gServer;\n\nstatic void signalHandler(int sig);\nstatic Status setupSignalHandler();\n\nint main(int argc, char *argv[]) {\n folly::init(&argc, &argv, true);\n if (FLAGS_data_path.empty()) {\n LOG(ERROR) << \"Meta Data Path should not empty\";\n return EXIT_FAILURE;\n }\n\n if (FLAGS_daemonize) {\n google::SetStderrLogging(google::FATAL);\n } else {\n google::SetStderrLogging(google::INFO);\n }\n\n \/\/ Detect if the server has already been started\n auto pidPath = FLAGS_pid_file;\n auto status = ProcessUtils::isPidAvailable(pidPath);\n if (!status.ok()) {\n LOG(ERROR) << status;\n return EXIT_FAILURE;\n }\n\n if (FLAGS_daemonize) {\n status = ProcessUtils::daemonize(pidPath);\n if (!status.ok()) {\n LOG(ERROR) << status;\n return EXIT_FAILURE;\n }\n } else {\n status = ProcessUtils::makePidFile(pidPath);\n if (!status.ok()) {\n LOG(ERROR) << status;\n return EXIT_FAILURE;\n }\n }\n\n LOG(INFO) << \"Starting Meta HTTP Service\";\n nebula::WebService::registerHandler(\"\/status\", [] {\n return new nebula::meta::MetaHttpHandler();\n });\n status = nebula::WebService::start();\n if (!status.ok()) {\n LOG(ERROR) << \"Failed to start web service: \" << status;\n return EXIT_FAILURE;\n }\n\n auto result = nebula::network::NetworkUtils::getLocalIP(FLAGS_local_ip);\n if (!result.ok()) {\n LOG(ERROR) << \"Get local ip failed! 
status:\" << result.status();\n return EXIT_FAILURE;\n }\n auto hostAddrRet = nebula::network::NetworkUtils::toHostAddr(result.value(), FLAGS_port);\n if (!hostAddrRet.ok()) {\n LOG(ERROR) << \"Bad local host addr, status:\" << hostAddrRet.status();\n return EXIT_FAILURE;\n }\n auto& localHost = hostAddrRet.value();\n\n auto peersRet = nebula::network::NetworkUtils::toHosts(FLAGS_peers);\n if (!peersRet.ok()) {\n LOG(ERROR) << \"Can't get peers address, status:\" << peersRet.status();\n return EXIT_FAILURE;\n }\n \/\/ Setup the signal handlers\n status = setupSignalHandler();\n if (!status.ok()) {\n LOG(ERROR) << status;\n return EXIT_FAILURE;\n }\n\n auto partMan\n = std::make_unique();\n \/\/ The meta server has only one space, one part.\n partMan->addPart(0, 0, std::move(peersRet.value()));\n\n nebula::kvstore::KVOptions options;\n options.local_ = localHost;\n options.dataPaths_ = {FLAGS_data_path};\n options.partMan_ = std::move(partMan);\n std::unique_ptr kvstore(\n nebula::kvstore::KVStore::instance(std::move(options)));\n\n auto handler = std::make_shared(kvstore.get());\n\n nebula::operator<<(operator<<(LOG(INFO), \"The meta deamon start on \"), localHost);\n try {\n gServer = std::make_unique();\n gServer->setInterface(std::move(handler));\n gServer->setPort(FLAGS_port);\n gServer->setReusePort(FLAGS_reuse_port);\n gServer->setIdleTimeout(std::chrono::seconds(0)); \/\/ No idle timeout on client connection\n gServer->serve(); \/\/ Will wait until the server shuts down\n } catch (const std::exception &e) {\n LOG(ERROR) << \"Exception thrown: \" << e.what();\n return EXIT_FAILURE;\n }\n\n LOG(INFO) << \"The meta Daemon stopped\";\n}\n\n\nStatus setupSignalHandler() {\n ::signal(SIGPIPE, SIG_IGN);\n ::signal(SIGINT, signalHandler);\n ::signal(SIGTERM, signalHandler);\n return Status::OK();\n}\n\n\nvoid signalHandler(int sig) {\n switch (sig) {\n case SIGINT:\n case SIGTERM:\n FLOG_INFO(\"Signal %d(%s) received, stopping this server\", sig, ::strsignal(sig));\n nebula::WebService::stop();\n gServer->stop();\n break;\n default:\n FLOG_ERROR(\"Signal %d(%s) received but ignored\", sig, ::strsignal(sig));\n }\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":144} {"diff_hunk":"@@ -112,6 +112,26 @@ std::string NebulaKeyUtils::edgePrefix(PartitionID partId, VertexID srcId, EdgeT\n return key;\n }\n \n+\/\/ static\n+std::string NebulaKeyUtils::edgePrefix(PartitionID partId,\n+ VertexID srcId,\n+ EdgeType type,\n+ EdgeRanking rank,\n+ VertexID dstId) {\n+ type |= kEdgeMaskSet;\n+ int32_t item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kData);\n+ std::string key;\n+ key.reserve(sizeof(PartitionID) + sizeof(VertexID)\n+ + sizeof(EdgeType) + sizeof(VertexID)\n+ + sizeof(EdgeRanking));\n+ key.append(reinterpret_cast(&item), sizeof(PartitionID))\n+ .append(reinterpret_cast(&srcId), sizeof(VertexID))\n+ .append(reinterpret_cast(&type), sizeof(EdgeType))\n+ .append(reinterpret_cast(&rank), sizeof(EdgeRanking))\n+ .append(reinterpret_cast(&dstId), sizeof(VertexID));\n+ return key;\n+}\n+\n \/\/ static\n std::string NebulaKeyUtils::prefix(PartitionID partId) {\n PartitionID item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kData);","source_code":"\/* Copyright (c) 2018 vesoft inc. 
All rights reserved.\n *\n * This source code is licensed under Apache 2.0 License,\n * attached with Common Clause Condition 1.0, found in the LICENSES directory.\n *\/\n\n#include \"base\/NebulaKeyUtils.h\"\n\nnamespace nebula {\n\n\/\/ static\nstd::string NebulaKeyUtils::vertexKey(PartitionID partId, VertexID vId,\n TagID tagId, TagVersion tv) {\n tagId &= kTagMaskSet;\n int32_t item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kData);\n\n std::string key;\n key.reserve(kVertexLen);\n key.append(reinterpret_cast(&item), sizeof(int32_t))\n .append(reinterpret_cast(&vId), sizeof(VertexID))\n .append(reinterpret_cast(&tagId), sizeof(TagID))\n .append(reinterpret_cast(&tv), sizeof(TagVersion));\n return key;\n}\n\n\/\/ static\nstd::string NebulaKeyUtils::edgeKey(PartitionID partId,\n VertexID srcId,\n EdgeType type,\n EdgeRanking rank,\n VertexID dstId,\n EdgeVersion ev) {\n type |= kEdgeMaskSet;\n int32_t item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kData);\n\n std::string key;\n key.reserve(kEdgeLen);\n key.append(reinterpret_cast(&item), sizeof(PartitionID))\n .append(reinterpret_cast(&srcId), sizeof(VertexID))\n .append(reinterpret_cast(&type), sizeof(EdgeType))\n .append(reinterpret_cast(&rank), sizeof(EdgeRanking))\n .append(reinterpret_cast(&dstId), sizeof(VertexID))\n .append(reinterpret_cast(&ev), sizeof(EdgeVersion));\n return key;\n}\n\n\/\/ static\nstd::string NebulaKeyUtils::systemCommitKey(PartitionID partId) {\n int32_t item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kSystem);\n uint32_t type = static_cast(NebulaSystemKeyType::kSystemCommit);\n std::string key;\n key.reserve(kSystemLen);\n key.append(reinterpret_cast(&item), sizeof(PartitionID))\n .append(reinterpret_cast(&type), sizeof(NebulaSystemKeyType));\n return key;\n}\n\n\/\/ static\nstd::string NebulaKeyUtils::systemPartKey(PartitionID partId) {\n uint32_t item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kSystem);\n uint32_t type = static_cast(NebulaSystemKeyType::kSystemPart);\n std::string key;\n key.reserve(kSystemLen);\n key.append(reinterpret_cast(&item), sizeof(PartitionID))\n .append(reinterpret_cast(&type), sizeof(NebulaSystemKeyType));\n return key;\n}\n\n\/\/ static\nstd::string NebulaKeyUtils::uuidKey(PartitionID partId, const folly::StringPiece& name) {\n std::string key;\n key.reserve(sizeof(PartitionID) + name.size());\n int32_t item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kUUID);\n key.append(reinterpret_cast(&item), sizeof(int32_t))\n .append(name.data(), name.size());\n return key;\n}\n\n\/\/ static\nstd::string NebulaKeyUtils::kvKey(PartitionID partId, const folly::StringPiece& name) {\n std::string key;\n key.reserve(sizeof(PartitionID) + name.size());\n int32_t item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kData);\n key.append(reinterpret_cast(&item), sizeof(int32_t))\n .append(name.data(), name.size());\n return key;\n}\n\n\/\/ static\nstd::string NebulaKeyUtils::vertexPrefix(PartitionID partId, VertexID vId, TagID tagId) {\n tagId &= kTagMaskSet;\n PartitionID item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kData);\n\n std::string key;\n key.reserve(kVertexLen);\n key.append(reinterpret_cast(&item), sizeof(PartitionID))\n .append(reinterpret_cast(&vId), sizeof(VertexID))\n .append(reinterpret_cast(&tagId), sizeof(TagID));\n return key;\n}\n\n\/\/ static\nstd::string NebulaKeyUtils::edgePrefix(PartitionID partId, VertexID srcId, EdgeType type) {\n type |= kEdgeMaskSet;\n 
PartitionID item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kData);\n\n std::string key;\n key.reserve(sizeof(PartitionID) + sizeof(VertexID) + sizeof(EdgeType));\n key.append(reinterpret_cast(&item), sizeof(PartitionID))\n .append(reinterpret_cast(&srcId), sizeof(VertexID))\n .append(reinterpret_cast(&type), sizeof(EdgeType));\n return key;\n}\n\n\/\/ static\nstd::string NebulaKeyUtils::prefix(PartitionID partId) {\n PartitionID item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kData);\n std::string key;\n key.reserve(sizeof(PartitionID));\n key.append(reinterpret_cast(&item), sizeof(PartitionID));\n return key;\n}\n\n\/\/ static\nstd::string NebulaKeyUtils::vertexPrefix(PartitionID partId, VertexID vId) {\n PartitionID item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kData);\n std::string key;\n key.reserve(sizeof(PartitionID) + sizeof(VertexID));\n key.append(reinterpret_cast(&item), sizeof(PartitionID))\n .append(reinterpret_cast(&vId), sizeof(VertexID));\n return key;\n}\n\n\/\/ static\nstd::string NebulaKeyUtils::edgePrefix(PartitionID partId, VertexID vId) {\n PartitionID item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kData);\n std::string key;\n key.reserve(sizeof(PartitionID) + sizeof(VertexID));\n key.append(reinterpret_cast(&item), sizeof(PartitionID))\n .append(reinterpret_cast(&vId), sizeof(VertexID));\n return key;\n}\n\n\/\/ static\nstd::string NebulaKeyUtils::systemPrefix() {\n int8_t type = static_cast(NebulaKeyType::kSystem);\n std::string key;\n key.reserve(sizeof(int8_t));\n key.append(reinterpret_cast(&type), sizeof(int8_t));\n return key;\n}\n\n\/\/ static\nstd::string NebulaKeyUtils::prefix(PartitionID partId, VertexID src, EdgeType type,\n EdgeRanking ranking, VertexID dst) {\n type |= kEdgeMaskSet;\n PartitionID item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kData);\n\n std::string key;\n key.reserve(sizeof(PartitionID) + sizeof(VertexID) + sizeof(EdgeType)\n + sizeof(VertexID) + sizeof(EdgeRanking));\n key.append(reinterpret_cast(&item), sizeof(PartitionID))\n .append(reinterpret_cast(&src), sizeof(VertexID))\n .append(reinterpret_cast(&type), sizeof(EdgeType))\n .append(reinterpret_cast(&ranking), sizeof(EdgeRanking))\n .append(reinterpret_cast(&dst), sizeof(VertexID));\n return key;\n}\n\n} \/\/ namespace nebula\n\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":145} {"diff_hunk":"@@ -137,8 +137,7 @@ TopologicalTorsionEnvGenerator::getEnvironments(\n code = getTopologicalTorsionCode(\n pathCodes, topologicalTorsionArguments->df_includeChirality);\n }\n-\n- result.push_back(new TopologicalTorsionAtomEnv(code));\n+ result.push_back(new TopologicalTorsionAtomEnv(code, path));\n }\n }\n }","source_code":"\/\/\n\/\/ Copyright (C) 2018 Boran Adas, Google Summer of Code\n\/\/\n\/\/ @@ All Rights Reserved @@\n\/\/ This file is part of the RDKit.\n\/\/ The contents are covered by the terms of the BSD license\n\/\/ which is included in the file license.txt, found at the root\n\/\/ of the RDKit source tree.\n\/\/\n\n#include \n#include \n#include \n\nnamespace RDKit {\nnamespace TopologicalTorsion {\n\nusing namespace AtomPairs;\n\ntemplate \nTopologicalTorsionArguments::TopologicalTorsionArguments(\n const bool includeChirality, const uint32_t torsionAtomCount,\n const bool countSimulation, const std::vector countBounds,\n const std::uint32_t fpSize)\n : FingerprintArguments(countSimulation, countBounds, fpSize),\n df_includeChirality(includeChirality),\n 
d_torsionAtomCount(torsionAtomCount){};\n\ntemplate \nOutputType TopologicalTorsionArguments::getResultSize() const {\n OutputType result = 1;\n return (result << (d_torsionAtomCount *\n (codeSize + (df_includeChirality ? numChiralBits : 0))));\n};\n\ntemplate \nstd::string TopologicalTorsionArguments::infoString() const {\n return \"TopologicalTorsionArguments includeChirality=\" +\n std::to_string(df_includeChirality) +\n \" torsionAtomCount=\" + std::to_string(d_torsionAtomCount);\n};\ntemplate \nOutputType TopologicalTorsionAtomEnv::getBitId(\n FingerprintArguments *, \/\/ arguments\n const std::vector *, \/\/ atomInvariants\n const std::vector *, \/\/ bondInvariants\n const AdditionalOutput *, \/\/ additionalOutput\n const bool \/\/ hashResults\n) const {\n return d_bitId;\n};\n\ntemplate \nTopologicalTorsionAtomEnv::TopologicalTorsionAtomEnv(\n OutputType bitId)\n : d_bitId(bitId){};\n\ntemplate \nstd::vector *>\nTopologicalTorsionEnvGenerator::getEnvironments(\n const ROMol &mol, FingerprintArguments *arguments,\n const std::vector *fromAtoms,\n const std::vector *ignoreAtoms,\n const int, \/\/ confId\n const AdditionalOutput *, \/\/ additionalOutput\n const std::vector *atomInvariants,\n const std::vector *, \/\/ bondInvariants\n const bool hashResults) const {\n auto *topologicalTorsionArguments =\n dynamic_cast *>(arguments);\n\n std::vector *> result =\n std::vector *>();\n\n boost::dynamic_bitset<> *fromAtomsBV = nullptr;\n if (fromAtoms) {\n fromAtomsBV = new boost::dynamic_bitset<>(mol.getNumAtoms());\n for (auto fAt : *fromAtoms) {\n fromAtomsBV->set(fAt);\n }\n }\n boost::dynamic_bitset<> *ignoreAtomsBV = nullptr;\n if (ignoreAtoms) {\n ignoreAtomsBV = new boost::dynamic_bitset<>(mol.getNumAtoms());\n for (auto fAt : *ignoreAtoms) {\n ignoreAtomsBV->set(fAt);\n }\n }\n boost::dynamic_bitset<> pAtoms(mol.getNumAtoms());\n PATH_LIST paths = findAllPathsOfLengthN(\n mol, topologicalTorsionArguments->d_torsionAtomCount, false);\n for (PATH_LIST::const_iterator pathIt = paths.begin(); pathIt != paths.end();\n ++pathIt) {\n bool keepIt = true;\n if (fromAtomsBV) {\n keepIt = false;\n }\n std::vector pathCodes;\n const PATH_TYPE &path = *pathIt;\n if (fromAtomsBV) {\n if (fromAtomsBV->test(static_cast(path.front())) ||\n fromAtomsBV->test(static_cast(path.back()))) {\n keepIt = true;\n }\n }\n if (keepIt && ignoreAtomsBV) {\n for (int pElem : path) {\n if (ignoreAtomsBV->test(pElem)) {\n keepIt = false;\n break;\n }\n }\n }\n if (keepIt) {\n pAtoms.reset();\n for (auto pIt = path.begin(); pIt < path.end(); ++pIt) {\n \/\/ look for a cycle that doesn't start at the first atom\n \/\/ we can't effectively canonicalize these at the moment\n \/\/ (was github #811)\n if (pIt != path.begin() && *pIt != *(path.begin()) && pAtoms[*pIt]) {\n pathCodes.clear();\n break;\n }\n pAtoms.set(*pIt);\n unsigned int code = (*atomInvariants)[*pIt] % ((1 << codeSize) - 1) + 1;\n \/\/ subtract off the branching number:\n if (pIt != path.begin() && pIt + 1 != path.end()) {\n --code;\n }\n pathCodes.push_back(code);\n }\n if (pathCodes.size()) {\n OutputType code;\n if (hashResults) {\n code = getTopologicalTorsionHash(pathCodes);\n } else {\n code = getTopologicalTorsionCode(\n pathCodes, topologicalTorsionArguments->df_includeChirality);\n }\n\n result.push_back(new TopologicalTorsionAtomEnv(code));\n }\n }\n }\n delete fromAtomsBV;\n delete ignoreAtomsBV;\n\n return result;\n};\n\ntemplate \nstd::string TopologicalTorsionEnvGenerator::infoString() const {\n return 
\"TopologicalTorsionEnvGenerator\";\n};\n\ntemplate \nFingerprintGenerator *getTopologicalTorsionGenerator(\n const bool includeChirality, const uint32_t torsionAtomCount,\n AtomInvariantsGenerator *atomInvariantsGenerator,\n const bool countSimulation, const std::vector countBounds,\n const std::uint32_t fpSize, const bool ownsAtomInvGen) {\n auto *envGenerator = new TopologicalTorsionEnvGenerator();\n\n auto *arguments = new TopologicalTorsionArguments(\n includeChirality, torsionAtomCount, countSimulation, countBounds, fpSize);\n\n bool ownsAtomInvGenerator = ownsAtomInvGen;\n if (!atomInvariantsGenerator) {\n atomInvariantsGenerator =\n new AtomPair::AtomPairAtomInvGenerator(includeChirality, true);\n ownsAtomInvGenerator = true;\n }\n\n return new FingerprintGenerator(envGenerator, arguments,\n atomInvariantsGenerator, nullptr,\n ownsAtomInvGenerator, false);\n};\n\n\/\/ Topological torsion fingerprint does not support 32 bit output yet\n\ntemplate RDKIT_FINGERPRINTS_EXPORT FingerprintGenerator *\ngetTopologicalTorsionGenerator(const bool includeChirality,\n const uint32_t torsionAtomCount,\n AtomInvariantsGenerator *atomInvariantsGenerator,\n const bool countSimulation,\n const std::vector countBounds,\n const std::uint32_t fpSize,\n const bool ownsAtomInvGen);\n\n} \/\/ namespace TopologicalTorsion\n} \/\/ namespace RDKit\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":146} {"diff_hunk":"@@ -187,13 +187,19 @@ void init_io_random(int seed) {\n \n ::io_generator_seed_base = seed;\n ::io_generator_seed_inited = true;\n- \/\/\/ Reset the init flag so that generator will reinitialize\n- ::io_generator_inited = false;\n-\n ::fast_io_generator_seed_base = seed;\n ::fast_io_generator_seed_inited = true;\n- \/\/\/ Reset the init flag so that generator will reinitialize\n- ::fast_io_generator_inited = false;\n+\n+ ::io_generator.resize(num_io_RNGs);\n+ ::fast_io_generator.resize(num_io_RNGs);\n+ ::io_generator_inited.resize(num_io_RNGs);\n+ ::fast_io_generator_inited.resize(num_io_RNGs);\n+ for(int i = 0; i < num_io_RNGs; i++) {\n+ \/\/\/ Reset the init flag so that I\/O generator will reinitialize\n+ ::io_generator_inited[i] = false;\n+ \/\/\/ Reset the init flag so that fast I\/O generator will reinitialize\n+ ::fast_io_generator_inited[i] = false;\n+ }\n }\n \n } \/\/ namespace lbann","source_code":"\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.\n\/\/ Produced at the Lawrence Livermore National Laboratory.\n\/\/ Written by the LBANN Research Team (B. Van Essen, et al.) listed in\n\/\/ the CONTRIBUTORS file. \n\/\/\n\/\/ LLNL-CODE-697807.\n\/\/ All rights reserved.\n\/\/\n\/\/ This file is part of LBANN: Livermore Big Artificial Neural Network\n\/\/ Toolkit. For details, see http:\/\/software.llnl.gov\/LBANN or\n\/\/ https:\/\/github.com\/LLNL\/LBANN.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"Licensee\"); you\n\/\/ may not use this file except in compliance with the License. You may\n\/\/ obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. 
See the License for the specific language governing\n\/\/ permissions and limitations under the license.\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n#include \n#include \"lbann\/utils\/random_number_generators.hpp\"\n#include \"lbann\/utils\/hash.hpp\"\n#include \"lbann\/utils\/exception.hpp\"\n#include \n\nnamespace {\n#ifdef __ICC\nlbann::rng_gen generator;\n#pragma omp threadprivate(generator)\n\nlbann::fast_rng_gen fast_generator;\n#pragma omp threadprivate(fast_generator)\n#else\n\/\/ Random number generator, file-visible only.\n\/\/ Defined like this to work around a GCC problem with threadprivate objects:\n\/\/ https:\/\/stackoverflow.com\/questions\/23552077\/how-to-define-a-object-or-struct-as-threadprivate-in-openmp\/\nextern lbann::rng_gen generator;\n#pragma omp threadprivate(generator)\nlbann::rng_gen generator;\n\nextern lbann::fast_rng_gen fast_generator;\n#pragma omp threadprivate(fast_generator)\nlbann::fast_rng_gen fast_generator;\n#endif\n\nbool generator_inited = false;\nbool fast_generator_inited = false;\n\nthread_local lbann::rng_gen data_seq_generator;\nthread_local bool data_seq_generator_inited = false;\nint data_seq_generator_seed_base = 0;\nbool data_seq_generator_seed_inited = false;\n\nthread_local lbann::rng_gen io_generator;\nthread_local bool io_generator_inited = false;\nint io_generator_seed_base = 0;\nbool io_generator_seed_inited = false;\n\nthread_local lbann::fast_rng_gen fast_io_generator;\nthread_local bool fast_io_generator_inited = false;\nint fast_io_generator_seed_base = 0;\nbool fast_io_generator_seed_inited = false;\n}\n\nnamespace lbann {\n\nrng_gen& get_generator() {\n if (!::generator_inited) { LBANN_ERROR(\"RNG seed not set\"); }\n return ::generator;\n}\n\nfast_rng_gen& get_fast_generator() {\n if (!::fast_generator_inited) { LBANN_ERROR(\"Fast RNG seed not set\"); }\n return ::fast_generator;\n}\n\nrng_gen& get_data_seq_generator() {\n if (!::data_seq_generator_inited) {\n if (!::data_seq_generator_seed_inited) { LBANN_ERROR(\"data sequence RNG seed not set\"); }\n ::data_seq_generator.seed(::data_seq_generator_seed_base);\n ::data_seq_generator_inited = true;\n }\n return ::data_seq_generator;\n}\n\nrng_gen& get_io_generator() {\n if (!::io_generator_inited) {\n if (!::io_generator_seed_inited) { LBANN_ERROR(\"I\/O RNG seed not set\"); }\n ::io_generator.seed(hash_combine(::io_generator_seed_base,\n std::this_thread::get_id()));\n ::io_generator_inited = true;\n }\n return ::io_generator;\n}\n\nfast_rng_gen& get_fast_io_generator() {\n if (!::fast_io_generator_inited) {\n if (!::fast_io_generator_seed_inited) { LBANN_ERROR(\"Fast I\/O RNG seed not set\"); }\n ::fast_io_generator.seed(hash_combine(::fast_io_generator_seed_base,\n std::this_thread::get_id()));\n ::fast_io_generator_inited = true;\n }\n return ::fast_io_generator;\n}\nvoid init_random(int seed, lbann_comm *comm) {\n generator_inited = true;\n fast_generator_inited = true;\n if (seed != -1) {\n \/\/ Seed every OpenMP thread, if present.\n \/\/ Note: Threadprivate OMP variables don't work with dynamic threads.\n#ifdef _OPENMP\n #pragma omp parallel\n {\n get_generator().seed(hash_combine(seed, omp_get_thread_num()));\n get_fast_generator().seed(hash_combine(seed, omp_get_thread_num()));\n }\n#else\n get_generator().seed(seed);\n get_fast_generator().seed(seed);\n#endif\n\n#ifdef LBANN_SET_EL_RNG\n \/\/ Set Elemental's RNG seed\n auto elemental_seed = 
hash_combine(seed, 104729); \/\/ 10000th prime\n int mpi_initialized = 0;\n MPI_Initialized(&mpi_initialized);\n if(mpi_initialized) {\n \/\/ If MPI is initialized mix in the rank to ensure that Hydrogen\n \/\/ has good RNGs. Note that under some configurations LBANN\n \/\/ will not do this, so it is good to ensure that Hydrogen is\n \/\/ well seeded.\n elemental_seed = (comm == nullptr\n ? hash_combine(elemental_seed, El::mpi::Rank(El::mpi::COMM_WORLD))\n : hash_combine(elemental_seed, comm->get_rank_in_trainer()));\n }\n El::Generator().seed(elemental_seed);\n#endif\n\n } else {\n \/\/ Seed with a random value.\n std::random_device rd;\n unsigned rand_val = rd();\n#ifdef _OPENMP\n #pragma omp parallel\n {\n get_generator().seed(hash_combine(rand_val, omp_get_thread_num()));\n get_fast_generator().seed(hash_combine(rand_val, omp_get_thread_num()));\n }\n#else\n get_generator().seed(rand_val);\n get_fast_generator().seed(rand_val);\n#endif\n#ifdef LBANN_SET_EL_RNG\n El::Generator().seed(rand_val);\n#endif\n }\n\n init_io_random(seed);\n}\n\nvoid init_data_seq_random(int seed) {\n if (seed == -1) {\n \/\/ Seed with a random value.\n std::random_device rd;\n seed = rd();\n }\n\n ::data_seq_generator_seed_base = seed;\n ::data_seq_generator_seed_inited = true;\n \/\/\/ Reset the init flag so that generator will reinitialize\n ::data_seq_generator_inited = false;\n}\n\nvoid init_io_random(int seed) {\n if (seed == -1) {\n \/\/ Seed with a random value.\n std::random_device rd;\n seed = rd();\n }\n\n ::io_generator_seed_base = seed;\n ::io_generator_seed_inited = true;\n \/\/\/ Reset the init flag so that generator will reinitialize\n ::io_generator_inited = false;\n\n ::fast_io_generator_seed_base = seed;\n ::fast_io_generator_seed_inited = true;\n \/\/\/ Reset the init flag so that generator will reinitialize\n ::fast_io_generator_inited = false;\n}\n\n} \/\/ namespace lbann\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":147} {"diff_hunk":"@@ -74,11 +74,8 @@ void LXQtMountPlugin::settingsChanged()\n delete mDeviceAction;\n mDeviceAction = DeviceAction::create(actionId, this);\n \n- connect(Solid::DeviceNotifier::instance(), &Solid::DeviceNotifier::deviceAdded,\n- mDeviceAction, &DeviceAction::onDeviceAdded);\n-\n- connect(Solid::DeviceNotifier::instance(), &Solid::DeviceNotifier::deviceRemoved,\n- mDeviceAction, &DeviceAction::onDeviceRemoved);\n+ connect(mPopup, &Popup::deviceAdded, mDeviceAction, &DeviceAction::onDeviceAdded);\n+ connect(mPopup, &Popup::deviceRemoved, mDeviceAction, &DeviceAction::onDeviceRemoved);\n }\n \n }","source_code":"\/* BEGIN_COMMON_COPYRIGHT_HEADER\n * (c)LGPL2+\n *\n * LXDE-Qt - a lightweight, Qt based, desktop toolset\n * http:\/\/razor-qt.org\n *\n * Copyright: 2010-2011 Razor team\n * Authors:\n * Petr Vanek \n *\n * This program or library is free software; you can redistribute it\n * and\/or modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * This library is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n * Lesser General Public License for more details.\n\n * You should have received a copy of the GNU Lesser General\n * Public License along with this library; if not, write to the\n * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,\n * Boston, MA 02110-1301 USA\n *\n * END_COMMON_COPYRIGHT_HEADER *\/\n\n#include \"lxqtmountplugin.h\"\n#include \"configuration.h\"\n\n#include \n\nLXQtMountPlugin::LXQtMountPlugin(const ILXQtPanelPluginStartupInfo &startupInfo):\n QObject(),\n ILXQtPanelPlugin(startupInfo),\n mPopup(nullptr),\n mDeviceAction(nullptr)\n{\n mButton = new Button;\n mPopup = new Popup(this);\n\n connect(mButton, &QToolButton::clicked, mPopup, &Popup::showHide);\n connect(mPopup, &Popup::visibilityChanged, mButton, &QToolButton::setDown);\n}\n\nLXQtMountPlugin::~LXQtMountPlugin()\n{\n delete mButton;\n delete mPopup;\n}\n\nQDialog *LXQtMountPlugin::configureDialog()\n{\n if (mPopup)\n mPopup->hide();\n\n Configuration *configWindow = new Configuration(settings());\n configWindow->setAttribute(Qt::WA_DeleteOnClose, true);\n return configWindow;\n}\n\nvoid LXQtMountPlugin::realign()\n{\n \/\/nothing to do\n}\n\nvoid LXQtMountPlugin::settingsChanged()\n{\n QString s = settings()->value(QLatin1String(CFG_KEY_ACTION)).toString();\n DeviceAction::ActionId actionId = DeviceAction::stringToActionId(s, DeviceAction::ActionMenu);\n\n if (mDeviceAction == nullptr || mDeviceAction->Type() != actionId)\n {\n delete mDeviceAction;\n mDeviceAction = DeviceAction::create(actionId, this);\n\n connect(Solid::DeviceNotifier::instance(), &Solid::DeviceNotifier::deviceAdded,\n mDeviceAction, &DeviceAction::onDeviceAdded);\n\n connect(Solid::DeviceNotifier::instance(), &Solid::DeviceNotifier::deviceRemoved,\n mDeviceAction, &DeviceAction::onDeviceRemoved);\n }\n\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":148} {"diff_hunk":"@@ -94,7 +94,8 @@ uint16_t Adafruit_MPR121::filteredData(uint8_t t) {\n }\n \n uint16_t Adafruit_MPR121::baselineData(uint8_t t) {\n- if (t > 12) return 0;\n+ if ((_sensitivity == MPR212_EXTRA_SENSITIVITY) && (t > 3)) return 0;\n+ if (t > 12) return 0; \/\/ MPR212_NORMAL_SENSITIVITY\n uint16_t bl = readRegister8(MPR121_BASELINE_0 + t);\n return (bl << 2);\n }","source_code":"\/***************************************************\n This is a library for the MPR121 I2C 12-chan Capacitive Sensor\n\n Designed specifically to work with the MPR121 sensor from Adafruit\n ----> https:\/\/www.adafruit.com\/products\/1982\n\n These sensors use I2C to communicate, 2+ pins are required to\n interface\n Adafruit invests time and resources providing this open source code,\n please support Adafruit and open-source hardware by purchasing\n products from Adafruit!\n\n Written by Limor Fried\/Ladyada for Adafruit Industries.\n BSD license, all text above must be included in any redistribution\n ****************************************************\/\n\n#include \"Adafruit_MPR121.h\"\n\nAdafruit_MPR121::Adafruit_MPR121() {\n}\n\nboolean Adafruit_MPR121::begin(uint8_t i2caddr) {\n \/\/Wire.begin(); called in ESPEasy framework\n\n _i2caddr = i2caddr;\n\n \/\/ soft reset\n writeRegister(MPR121_SOFTRESET, 0x63);\n delay(1);\n for (uint8_t i=0; i<0x7F; i++) {\n \/\/ Serial.print(\"$\"); Serial.print(i, HEX);\n \/\/ Serial.print(\": 0x\"); Serial.println(readRegister8(i));\n }\n\n\n writeRegister(MPR121_ECR, 0x0);\n\n uint8_t c = readRegister8(MPR121_CONFIG2);\n\n if (c != 0x24) return false;\n\n\n setThresholds(12, 6);\n writeRegister(MPR121_MHDR, 0x01);\n 
writeRegister(MPR121_NHDR, 0x01);\n writeRegister(MPR121_NCLR, 0x0E);\n writeRegister(MPR121_FDLR, 0x00);\n\n writeRegister(MPR121_MHDF, 0x01);\n writeRegister(MPR121_NHDF, 0x05);\n writeRegister(MPR121_NCLF, 0x01);\n writeRegister(MPR121_FDLF, 0x00);\n\n writeRegister(MPR121_NHDT, 0x00);\n writeRegister(MPR121_NCLT, 0x00);\n writeRegister(MPR121_FDLT, 0x00);\n\n writeRegister(MPR121_DEBOUNCE, 0);\n writeRegister(MPR121_CONFIG1, 0x10); \/\/ default, 16uA charge current\n writeRegister(MPR121_CONFIG2, 0x20); \/\/ 0.5uS encoding, 1ms period\n\n\/\/ writeRegister(MPR121_AUTOCONFIG0, 0x8F);\n\n\/\/ writeRegister(MPR121_UPLIMIT, 150);\n\/\/ writeRegister(MPR121_TARGETLIMIT, 100); \/\/ should be ~400 (100 shifted)\n\/\/ writeRegister(MPR121_LOWLIMIT, 50);\n \/\/ enable all electrodes\n writeRegister(MPR121_ECR, 0x8F); \/\/ start with first 5 bits of baseline tracking\n\n return true;\n}\n\nvoid Adafruit_MPR121::setThreshholds(uint8_t touch, uint8_t release) {\n\n setThresholds(touch, release);\n }\n\nvoid Adafruit_MPR121::setThresholds(uint8_t touch, uint8_t release) {\n for (uint8_t i=0; i<12; i++) {\n writeRegister(MPR121_TOUCHTH_0 + 2*i, touch);\n writeRegister(MPR121_RELEASETH_0 + 2*i, release);\n }\n}\n\nvoid Adafruit_MPR121::setThreshold(uint8_t t, uint8_t touch, uint8_t release) {\n if (t > 12) return;\n writeRegister(MPR121_TOUCHTH_0 + 2 * t, touch);\n writeRegister(MPR121_RELEASETH_0 + 2 * t, release);\n}\n\nuint16_t Adafruit_MPR121::filteredData(uint8_t t) {\n if (t > 12) return 0;\n return readRegister16(MPR121_FILTDATA_0L + t*2);\n}\n\nuint16_t Adafruit_MPR121::baselineData(uint8_t t) {\n if (t > 12) return 0;\n uint16_t bl = readRegister8(MPR121_BASELINE_0 + t);\n return (bl << 2);\n}\n\nuint16_t Adafruit_MPR121::touched(void) {\n uint16_t t = readRegister16(MPR121_TOUCHSTATUS_L);\n return t & 0x0FFF;\n}\n\n\/*********************************************************************\/\n\n\nuint8_t Adafruit_MPR121::readRegister8(uint8_t reg) {\n Wire.beginTransmission(_i2caddr);\n Wire.write(reg);\n Wire.endTransmission(false);\n while (Wire.requestFrom(_i2caddr, 1) != 1);\n return ( Wire.read());\n}\n\nuint16_t Adafruit_MPR121::readRegister16(uint8_t reg) {\n Wire.beginTransmission(_i2caddr);\n Wire.write(reg);\n Wire.endTransmission(false);\n while (Wire.requestFrom(_i2caddr, 2) != 2);\n uint16_t v = Wire.read();\n v |= ((uint16_t) Wire.read()) << 8;\n return v;\n}\n\n\/**************************************************************************\/\n\/*!\n @brief Writes 8-bits to the specified destination register\n*\/\n\/**************************************************************************\/\nvoid Adafruit_MPR121::writeRegister(uint8_t reg, uint8_t value) {\n Wire.beginTransmission(_i2caddr);\n Wire.write((uint8_t)reg);\n Wire.write((uint8_t)(value));\n Wire.endTransmission();\n}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":149} {"diff_hunk":"@@ -67,20 +67,20 @@ struct WaitListInfo\n \tWaitList priorityWaitList;\n \tWaitList waitList;\n \n-\tstd::pair findClient(const Player *player) {\n+\tstd::tuple findClient(const Player *player) {\n \t\tstd::size_t slot = 1;\n \t\tfor (auto it = priorityWaitList.begin(), end = priorityWaitList.end(); it != end; ++it, ++slot) {\n \t\t\tif (it->playerGUID == player->getGUID()) {\n-\t\t\t\treturn {it, slot};\n+\t\t\t\treturn std::make_tuple(std::ref(priorityWaitList), it, slot);\n \t\t\t}\n \t\t}\n \n \t\tfor (auto it = waitList.begin(), end = waitList.end(); it != end; ++it, ++slot) {\n \t\t\tif (it->playerGUID == 
player->getGUID()) {\n-\t\t\t\treturn {it, slot};\n+\t\t\t\treturn std::make_tuple(std::ref(waitList), it, slot);\n \t\t\t}\n \t\t}\n-\t\treturn {waitList.end(), slot};\n+\t\treturn std::make_tuple(std::ref(waitList), waitList.end(), slot);\n \t}\n };\n ","source_code":"\/**\n * The Forgotten Server - a free and open-source MMORPG server emulator\n * Copyright (C) 2019 Mark Samman \n *\n * This program is free software; you can redistribute it and\/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation; either version 2 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License along\n * with this program; if not, write to the Free Software Foundation, Inc.,\n * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n *\/\n\n#include \"otpch.h\"\n\n#include \"configmanager.h\"\n#include \"game.h\"\n#include \"waitlist.h\"\n\nextern ConfigManager g_config;\nextern Game g_game;\n\n\nnamespace {\n\nstruct Wait\n{\n\tconstexpr Wait(std::size_t timeout, uint32_t playerGUID) :\n\t\t\ttimeout(timeout), playerGUID(playerGUID) {}\n\n\tstd::size_t timeout;\n\tuint32_t playerGUID;\n};\n\nusing WaitList = std::list;\n\nvoid cleanupList(WaitList& list)\n{\n\tint64_t time = OTSYS_TIME();\n\n\tauto it = list.begin(), end = list.end();\n\twhile (it != end) {\n\t\tif ((it->timeout - time) <= 0) {\n\t\t\tit = list.erase(it);\n\t\t} else {\n\t\t\t++it;\n\t\t}\n\t}\n}\n\nstd::size_t getTimeout(std::size_t slot)\n{\n\t\/\/timeout is set to 15 seconds longer than expected retry attempt\n\treturn WaitingList::getTime(slot) + 15;\n}\n\n} \/\/ namespace\n\nstruct WaitListInfo\n{\n\tWaitList priorityWaitList;\n\tWaitList waitList;\n\n\tstd::pair findClient(const Player *player) {\n\t\tstd::size_t slot = 1;\n\t\tfor (auto it = priorityWaitList.begin(), end = priorityWaitList.end(); it != end; ++it, ++slot) {\n\t\t\tif (it->playerGUID == player->getGUID()) {\n\t\t\t\treturn {it, slot};\n\t\t\t}\n\t\t}\n\n\t\tfor (auto it = waitList.begin(), end = waitList.end(); it != end; ++it, ++slot) {\n\t\t\tif (it->playerGUID == player->getGUID()) {\n\t\t\t\treturn {it, slot};\n\t\t\t}\n\t\t}\n\t\treturn {waitList.end(), slot};\n\t}\n};\n\nWaitingList& WaitingList::getInstance()\n{\n\tstatic WaitingList waitingList;\n\treturn waitingList;\n}\n\nstd::size_t WaitingList::getTime(std::size_t slot)\n{\n\tif (slot < 5) {\n\t\treturn 5;\n\t} else if (slot < 10) {\n\t\treturn 10;\n\t} else if (slot < 20) {\n\t\treturn 20;\n\t} else if (slot < 50) {\n\t\treturn 60;\n\t} else {\n\t\treturn 120;\n\t}\n}\n\nbool WaitingList::clientLogin(const Player* player)\n{\n\tif (player->hasFlag(PlayerFlag_CanAlwaysLogin) || player->getAccountType() >= ACCOUNT_TYPE_GAMEMASTER) {\n\t\treturn true;\n\t}\n\n\tauto maxPlayers = static_cast(g_config.getNumber(ConfigManager::MAX_PLAYERS));\n\tif (maxPlayers == 0 || (info->priorityWaitList.empty() && info->waitList.empty() && g_game.getPlayersOnline() < maxPlayers)) {\n\t\treturn true;\n\t}\n\n\tcleanupList(info->priorityWaitList);\n\tcleanupList(info->waitList);\n\n\tWaitList::iterator it;\n\tWaitList::size_type slot;\n\tstd::tie(it, slot) = info->findClient(player);\n\tif (it != info->waitList.end()) {\n\t\tif 
((g_game.getPlayersOnline() + slot) <= maxPlayers) {\n\t\t\t\/\/should be able to login now\n\t\t\tinfo->waitList.erase(it);\n\t\t\treturn true;\n\t\t}\n\n\t\t\/\/let them wait a bit longer\n\t\tit->timeout = OTSYS_TIME() + (getTimeout(slot) * 1000);\n\t\treturn false;\n\t}\n\n\tslot = info->priorityWaitList.size();\n\tif (player->isPremium()) {\n\t\tinfo->priorityWaitList.emplace_back(OTSYS_TIME() + (getTimeout(slot + 1) * 1000), player->getGUID());\n\t} else {\n\t\tslot += info->waitList.size();\n\t\tinfo->waitList.emplace_back(OTSYS_TIME() + (getTimeout(slot + 1) * 1000), player->getGUID());\n\t}\n\treturn false;\n}\n\nstd::size_t WaitingList::getClientSlot(const Player* player)\n{\n\tWaitList::iterator it;\n\tWaitList::size_type slot;\n\tstd::tie(it, slot) = info->findClient(player);\n\tif (it == info->waitList.end()) {\n\t\treturn 0;\n\t}\n\treturn slot;\n}\n\nWaitingList::WaitingList() : info(new WaitListInfo) {}\n","lang_cluster":"C++","diff_tag":0,"review_comment":"","id":150} {"diff_hunk":"@@ -10,6 +10,7 @@ import (\n \t\"strconv\"\n \t\"strings\"\n \n+\t\"github.com\/opencontainers\/runc\/libcontainer\"\n \t\"github.com\/urfave\/cli\"\n )\n ","source_code":"\/\/ +build linux\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nvar psCommand = cli.Command{\n\tName: \"ps\",\n\tUsage: \"ps displays the processes running inside a container\",\n\tArgsUsage: ` [ps options]`,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"format, f\",\n\t\t\tValue: \"table\",\n\t\t\tUsage: `select one of: ` + formatOptions,\n\t\t},\n\t},\n\tAction: func(context *cli.Context) error {\n\t\tif err := checkArgs(context, 1, minArgs); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcontainer, err := getContainer(context)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpids, err := container.Processes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch context.String(\"format\") {\n\t\tcase \"table\":\n\t\tcase \"json\":\n\t\t\treturn json.NewEncoder(os.Stdout).Encode(pids)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"invalid format option\")\n\t\t}\n\n\t\t\/\/ [1:] is to remove command name, ex:\n\t\t\/\/ context.Args(): [containet_id ps_arg1 ps_arg2 ...]\n\t\t\/\/ psArgs: [ps_arg1 ps_arg2 ...]\n\t\t\/\/\n\t\tpsArgs := context.Args()[1:]\n\t\tif len(psArgs) == 0 {\n\t\t\tpsArgs = []string{\"-ef\"}\n\t\t}\n\n\t\tcmd := exec.Command(\"ps\", psArgs...)\n\t\toutput, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%s: %s\", err, output)\n\t\t}\n\n\t\tlines := strings.Split(string(output), \"\\n\")\n\t\tpidIndex, err := getPidIndex(lines[0])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(lines[0])\n\t\tfor _, line := range lines[1:] {\n\t\t\tif len(line) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfields := strings.Fields(line)\n\t\t\tp, err := strconv.Atoi(fields[pidIndex])\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unexpected pid '%s': %s\", fields[pidIndex], err)\n\t\t\t}\n\n\t\t\tfor _, pid := range pids {\n\t\t\t\tif pid == p {\n\t\t\t\t\tfmt.Println(line)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t},\n\tSkipArgReorder: true,\n}\n\nfunc getPidIndex(title string) (int, error) {\n\ttitles := strings.Fields(title)\n\n\tpidIndex := -1\n\tfor i, name := range titles {\n\t\tif name == \"PID\" {\n\t\t\treturn i, nil\n\t\t}\n\t}\n\n\treturn pidIndex, fmt.Errorf(\"couldn't find PID field in ps 
output\")\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":151} {"diff_hunk":"@@ -23,6 +23,7 @@ package thrift\n import (\n \t\"bytes\"\n \t\"fmt\"\n+\t\"io\"\n \t\"math\/rand\"\n \t\"reflect\"\n \t\"testing\"","source_code":"\/\/ Copyright (c) 2021 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage thrift\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"reflect\"\n\t\"testing\"\n\t\"testing\/quick\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"go.uber.org\/thriftrw\/protocol\/binary\"\n\t\"go.uber.org\/thriftrw\/wire\"\n)\n\nfunc TestDisableEnveloperEncode(t *testing.T) {\n\trand := rand.New(rand.NewSource(time.Now().Unix()))\n\n\ttests := []struct {\n\t\tvalue wire.Value\n\t\twant []byte\n\t}{\n\t\t{\n\t\t\twire.NewValueStruct(wire.Struct{Fields: []wire.Field{}}),\n\t\t\t[]byte{0x00},\n\t\t},\n\t\t{\n\t\t\twire.NewValueStruct(wire.Struct{Fields: []wire.Field{\n\t\t\t\t{ID: 1, Value: wire.NewValueI32(42)},\n\t\t\t}}),\n\t\t\t[]byte{\n\t\t\t\t0x08, 0x00, 0x01,\n\t\t\t\t0x00, 0x00, 0x00, 0x2a,\n\t\t\t\t0x00,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\te := wire.Envelope{Value: tt.value, Type: wire.Call}\n\t\tgenerate(&e.Name, rand)\n\t\tgenerate(&e.SeqID, rand)\n\n\t\tvar buffer bytes.Buffer\n\t\tproto := disableEnvelopingProtocol{binary.Default, wire.Reply}\n\t\tif !assert.NoError(t, proto.EncodeEnveloped(e, &buffer)) {\n\t\t\tcontinue\n\t\t}\n\n\t\tassert.Equal(t, tt.want, buffer.Bytes())\n\n\t\tgotE, err := proto.DecodeEnveloped(bytes.NewReader(tt.want))\n\t\tif !assert.NoError(t, err) {\n\t\t\tcontinue\n\t\t}\n\n\t\tassert.Equal(t, wire.Reply, gotE.Type)\n\t\tassert.True(t, wire.ValuesAreEqual(tt.value, gotE.Value))\n\t}\n}\n\n\/\/ generate generates a random value into the given pointer.\n\/\/\n\/\/ \tvar i int\n\/\/ \tgenerate(&i, rand)\n\/\/\n\/\/ If the type implements the quick.Generator interface, that is used.\nfunc generate(v interface{}, r *rand.Rand) {\n\tt := reflect.TypeOf(v)\n\tif t.Kind() != reflect.Ptr {\n\t\tpanic(fmt.Sprintf(\"%v is not a pointer type\", t))\n\t}\n\n\tout, ok := quick.Value(t.Elem(), r)\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"could not generate a value for %v\", t))\n\t}\n\n\treflect.ValueOf(v).Elem().Set(out)\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":152} {"diff_hunk":"@@ -15,6 +15,14 @@ import (\n \/\/ Compile-time variable\n var existingServer = \"False\"\n \n+const lockFile = 
\"\/var\/lock\/k3s-test.lock\"\n+\n+type K3sServer struct {\n+\tcmd *exec.Cmd\n+\tscanner *bufio.Scanner\n+\tlock int\n+}\n+\n func findK3sExecutable() string {\n \t\/\/ if running on an existing cluster, it maybe installed via k3s.service\n \t\/\/ or run manually from dist\/artifacts\/k3s","source_code":"package util\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"strings\"\n\n\t\"github.com\/rancher\/k3s\/pkg\/flock\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ Compile-time variable\nvar existingServer = \"False\"\n\nfunc findK3sExecutable() string {\n\t\/\/ if running on an existing cluster, it maybe installed via k3s.service\n\t\/\/ or run manually from dist\/artifacts\/k3s\n\tif IsExistingServer() {\n\t\tk3sBin, err := exec.LookPath(\"k3s\")\n\t\tif err == nil {\n\t\t\treturn k3sBin\n\t\t}\n\t}\n\tk3sBin := \"dist\/artifacts\/k3s\"\n\tfor {\n\t\t_, err := os.Stat(k3sBin)\n\t\tif err != nil {\n\t\t\tk3sBin = \"..\/\" + k3sBin\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn k3sBin\n}\n\n\/\/ IsRoot return true if the user is root (UID 0)\nfunc IsRoot() bool {\n\tcurrentUser, err := user.Current()\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn currentUser.Uid == \"0\"\n}\n\nfunc IsExistingServer() bool {\n\treturn existingServer == \"True\"\n}\n\n\/\/ K3sCmd launches the provided K3s command via exec. Command blocks until finished.\n\/\/ Command output from both Stderr and Stdout is provided via string.\n\/\/ cmdEx1, err := K3sCmd(\"etcd-snapshot\", \"ls\")\n\/\/ cmdEx2, err := K3sCmd(\"kubectl\", \"get\", \"pods\", \"-A\")\nfunc K3sCmd(cmdName string, cmdArgs ...string) (string, error) {\n\tk3sBin := findK3sExecutable()\n\t\/\/ Only run sudo if not root\n\tvar cmd *exec.Cmd\n\tif IsRoot() {\n\t\tk3sCmd := append([]string{cmdName}, cmdArgs...)\n\t\tcmd = exec.Command(k3sBin, k3sCmd...)\n\t} else {\n\t\tk3sCmd := append([]string{k3sBin, cmdName}, cmdArgs...)\n\t\tcmd = exec.Command(\"sudo\", k3sCmd...)\n\t}\n\tbyteOut, err := cmd.CombinedOutput()\n\treturn string(byteOut), err\n}\n\nfunc contains(source []string, target string) bool {\n\tfor _, s := range source {\n\t\tif s == target {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ ServerArgsPresent checks if the given arguments are found in the running k3s server\nfunc ServerArgsPresent(neededArgs []string) bool {\n\tcurrentArgs := K3sServerArgs()\n\tfor _, arg := range neededArgs {\n\t\tif !contains(currentArgs, arg) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ K3sServerArgs returns the list of arguments that the k3s server launched with\nfunc K3sServerArgs() []string {\n\tresults, err := K3sCmd(\"kubectl\", \"get\", \"nodes\", \"-o\", `jsonpath='{.items[0].metadata.annotations.k3s\\.io\/node-args}'`)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tres := strings.ReplaceAll(results, \"'\", \"\")\n\tvar args []string\n\tif err := json.Unmarshal([]byte(res), &args); err != nil {\n\t\tlogrus.Error(err)\n\t\treturn nil\n\t}\n\treturn args\n}\n\nfunc FindStringInCmdAsync(scanner *bufio.Scanner, target string) bool {\n\tfor scanner.Scan() {\n\t\tif strings.Contains(scanner.Text(), target) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype K3sServer struct {\n\tcmd *exec.Cmd\n\tscanner *bufio.Scanner\n\tlock int\n}\n\n\/\/ K3sStartServer acquires an exclusive lock on a temporary file, then launches a k3s cluster\n\/\/ with the provided arguments. 
Subsequent\/parallel calls to this function will block until\n\/\/ the original lock is cleared using K3sKillServer\nfunc K3sStartServer(cmdArgs ...string) (*K3sServer, error) {\n\tlogrus.Info(\"waiting to get server lock\")\n\tk3sLock, err := flock.Acquire(\"\/var\/lock\/k3s-test.lock\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tk3sBin := findK3sExecutable()\n\tvar cmd *exec.Cmd\n\tif IsRoot() {\n\t\tk3sCmd := append([]string{\"server\"}, cmdArgs...)\n\t\tcmd = exec.Command(k3sBin, k3sCmd...)\n\t} else {\n\t\tk3sCmd := append([]string{k3sBin, \"server\"}, cmdArgs...)\n\t\tcmd = exec.Command(\"sudo\", k3sCmd...)\n\t}\n\tcmdOut, _ := cmd.StderrPipe()\n\tcmd.Stderr = os.Stderr\n\terr = cmd.Start()\n\treturn &K3sServer{cmd, bufio.NewScanner(cmdOut), k3sLock}, err\n}\n\n\/\/ K3sKillServer terminates the running K3s server and unlocks the file for\n\/\/ other tests\nfunc K3sKillServer(server *K3sServer) error {\n\tif IsRoot() {\n\t\tif err := server.cmd.Process.Kill(); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t\/\/ Since k3s was launched as sudo, we can't just kill the process\n\t\tkillCmd := exec.Command(\"sudo\", \"pkill\", \"k3s\")\n\t\tif err := killCmd.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn flock.Release(server.lock)\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":153} {"diff_hunk":"@@ -72,7 +72,8 @@ func Example_withTLS() {\n \t\tlog.Fatalf(\"failed to create gRPC client TLS credentials: %v\", err)\n \t}\n \n-\texp, err := otlp.NewExporter(otlp.WithTLSCredentials(creds))\n+\texp, err := otlp.NewExporter(otlp.EmptyConfiguration,\n+\t\totlp.NewConnectionConfig(otlp.WithTLSCredentials(creds)))\n \tif err != nil {\n \t\tlog.Fatalf(\"failed to create the collector exporter: %v\", err)\n \t}","source_code":"\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage otlp_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"google.golang.org\/grpc\/credentials\"\n\n\t\"go.opentelemetry.io\/otel\/api\/global\"\n\t\"go.opentelemetry.io\/otel\/exporters\/otlp\"\n\tsdktrace \"go.opentelemetry.io\/otel\/sdk\/trace\"\n)\n\nfunc Example_insecure() {\n\texp, err := otlp.NewExporter(otlp.WithInsecure())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create the collector exporter: %v\", err)\n\t}\n\tdefer func() {\n\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\t\tdefer cancel()\n\t\tif err := exp.Shutdown(ctx); err != nil {\n\t\t\tglobal.Handle(err)\n\t\t}\n\t}()\n\n\ttp := sdktrace.NewProvider(\n\t\tsdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),\n\t\tsdktrace.WithBatcher(\n\t\t\texp,\n\t\t\t\/\/ add following two options to ensure flush\n\t\t\tsdktrace.WithBatchTimeout(5),\n\t\t\tsdktrace.WithMaxExportBatchSize(10),\n\t\t),\n\t)\n\tglobal.SetTracerProvider(tp)\n\n\ttracer := global.Tracer(\"test-tracer\")\n\n\t\/\/ Then use the OpenTelemetry tracing library, like we normally 
would.\n\tctx, span := tracer.Start(context.Background(), \"CollectorExporter-Example\")\n\tdefer span.End()\n\n\tfor i := 0; i < 10; i++ {\n\t\t_, iSpan := tracer.Start(ctx, fmt.Sprintf(\"Sample-%d\", i))\n\t\t<-time.After(6 * time.Millisecond)\n\t\tiSpan.End()\n\t}\n}\n\nfunc Example_withTLS() {\n\t\/\/ Please take at look at https:\/\/pkg.go.dev\/google.golang.org\/grpc\/credentials#TransportCredentials\n\t\/\/ for ways on how to initialize gRPC TransportCredentials.\n\tcreds, err := credentials.NewClientTLSFromFile(\"my-cert.pem\", \"\")\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create gRPC client TLS credentials: %v\", err)\n\t}\n\n\texp, err := otlp.NewExporter(otlp.WithTLSCredentials(creds))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create the collector exporter: %v\", err)\n\t}\n\tdefer func() {\n\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\t\tdefer cancel()\n\t\tif err := exp.Shutdown(ctx); err != nil {\n\t\t\tglobal.Handle(err)\n\t\t}\n\t}()\n\n\ttp := sdktrace.NewProvider(\n\t\tsdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),\n\t\tsdktrace.WithBatcher(\n\t\t\texp,\n\t\t\t\/\/ add following two options to ensure flush\n\t\t\tsdktrace.WithBatchTimeout(5),\n\t\t\tsdktrace.WithMaxExportBatchSize(10),\n\t\t),\n\t)\n\tglobal.SetTracerProvider(tp)\n\n\ttracer := global.Tracer(\"test-tracer\")\n\n\t\/\/ Then use the OpenTelemetry tracing library, like we normally would.\n\tctx, span := tracer.Start(context.Background(), \"Securely-Talking-To-Collector-Span\")\n\tdefer span.End()\n\n\tfor i := 0; i < 10; i++ {\n\t\t_, iSpan := tracer.Start(ctx, fmt.Sprintf(\"Sample-%d\", i))\n\t\t<-time.After(6 * time.Millisecond)\n\t\tiSpan.End()\n\t}\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":154} {"diff_hunk":"@@ -84,20 +84,20 @@ func TestChunkUploadDownload(t *testing.T) {\n \t\t)\n \n \t\t\/\/ try to fetch the same chunk\n-\t\tresp := request(t, client, http.MethodGet, resource(validHash), nil, http.StatusOK)\n+\t\tresp := request(t, client, http.MethodGet, resource(chunk.Address()), nil, http.StatusOK)\n \t\tdata, err := ioutil.ReadAll(resp.Body)\n \t\tif err != nil {\n \t\t\tt.Fatal(err)\n \t\t}\n \n-\t\tif !bytes.Equal(validContent, data) {\n+\t\tif !bytes.Equal(chunk.Data(), data) {\n \t\t\tt.Fatal(\"data retrieved doesnt match uploaded content\")\n \t\t}\n \t})\n \n \tt.Run(\"pin-invalid-value\", func(t *testing.T) {\n-\t\tjsonhttptest.Request(t, client, http.MethodPost, resource(validHash), http.StatusOK,\n-\t\t\tjsonhttptest.WithRequestBody(bytes.NewReader(validContent)),\n+\t\tjsonhttptest.Request(t, client, http.MethodPost, resource(chunk.Address()), http.StatusOK,\n+\t\t\tjsonhttptest.WithRequestBody(bytes.NewReader(chunk.Data())),\n \t\t\tjsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{\n \t\t\t\tMessage: http.StatusText(http.StatusOK),\n \t\t\t\tCode: http.StatusOK,","source_code":"\/\/ Copyright 2020 The Swarm Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage api_test\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"github.com\/ethersphere\/bee\/pkg\/logging\"\n\tstatestore \"github.com\/ethersphere\/bee\/pkg\/statestore\/mock\"\n\n\t\"github.com\/ethersphere\/bee\/pkg\/tags\"\n\n\t\"github.com\/ethersphere\/bee\/pkg\/api\"\n\t\"github.com\/ethersphere\/bee\/pkg\/jsonhttp\"\n\t\"github.com\/ethersphere\/bee\/pkg\/jsonhttp\/jsonhttptest\"\n\t\"github.com\/ethersphere\/bee\/pkg\/storage\"\n\t\"github.com\/ethersphere\/bee\/pkg\/storage\/mock\"\n\t\"github.com\/ethersphere\/bee\/pkg\/storage\/mock\/validator\"\n\t\"github.com\/ethersphere\/bee\/pkg\/swarm\"\n)\n\n\/\/ TestChunkUploadDownload uploads a chunk to an API that verifies the chunk according\n\/\/ to a given validator, then tries to download the uploaded data.\nfunc TestChunkUploadDownload(t *testing.T) {\n\n\tvar (\n\t\ttargets = \"0x222\"\n\t\tresource = func(addr swarm.Address) string { return \"\/chunks\/\" + addr.String() }\n\t\tresourceTargets = func(addr swarm.Address) string { return \"\/chunks\/\" + addr.String() + \"?targets=\" + targets }\n\t\tvalidHash = swarm.MustParseHexAddress(\"aabbcc\")\n\t\tinvalidHash = swarm.MustParseHexAddress(\"bbccdd\")\n\t\tvalidContent = []byte(\"bbaatt\")\n\t\tinvalidContent = []byte(\"bbaattss\")\n\t\tmockValidator = validator.NewMockValidator(validHash, validContent)\n\t\tmockStatestore = statestore.NewStateStore()\n\t\tlogger = logging.New(ioutil.Discard, 0)\n\t\ttag = tags.NewTags(mockStatestore, logger)\n\t\tmockValidatingStorer = mock.NewStorer(mock.WithValidator(mockValidator))\n\t\tclient, _, _ = newTestServer(t, testServerOptions{\n\t\t\tStorer: mockValidatingStorer,\n\t\t\tTags: tag,\n\t\t})\n\t)\n\n\tt.Run(\"invalid hash\", func(t *testing.T) {\n\t\tjsonhttptest.Request(t, client, http.MethodPost, resource(invalidHash), http.StatusBadRequest,\n\t\t\tjsonhttptest.WithRequestBody(bytes.NewReader(validContent)),\n\t\t\tjsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{\n\t\t\t\tMessage: \"chunk write error\",\n\t\t\t\tCode: http.StatusBadRequest,\n\t\t\t}),\n\t\t)\n\n\t\t\/\/ make sure chunk is not retrievable\n\t\t_ = request(t, client, http.MethodGet, resource(invalidHash), nil, http.StatusNotFound)\n\t})\n\n\tt.Run(\"invalid content\", func(t *testing.T) {\n\t\tjsonhttptest.Request(t, client, http.MethodPost, resource(invalidHash), http.StatusBadRequest,\n\t\t\tjsonhttptest.WithRequestBody(bytes.NewReader(invalidContent)),\n\t\t\tjsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{\n\t\t\t\tMessage: \"chunk write error\",\n\t\t\t\tCode: http.StatusBadRequest,\n\t\t\t}),\n\t\t)\n\n\t\t\/\/ make sure not retrievable\n\t\t_ = request(t, client, http.MethodGet, resource(validHash), nil, http.StatusNotFound)\n\t})\n\n\tt.Run(\"ok\", func(t *testing.T) {\n\t\tjsonhttptest.Request(t, client, http.MethodPost, resource(validHash), http.StatusOK,\n\t\t\tjsonhttptest.WithRequestBody(bytes.NewReader(validContent)),\n\t\t\tjsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{\n\t\t\t\tMessage: http.StatusText(http.StatusOK),\n\t\t\t\tCode: http.StatusOK,\n\t\t\t}),\n\t\t)\n\n\t\t\/\/ try to fetch the same chunk\n\t\tresp := request(t, client, http.MethodGet, resource(validHash), nil, http.StatusOK)\n\t\tdata, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif !bytes.Equal(validContent, data) 
{\n\t\t\tt.Fatal(\"data retrieved doesnt match uploaded content\")\n\t\t}\n\t})\n\n\tt.Run(\"pin-invalid-value\", func(t *testing.T) {\n\t\tjsonhttptest.Request(t, client, http.MethodPost, resource(validHash), http.StatusOK,\n\t\t\tjsonhttptest.WithRequestBody(bytes.NewReader(validContent)),\n\t\t\tjsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{\n\t\t\t\tMessage: http.StatusText(http.StatusOK),\n\t\t\t\tCode: http.StatusOK,\n\t\t\t}),\n\t\t\tjsonhttptest.WithRequestHeader(api.SwarmPinHeader, \"invalid-pin\"),\n\t\t)\n\n\t\t\/\/ Also check if the chunk is NOT pinned\n\t\tif mockValidatingStorer.GetModeSet(validHash) == storage.ModeSetPin {\n\t\t\tt.Fatal(\"chunk should not be pinned\")\n\t\t}\n\t})\n\tt.Run(\"pin-header-missing\", func(t *testing.T) {\n\t\tjsonhttptest.Request(t, client, http.MethodPost, resource(validHash), http.StatusOK,\n\t\t\tjsonhttptest.WithRequestBody(bytes.NewReader(validContent)),\n\t\t\tjsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{\n\t\t\t\tMessage: http.StatusText(http.StatusOK),\n\t\t\t\tCode: http.StatusOK,\n\t\t\t}),\n\t\t)\n\n\t\t\/\/ Also check if the chunk is NOT pinned\n\t\tif mockValidatingStorer.GetModeSet(validHash) == storage.ModeSetPin {\n\t\t\tt.Fatal(\"chunk should not be pinned\")\n\t\t}\n\t})\n\tt.Run(\"pin-ok\", func(t *testing.T) {\n\t\tjsonhttptest.Request(t, client, http.MethodPost, resource(validHash), http.StatusOK,\n\t\t\tjsonhttptest.WithRequestBody(bytes.NewReader(validContent)),\n\t\t\tjsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{\n\t\t\t\tMessage: http.StatusText(http.StatusOK),\n\t\t\t\tCode: http.StatusOK,\n\t\t\t}),\n\t\t\tjsonhttptest.WithRequestHeader(api.SwarmPinHeader, \"True\"),\n\t\t)\n\n\t\t\/\/ Also check if the chunk is pinned\n\t\tif mockValidatingStorer.GetModePut(validHash) != storage.ModePutUploadPin {\n\t\t\tt.Fatal(\"chunk is not pinned\")\n\t\t}\n\n\t})\n\tt.Run(\"retrieve-targets\", func(t *testing.T) {\n\t\tresp := request(t, client, http.MethodGet, resourceTargets(validHash), nil, http.StatusOK)\n\n\t\t\/\/ Check if the target is obtained correctly\n\t\tif resp.Header.Get(api.TargetsRecoveryHeader) != targets {\n\t\t\tt.Fatalf(\"targets mismatch. 
got %s, want %s\", resp.Header.Get(api.TargetsRecoveryHeader), targets)\n\t\t}\n\t})\n}\n\nfunc request(t *testing.T, client *http.Client, method, resource string, body io.Reader, responseCode int) *http.Response {\n\tt.Helper()\n\n\treq, err := http.NewRequest(method, resource, body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif resp.StatusCode != responseCode {\n\t\tt.Fatalf(\"got response status %s, want %v %s\", resp.Status, responseCode, http.StatusText(responseCode))\n\t}\n\treturn resp\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":155} {"diff_hunk":"@@ -58,6 +58,13 @@ func Register(r *gin.RouterGroup, s *Service) {\n \tendpoint.GET(\"\", s.listExperiments)\n \tendpoint.GET(\"\/detail\/search\", s.experimentDetailSearch)\n \tendpoint.GET(\"\/detail\", s.experimentDetail)\n+\tendpoint.GET(\"\/report\", s.experimentReport)\n+}\n+\n+\/\/ ArchiveExperimentDetail represents an experiment instance.\n+type ArchiveExperimentDetail struct {\n+\tcore.ArchiveExperimentMeta\n+\tExperimentInfo core.ExperimentInfo `json:\"experiment_info\"`\n }\n \n \/\/ @Summary Get archived chaos experiments.","source_code":"\/\/ Copyright 2020 Chaos Mesh Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage archive\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/jinzhu\/gorm\"\n\n\t\"github.com\/chaos-mesh\/chaos-mesh\/pkg\/apiserver\/utils\"\n\t\"github.com\/chaos-mesh\/chaos-mesh\/pkg\/config\"\n\t\"github.com\/chaos-mesh\/chaos-mesh\/pkg\/core\"\n\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/client\"\n)\n\n\/\/ Service defines a handler service for archive experiments.\ntype Service struct {\n\tconf *config.ChaosDashboardConfig\n\tkubeCli client.Client\n\tarchive core.ExperimentStore\n\tevent core.EventStore\n}\n\n\/\/ NewService returns an archive experiment service instance.\nfunc NewService(\n\tconf *config.ChaosDashboardConfig,\n\tcli client.Client,\n\tarchive core.ExperimentStore,\n\tevent core.EventStore,\n) *Service {\n\treturn &Service{\n\t\tconf: conf,\n\t\tkubeCli: cli,\n\t\tarchive: archive,\n\t\tevent: event,\n\t}\n}\n\n\/\/ Register mounts our HTTP handler on the mux.\nfunc Register(r *gin.RouterGroup, s *Service) {\n\tendpoint := r.Group(\"\/archives\")\n\n\t\/\/ TODO: add more api handlers\n\tendpoint.GET(\"\", s.listExperiments)\n\tendpoint.GET(\"\/detail\/search\", s.experimentDetailSearch)\n\tendpoint.GET(\"\/detail\", s.experimentDetail)\n}\n\n\/\/ @Summary Get archived chaos experiments.\n\/\/ @Description Get archived chaos experiments.\n\/\/ @Tags archives\n\/\/ @Produce json\n\/\/ @Param namespace query string false \"namespace\"\n\/\/ @Param name query string false \"name\"\n\/\/ @Param kind query string false \"kind\" Enums(PodChaos, IoChaos, NetworkChaos, TimeChaos, KernelChaos, StressChaos)\n\/\/ @Success 200 {array} core.ArchiveExperimentMeta\n\/\/ @Router \/api\/archives [get]\n\/\/ @Failure 500 {object} utils.APIError\nfunc (s *Service) listExperiments(c 
*gin.Context) {\n\tkind := c.Query(\"kind\")\n\tname := c.Query(\"name\")\n\tns := c.Query(\"namespace\")\n\n\tdata, err := s.archive.ListMeta(context.TODO(), kind, ns, name)\n\tif err != nil {\n\t\tc.Status(http.StatusInternalServerError)\n\t\t_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, data)\n}\n\n\/\/ @Summary Get the details of chaos experiment.\n\/\/ @Description Get the details of chaos experiment.\n\/\/ @Tags archives\n\/\/ @Produce json\n\/\/ @Param namespace query string false \"namespace\"\n\/\/ @Param name query string false \"name\"\n\/\/ @Param kind query string false \"kind\" Enums(PodChaos, IoChaos, NetworkChaos, TimeChaos, KernelChaos, StressChaos)\n\/\/ @Param uid query string false \"uid\"\n\/\/ @Success 200 {array} core.ArchiveExperiment\n\/\/ @Router \/api\/archives\/detail\/search [get]\n\/\/ @Failure 500 {object} utils.APIError\nfunc (s *Service) experimentDetailSearch(c *gin.Context) {\n\tkind := c.Query(\"kind\")\n\tname := c.Query(\"name\")\n\tns := c.Query(\"namespace\")\n\tuid := c.Query(\"uid\")\n\n\tdata, err := s.archive.DetailList(context.TODO(), kind, ns, name, uid)\n\tif err != nil {\n\t\tif !gorm.IsRecordNotFoundError(err) {\n\t\t\tc.Status(http.StatusInternalServerError)\n\t\t\t_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())\n\t\t} else {\n\t\t\tc.Status(http.StatusInternalServerError)\n\t\t\t_ = c.Error(utils.ErrInvalidRequest.New(\"the archive is not found\"))\n\t\t}\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, data)\n}\n\n\/\/ @Summary Get the details of chaos experiment.\n\/\/ @Description Get the details of chaos experiment.\n\/\/ @Tags archives\n\/\/ @Produce json\n\/\/ @Param uid query string true \"uid\"\n\/\/ @Success 200 {array} core.ArchiveExperiment\n\/\/ @Router \/api\/archives\/detail [get]\n\/\/ @Failure 500 {object} utils.APIError\nfunc (s *Service) experimentDetail(c *gin.Context) {\n\n\tuid := c.Query(\"uid\")\n\n\tif uid == \"\" {\n\t\tc.Status(http.StatusBadRequest)\n\t\t_ = c.Error(utils.ErrInvalidRequest.New(\"uid cannot be empty\"))\n\t\treturn\n\t}\n\n\tdata, err := s.archive.FindByUID(context.TODO(), uid)\n\tif err != nil {\n\t\tif !gorm.IsRecordNotFoundError(err) {\n\t\t\tc.Status(http.StatusInternalServerError)\n\t\t\t_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())\n\t\t} else {\n\t\t\tc.Status(http.StatusInternalServerError)\n\t\t\t_ = c.Error(utils.ErrInvalidRequest.New(\"the archive is not found\"))\n\t\t}\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, data)\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":156} {"diff_hunk":"@@ -60,7 +60,7 @@ func DefaultIpcPath() (string, error) {\n \n \/\/ NewSigner creates a new connection to the signer at endpoint\n \/\/ As clef does not expose public keys it signs a test message to recover the public key\n-func NewSigner(clef ExternalSignerInterface, recoverFunc crypto.RecoverFunc) (signer crypto.Signer, err error) {\n+func NewSigner(clef ExternalSignerInterface, client RpcClient, recoverFunc crypto.RecoverFunc) (signer crypto.Signer, err error) {\n \t\/\/ get the list of available ethereum accounts\n \tclefAccounts := clef.Accounts()\n \tif len(clefAccounts) == 0 {","source_code":"\/\/ Copyright 2020 The Swarm Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage clef\n\nimport (\n\t\"crypto\/ecdsa\"\n\t\"errors\"\n\t\"math\/big\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\n\t\"github.com\/ethereum\/go-ethereum\/accounts\"\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"github.com\/ethereum\/go-ethereum\/core\/types\"\n\t\"github.com\/ethersphere\/bee\/pkg\/crypto\"\n)\n\nvar (\n\tErrNoAccounts = errors.New(\"no accounts found in clef\")\n\tclefRecoveryMessage = []byte(\"public key recovery message\")\n)\n\n\/\/ ExternalSignerInterface is the interface for the clef client from go-ethereum\ntype ExternalSignerInterface interface {\n\tSignData(account accounts.Account, mimeType string, data []byte) ([]byte, error)\n\tSignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error)\n\tAccounts() []accounts.Account\n}\n\ntype clefSigner struct {\n\tclef ExternalSignerInterface\n\taccount accounts.Account \/\/ the account this signer will use\n\tpubKey *ecdsa.PublicKey \/\/ the public key for the account\n}\n\n\/\/ DefaultIpcPath returns the os-dependent default ipc path for clef\nfunc DefaultIpcPath() (string, error) {\n\tsocket := \"clef.ipc\"\n\t\/\/ on windows clef uses top level pipes\n\tif runtime.GOOS == \"windows\" {\n\t\treturn `\\\\.\\pipe\\` + socket, nil\n\t}\n\n\thome, err := os.UserHomeDir()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ on mac os clef defaults to ~\/Library\/Signer\/clef.ipc\n\tif runtime.GOOS == \"darwin\" {\n\t\treturn filepath.Join(home, \"Library\", \"Signer\", socket), nil\n\t}\n\n\t\/\/ on unix clef defaults to ~\/.clef\/clef.ipc\n\treturn filepath.Join(home, \".clef\", socket), nil\n}\n\n\/\/ NewSigner creates a new connection to the signer at endpoint\n\/\/ As clef does not expose public keys it signs a test message to recover the public key\nfunc NewSigner(clef ExternalSignerInterface, recoverFunc crypto.RecoverFunc) (signer crypto.Signer, err error) {\n\t\/\/ get the list of available ethereum accounts\n\tclefAccounts := clef.Accounts()\n\tif len(clefAccounts) == 0 {\n\t\treturn nil, ErrNoAccounts\n\t}\n\n\t\/\/ pick the first account as the one we use\n\taccount := clefAccounts[0]\n\n\t\/\/ clef currently does not expose the public key\n\t\/\/ sign some data so we can recover it\n\tsig, err := clef.SignData(account, accounts.MimetypeTextPlain, clefRecoveryMessage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpubKey, err := recoverFunc(sig, clefRecoveryMessage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &clefSigner{\n\t\tclef: clef,\n\t\taccount: account,\n\t\tpubKey: pubKey,\n\t}, nil\n}\n\n\/\/ PublicKey returns the public key recovered during creation\nfunc (c *clefSigner) PublicKey() (*ecdsa.PublicKey, error) {\n\treturn c.pubKey, nil\n}\n\n\/\/ SignData signs with the text\/plain type which is the standard Ethereum prefix method\nfunc (c *clefSigner) Sign(data []byte) ([]byte, error) {\n\treturn c.clef.SignData(c.account, accounts.MimetypeTextPlain, data)\n}\n\n\/\/ SignTx signs an ethereum transaction\nfunc (c *clefSigner) SignTx(transaction *types.Transaction) (*types.Transaction, error) {\n\t\/\/ chainId is nil here because it is set on the clef side\n\treturn c.clef.SignTx(c.account, transaction, nil)\n}\n\n\/\/ EthereumAddress returns the ethereum address this signer uses\nfunc (c *clefSigner) EthereumAddress() (common.Address, error) {\n\treturn c.account.Address, 
nil\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":157} {"diff_hunk":"@@ -19,6 +19,7 @@ package validation\n import (\n \t\"fmt\"\n \t\"net\"\n+\t\"net\/mail\"\n \n \t\"k8s.io\/apimachinery\/pkg\/runtime\"\n \t\"k8s.io\/apimachinery\/pkg\/util\/validation\/field\"","source_code":"\/*\nCopyright 2019 The Jetstack cert-manager contributors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage validation\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/validation\/field\"\n\n\t\"github.com\/jetstack\/cert-manager\/pkg\/api\/util\"\n\tcmapiv1alpha2 \"github.com\/jetstack\/cert-manager\/pkg\/apis\/certmanager\/v1alpha2\"\n\tcmapi \"github.com\/jetstack\/cert-manager\/pkg\/internal\/apis\/certmanager\"\n\tcmmeta \"github.com\/jetstack\/cert-manager\/pkg\/internal\/apis\/meta\"\n)\n\n\/\/ Validation functions for cert-manager Certificate types\n\nfunc ValidateCertificateSpec(crt *cmapi.CertificateSpec, fldPath *field.Path) field.ErrorList {\n\tel := field.ErrorList{}\n\tif crt.SecretName == \"\" {\n\t\tel = append(el, field.Required(fldPath.Child(\"secretName\"), \"must be specified\"))\n\t}\n\n\tel = append(el, validateIssuerRef(crt.IssuerRef, fldPath)...)\n\n\tif len(crt.CommonName) == 0 && len(crt.DNSNames) == 0 && len(crt.URISANs) == 0 {\n\t\tel = append(el, field.Required(fldPath.Child(\"commonName\", \"dnsNames\", \"uriSANs\"),\n\t\t\t\"at least one of commonName, dnsNames, or uriSANs must be set\"))\n\t}\n\n\t\/\/ if a common name has been specified, ensure it is no longer than 64 chars\n\tif len(crt.CommonName) > 64 {\n\t\tel = append(el, field.TooLong(fldPath.Child(\"commonName\"), crt.CommonName, 64))\n\t}\n\n\tif len(crt.IPAddresses) > 0 {\n\t\tel = append(el, validateIPAddresses(crt, fldPath)...)\n\t}\n\tswitch crt.KeyAlgorithm {\n\tcase cmapi.KeyAlgorithm(\"\"):\n\tcase cmapi.RSAKeyAlgorithm:\n\t\tif crt.KeySize > 0 && (crt.KeySize < 2048 || crt.KeySize > 8192) {\n\t\t\tel = append(el, field.Invalid(fldPath.Child(\"keySize\"), crt.KeySize, \"must be between 2048 & 8192 for rsa keyAlgorithm\"))\n\t\t}\n\tcase cmapi.ECDSAKeyAlgorithm:\n\t\tif crt.KeySize > 0 && crt.KeySize != 256 && crt.KeySize != 384 && crt.KeySize != 521 {\n\t\t\tel = append(el, field.NotSupported(fldPath.Child(\"keySize\"), crt.KeySize, []string{\"256\", \"384\", \"521\"}))\n\t\t}\n\tdefault:\n\t\tel = append(el, field.Invalid(fldPath.Child(\"keyAlgorithm\"), crt.KeyAlgorithm, \"must be either empty or one of rsa or ecdsa\"))\n\t}\n\n\tif crt.Duration != nil || crt.RenewBefore != nil {\n\t\tel = append(el, ValidateDuration(crt, fldPath)...)\n\t}\n\tif len(crt.Usages) > 0 {\n\t\tel = append(el, validateUsages(crt, fldPath)...)\n\t}\n\treturn el\n}\n\nfunc ValidateCertificate(obj runtime.Object) field.ErrorList {\n\tcrt := obj.(*cmapi.Certificate)\n\tallErrs := ValidateCertificateSpec(&crt.Spec, field.NewPath(\"spec\"))\n\treturn allErrs\n}\n\nfunc validateIssuerRef(issuerRef cmmeta.ObjectReference, fldPath *field.Path) field.ErrorList 
{\n\tel := field.ErrorList{}\n\n\tissuerRefPath := fldPath.Child(\"issuerRef\")\n\tif issuerRef.Name == \"\" {\n\t\tel = append(el, field.Required(issuerRefPath.Child(\"name\"), \"must be specified\"))\n\t}\n\tif issuerRef.Group == \"\" || issuerRef.Group == cmapi.SchemeGroupVersion.Group {\n\t\tswitch issuerRef.Kind {\n\t\tcase \"\":\n\t\tcase \"Issuer\", \"ClusterIssuer\":\n\t\tdefault:\n\t\t\tel = append(el, field.Invalid(issuerRefPath.Child(\"kind\"), issuerRef.Kind, \"must be one of Issuer or ClusterIssuer\"))\n\t\t}\n\t}\n\treturn el\n}\n\nfunc validateIPAddresses(a *cmapi.CertificateSpec, fldPath *field.Path) field.ErrorList {\n\tif len(a.IPAddresses) <= 0 {\n\t\treturn nil\n\t}\n\tel := field.ErrorList{}\n\tfor i, d := range a.IPAddresses {\n\t\tip := net.ParseIP(d)\n\t\tif ip == nil {\n\t\t\tel = append(el, field.Invalid(fldPath.Child(\"ipAddresses\").Index(i), d, \"invalid IP address\"))\n\t\t}\n\t}\n\treturn el\n}\n\nfunc validateUsages(a *cmapi.CertificateSpec, fldPath *field.Path) field.ErrorList {\n\tel := field.ErrorList{}\n\tfor i, u := range a.Usages {\n\t\t_, kok := util.KeyUsageType(cmapiv1alpha2.KeyUsage(u))\n\t\t_, ekok := util.ExtKeyUsageType(cmapiv1alpha2.KeyUsage(u))\n\t\tif !kok && !ekok {\n\t\t\tel = append(el, field.Invalid(fldPath.Child(\"usages\").Index(i), u, \"unknown keyusage\"))\n\t\t}\n\t}\n\treturn el\n}\n\nfunc ValidateDuration(crt *cmapi.CertificateSpec, fldPath *field.Path) field.ErrorList {\n\tel := field.ErrorList{}\n\n\tduration := util.DefaultCertDuration(crt.Duration)\n\trenewBefore := cmapiv1alpha2.DefaultRenewBefore\n\tif crt.RenewBefore != nil {\n\t\trenewBefore = crt.RenewBefore.Duration\n\t}\n\tif duration < cmapiv1alpha2.MinimumCertificateDuration {\n\t\tel = append(el, field.Invalid(fldPath.Child(\"duration\"), duration, fmt.Sprintf(\"certificate duration must be greater than %s\", cmapiv1alpha2.MinimumCertificateDuration)))\n\t}\n\tif renewBefore < cmapiv1alpha2.MinimumRenewBefore {\n\t\tel = append(el, field.Invalid(fldPath.Child(\"renewBefore\"), renewBefore, fmt.Sprintf(\"certificate renewBefore must be greater than %s\", cmapiv1alpha2.MinimumRenewBefore)))\n\t}\n\tif duration <= renewBefore {\n\t\tel = append(el, field.Invalid(fldPath.Child(\"renewBefore\"), renewBefore, fmt.Sprintf(\"certificate duration %s must be greater than renewBefore %s\", duration, renewBefore)))\n\t}\n\treturn el\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":158} {"diff_hunk":"@@ -95,37 +95,9 @@ func buildAndCreateCVR() {\n \t\tTargetIP: cvrObj.Spec.TargetIP,\n \t\tPhase: \"Recreate\",\n \t\tCapacity: cvrObj.Spec.Capacity,\n+\t\tReplicaID: replicaID,\n \t}\n \tops.Config = cvrConfig\n \tnewCVRObj = ops.BuildAndCreateCVR()\n-\n-\tcvrName := pvcObj.Spec.VolumeName + \"-\" + cspObj.Name\n-\thashUID, err := hash.Hash(newCVRObj.UID)\n-\tExpect(err).To(BeNil())\n-\tReplicaID = strings.ToUpper(hashUID)\n-\tfor i := 0; i < retryUpdate; i++ {\n-\t\tnewCVRObj.Spec.ReplicaID = ReplicaID\n-\t\tnewCVRObj, err = ops.CVRClient.\n-\t\t\tWithNamespace(openebsNamespace).\n-\t\t\tUpdate(newCVRObj)\n-\t\tif err == nil {\n-\t\t\tbreak\n-\t\t}\n-\t\ttime.Sleep(time.Second * 5)\n-\t\tnewCVRObj, getErr = ops.CVRClient.Get(cvrName, metav1.GetOptions{})\n-\t\tExpect(getErr).To(BeNil())\n-\t}\n-\tExpect(err).To(BeNil())\n-\t\/\/TODO: Need to fix bug in cvr during creation time\n-\tpodLabel := cspLabel + cspObj.Name\n-\tpodObjList, err := ops.PodClient.\n-\t\tWithNamespace(openebsNamespace).\n-\t\tList(metav1.ListOptions{LabelSelector: 
podLabel})\n-\tExpect(err).To(BeNil())\n-\terr = ops.PodClient.Delete(podObjList.Items[0].Name, &metav1.DeleteOptions{})\n-\tExpect(err).To(BeNil())\n-\tisPodDeleted := ops.IsPodDeletedEventually(\n-\t\tpodObjList.Items[0].Namespace,\n-\t\tpodObjList.Items[0].Name)\n-\tExpect(isPodDeleted).To(Equal(true))\n+\tReplicaID = replicaID\n }","source_code":"\/*\nCopyright 2019 The OpenEBS Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage replicascaleup\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/gomega\"\n\tapis \"github.com\/openebs\/maya\/pkg\/apis\/openebs.io\/v1alpha1\"\n\thash \"github.com\/openebs\/maya\/pkg\/hash\"\n\t\"github.com\/openebs\/maya\/tests\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\t\/\/ auth plugins\n\t_ \"k8s.io\/client-go\/plugin\/pkg\/client\/auth\/gcp\"\n)\n\nfunc verifyVolumeConfigurationEventually() {\n\tvar err error\n\tconsistencyFactor := (ReplicaCount \/ 2) + 1\n\tfor i := 0; i < MaxRetry; i++ {\n\t\tcvObj, err = ops.CVClient.WithNamespace(openebsNamespace).\n\t\t\tGet(pvcObj.Spec.VolumeName, metav1.GetOptions{})\n\t\tExpect(err).To(BeNil())\n\t\tif cvObj.Spec.ReplicationFactor == ReplicaCount {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\tExpect(cvObj.Spec.ConsistencyFactor).To(Equal(consistencyFactor), \"mismatch of consistencyFactor\")\n\t_, isReplicaIDExist := cvObj.Status.ReplicaDetails.KnownReplicas[apis.ReplicaID(ReplicaID)]\n\tExpect(isReplicaIDExist).To(Equal(true), \"replicaId should exist in known replicas of cstorvolume\")\n\tExpect(cvObj.Status.Phase).To(Equal(apis.CStorVolumePhase(\"Healthy\")))\n}\n\nfunc buildAndCreateSC() {\n\tcasConfig := strings.Replace(\n\t\topenebsCASConfigValue, \"$spcName\", spcObj.Name, 1)\n\tcasConfig = strings.Replace(\n\t\tcasConfig, \"$count\", strconv.Itoa(ReplicaCount), 1)\n\tannotations[string(apis.CASTypeKey)] = string(apis.CstorVolume)\n\tannotations[string(apis.CASConfigKey)] = casConfig\n\tscConfig := &tests.SCConfig{\n\t\tName: scName,\n\t\tAnnotations: annotations,\n\t\tProvisioner: openebsProvisioner,\n\t}\n\tops.Config = scConfig\n\tscObj = ops.CreateStorageClass()\n}\n\nfunc updateDesiredReplicationFactor() {\n\tvar err error\n\tcvObj, err = ops.CVClient.WithNamespace(openebsNamespace).\n\t\tGet(pvcObj.Spec.VolumeName, metav1.GetOptions{})\n\tExpect(err).To(BeNil())\n\tcvObj.Spec.DesiredReplicationFactor = cvObj.Spec.DesiredReplicationFactor + 1\n\t\/\/ Namespace is already set to CVClient in above step\n\tcvObj, err = ops.CVClient.Update(cvObj)\n\tExpect(err).To(BeNil())\n}\n\nfunc buildAndCreateCVR() {\n\tvar err, getErr error\n\tretryUpdate := 3\n\tvolumeLabel := pvLabel + pvcObj.Spec.VolumeName\n\tcvrObjList, err := ops.CVRClient.\n\t\tWithNamespace(openebsNamespace).\n\t\tList(metav1.ListOptions{LabelSelector: volumeLabel})\n\tExpect(err).To(BeNil())\n\n\tcvrObj = &cvrObjList.Items[0]\n\tpoolLabel := string(apis.StoragePoolClaimCPK) + \"=\" + spcObj.Name\n\tcspObj = ops.GetUnUsedCStorPool(cvrObjList, poolLabel)\n\tcvrConfig := 
&tests.CVRConfig{\n\t\tVolumeName: pvcObj.Spec.VolumeName,\n\t\tPoolObj: cspObj,\n\t\tNamespace: openebsNamespace,\n\t\tTargetIP: cvrObj.Spec.TargetIP,\n\t\tPhase: \"Recreate\",\n\t\tCapacity: cvrObj.Spec.Capacity,\n\t}\n\tops.Config = cvrConfig\n\tnewCVRObj = ops.BuildAndCreateCVR()\n\n\tcvrName := pvcObj.Spec.VolumeName + \"-\" + cspObj.Name\n\thashUID, err := hash.Hash(newCVRObj.UID)\n\tExpect(err).To(BeNil())\n\tReplicaID = strings.ToUpper(hashUID)\n\tfor i := 0; i < retryUpdate; i++ {\n\t\tnewCVRObj.Spec.ReplicaID = ReplicaID\n\t\tnewCVRObj, err = ops.CVRClient.\n\t\t\tWithNamespace(openebsNamespace).\n\t\t\tUpdate(newCVRObj)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Second * 5)\n\t\tnewCVRObj, getErr = ops.CVRClient.Get(cvrName, metav1.GetOptions{})\n\t\tExpect(getErr).To(BeNil())\n\t}\n\tExpect(err).To(BeNil())\n\t\/\/TODO: Need to fix bug in cvr during creation time\n\tpodLabel := cspLabel + cspObj.Name\n\tpodObjList, err := ops.PodClient.\n\t\tWithNamespace(openebsNamespace).\n\t\tList(metav1.ListOptions{LabelSelector: podLabel})\n\tExpect(err).To(BeNil())\n\terr = ops.PodClient.Delete(podObjList.Items[0].Name, &metav1.DeleteOptions{})\n\tExpect(err).To(BeNil())\n\tisPodDeleted := ops.IsPodDeletedEventually(\n\t\tpodObjList.Items[0].Namespace,\n\t\tpodObjList.Items[0].Name)\n\tExpect(isPodDeleted).To(Equal(true))\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":159} {"diff_hunk":"@@ -57,7 +57,7 @@ type balanceState string\n type PromiseProcessor struct {\n \tdialog communication.Dialog\n \tbalance identity.Balance\n-\tstorage storage.Storage\n+\tstorage Storer\n \n \tbalanceInterval time.Duration\n \tbalanceState balanceState","source_code":"\/*\n * Copyright (C) 2018 The \"MysteriumNetwork\/node\" Authors.\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. 
If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\/\n\npackage noop\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/mysteriumnetwork\/node\/communication\"\n\t\"github.com\/mysteriumnetwork\/node\/core\/promise\"\n\t\"github.com\/mysteriumnetwork\/node\/core\/storage\"\n\t\"github.com\/mysteriumnetwork\/node\/identity\"\n\t\"github.com\/mysteriumnetwork\/node\/money\"\n\t\"github.com\/mysteriumnetwork\/node\/service_discovery\/dto\"\n)\n\nconst (\n\tprocessorLogPrefix = \"[promise-processor] \"\n\n\tbalanceNotifying = balanceState(\"Notifying\")\n\tbalanceStopped = balanceState(\"Stopped\")\n)\n\n\/\/ NewPromiseProcessor creates an instance of PromiseProcessor\nfunc NewPromiseProcessor(dialog communication.Dialog, balance identity.Balance, storage storage.Storage) *PromiseProcessor {\n\treturn &PromiseProcessor{\n\t\tdialog: dialog,\n\t\tbalance: balance,\n\t\tstorage: storage,\n\n\t\tbalanceInterval: 5 * time.Second,\n\t\tbalanceState: balanceStopped,\n\t\tbalanceShutdown: make(chan bool, 1),\n\t}\n}\n\ntype balanceState string\n\n\/\/ PromiseProcessor processes promises in such a way that no actual money is deducted from the promise\ntype PromiseProcessor struct {\n\tdialog communication.Dialog\n\tbalance identity.Balance\n\tstorage storage.Storage\n\n\tbalanceInterval time.Duration\n\tbalanceState balanceState\n\tbalanceStateMutex sync.RWMutex\n\tbalanceShutdown chan bool\n\n\t\/\/ these are populated later at runtime\n\tlastPromise promise.Promise\n}\n\n\/\/ Start processing promises for given service proposal\nfunc (processor *PromiseProcessor) Start(proposal dto.ServiceProposal) error {\n\t\/\/ TODO: replace static value with some real data\n\tprocessor.lastPromise = promise.Promise{\n\t\tAmount: money.NewMoney(10, money.CURRENCY_MYST),\n\t}\n\n\tconsumer := promise.NewConsumer(proposal, processor.balance, processor.storage)\n\tif err := processor.dialog.Respond(consumer); err != nil {\n\t\treturn err\n\t}\n\n\tprocessor.balanceShutdown = make(chan bool, 1)\n\tgo processor.balanceLoop()\n\n\treturn nil\n}\n\n\/\/ Stop stops processing promises\nfunc (processor *PromiseProcessor) Stop() error {\n\tprocessor.balanceShutdown <- true\n\treturn nil\n}\n\nfunc (processor *PromiseProcessor) balanceLoop() {\n\tprocessor.setBalanceState(balanceNotifying)\n\nbalanceLoop:\n\tfor {\n\t\tselect {\n\t\tcase <-processor.balanceShutdown:\n\t\t\tbreak balanceLoop\n\n\t\tcase <-time.After(processor.balanceInterval):\n\t\t\t\/\/ TODO: replace static value with some real data\n\t\t\tprocessor.balanceSend(\n\t\t\t\tpromise.BalanceMessage{1, true, processor.lastPromise.Amount},\n\t\t\t)\n\t\t}\n\t}\n\n\tprocessor.setBalanceState(balanceStopped)\n}\n\nfunc (processor *PromiseProcessor) setBalanceState(state balanceState) {\n\tprocessor.balanceStateMutex.Lock()\n\tdefer processor.balanceStateMutex.Unlock()\n\n\tprocessor.balanceState = state\n}\n\nfunc (processor *PromiseProcessor) getBalanceState() balanceState {\n\tprocessor.balanceStateMutex.RLock()\n\tdefer processor.balanceStateMutex.RUnlock()\n\n\treturn processor.balanceState\n}\n\nfunc (processor *PromiseProcessor) balanceSend(message promise.BalanceMessage) error {\n\tlog.Info(processorLogPrefix, fmt.Sprintf(\"Notifying balance %s\", message.Balance.String()))\n\treturn processor.dialog.Send(&promise.BalanceMessageProducer{\n\t\tMessage: message,\n\t})\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":160} {"diff_hunk":"@@ -114,11 +114,22 @@ 
func (it *InvoiceTracker) sendInvoiceExpectExchangeMessage() error {\n-\terr := it.sendInvoice()\n+\t\/\/ TODO: this should be calculated according to the passed in payment period\n+\tshouldBe := uint64(math.Trunc(it.timeTracker.Elapsed().Minutes() * float64(it.paymentInfo.GetPrice().Amount) * 100000000))\n+\n+\t\/\/ TODO: fill in the fee\n+\tinvoice := crypto.CreateInvoice(it.lastInvoice.invoice.AgreementID, shouldBe, 0, it.lastInvoice.r)\n+\tinvoice.Provider = it.providerID.Address\n+\terr := it.peerInvoiceSender.Send(invoice)\n \tif err != nil {\n \t\treturn err\n \t}\n \n+\terr = it.invoiceStorage.Store(it.peer, invoice)\n+\tif err != nil {\n+\t\treturn errors.Wrap(err, \"could not store invoice\")\n+\t}\n+\n \terr = it.receiveExchangeMessageOrTimeout()\n \tif err != nil {\n \t\thandlerErr := it.handleExchangeMessageReceiveError(err)","source_code":"\/*\n * Copyright (C) 2019 The \"MysteriumNetwork\/node\" Authors.\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\/\n\npackage pingpong\n\nimport (\n\t\"math\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"github.com\/mysteriumnetwork\/node\/identity\"\n\t\"github.com\/mysteriumnetwork\/payments\/crypto\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/rs\/zerolog\/log\"\n)\n\n\/\/ PeerInvoiceSender allows to send invoices\ntype PeerInvoiceSender interface {\n\tSend(crypto.Invoice) error\n}\n\n\/\/ ErrExchangeWaitTimeout indicates that we did not get an exchange message in time\nvar ErrExchangeWaitTimeout = errors.New(\"did not get a new exchange message\")\n\n\/\/ ErrExchangeValidationFailed indicates that there was an error with the exchange signature\nvar ErrExchangeValidationFailed = errors.New(\"exchange validation failed\")\n\nconst chargePeriodLeeway = time.Hour * 2\n\n\/\/ InvoiceTracker keeps tabs on invoices and sends them to the consumer\ntype InvoiceTracker struct {\n\tpeer identity.Identity\n\tstop chan struct{}\n\tpeerInvoiceSender PeerInvoiceSender\n\texchangeMessageChan chan crypto.ExchangeMessage\n\tchargePeriod time.Duration\n\texchangeMessageWaitTimeout time.Duration\n\tnotReceivedExchangeMessageCount uint64\n\tmaxNotReceivedExchangeMessages uint64\n\tonce sync.Once\n}\n\n\/\/ NewInvoiceTracker creates a new instance of invoice tracker\nfunc NewInvoiceTracker(\n\tpeer identity.Identity,\n\tpeerInvoiceSender PeerInvoiceSender,\n\tchargePeriod time.Duration,\n\texchangeMessageChan chan crypto.ExchangeMessage,\n\texchangeMessageWaitTimeout time.Duration) *InvoiceTracker {\n\treturn &InvoiceTracker{\n\t\tpeer: peer,\n\t\tstop: make(chan struct{}),\n\t\tpeerInvoiceSender: peerInvoiceSender,\n\t\texchangeMessageChan: exchangeMessageChan,\n\t\texchangeMessageWaitTimeout: exchangeMessageWaitTimeout,\n\t\tchargePeriod: chargePeriod,\n\t\tmaxNotReceivedExchangeMessages: calculateMaxNotReceivedExchangeMessageCount(chargePeriodLeeway, chargePeriod),\n\t}\n}\n\nfunc 
calculateMaxNotReceivedExchangeMessageCount(chargeLeeway, chargePeriod time.Duration) uint64 {\n\treturn uint64(math.Round(float64(chargeLeeway) \/ float64(chargePeriod)))\n}\n\n\/\/ Start starts the invoice tracker\nfunc (it *InvoiceTracker) Start() error {\n\tlog.Debug().Msg(\"Starting...\")\n\t\/\/ give the consumer a second to start up his payments before sending the first request\n\tfirstSend := time.After(time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-firstSend:\n\t\t\terr := it.sendInvoiceExpectExchangeMessage()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-it.stop:\n\t\t\treturn nil\n\t\tcase <-time.After(it.chargePeriod):\n\t\t\terr := it.sendInvoiceExpectExchangeMessage()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (it *InvoiceTracker) markExchangeMessageNotReceived() {\n\tatomic.AddUint64(&it.notReceivedExchangeMessageCount, 1)\n}\n\nfunc (it *InvoiceTracker) resetNotReceivedExchangeMessageCount() {\n\tatomic.SwapUint64(&it.notReceivedExchangeMessageCount, 0)\n}\n\nfunc (it *InvoiceTracker) getNotReceivedExchangeMessageCount() uint64 {\n\treturn atomic.LoadUint64(&it.notReceivedExchangeMessageCount)\n}\n\nfunc (it *InvoiceTracker) sendInvoiceExpectExchangeMessage() error {\n\terr := it.sendInvoice()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = it.receiveExchangeMessageOrTimeout()\n\tif err != nil {\n\t\thandlerErr := it.handleExchangeMessageReceiveError(err)\n\t\tif handlerErr != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tit.resetNotReceivedExchangeMessageCount()\n\t}\n\treturn nil\n}\n\nfunc (it *InvoiceTracker) handleExchangeMessageReceiveError(err error) error {\n\t\/\/ if it's a timeout, we'll want to ignore it if we're not exceeding maxNotReceivedExchangeMessages\n\tif err == ErrExchangeWaitTimeout {\n\t\tit.markExchangeMessageNotReceived()\n\t\tif it.getNotReceivedExchangeMessageCount() >= it.maxNotReceivedExchangeMessages {\n\t\t\treturn err\n\t\t}\n\t\tlog.Warn().Err(err).Msg(\"Failed to receive exchangeMessage\")\n\t\treturn nil\n\t}\n\treturn err\n}\n\nfunc (it *InvoiceTracker) sendInvoice() error {\n\t\/\/ TODO: a ton of actions should go here\n\n\t\/\/ TODO: fill the fields\n\treturn it.peerInvoiceSender.Send(crypto.Invoice{AgreementID: 1234})\n}\n\nfunc (it *InvoiceTracker) receiveExchangeMessageOrTimeout() error {\n\tselect {\n\tcase pm := <-it.exchangeMessageChan:\n\t\tif res := pm.ValidateExchangeMessage(common.HexToAddress(it.peer.Address)); !res {\n\t\t\treturn ErrExchangeValidationFailed\n\t\t}\n\tcase <-time.After(it.exchangeMessageWaitTimeout):\n\t\treturn ErrExchangeWaitTimeout\n\tcase <-it.stop:\n\t\treturn nil\n\t}\n\treturn nil\n}\n\n\/\/ Stop stops the invoice tracker\nfunc (it *InvoiceTracker) Stop() {\n\tit.once.Do(func() {\n\t\tlog.Debug().Msg(\"Stopping...\")\n\t\tclose(it.stop)\n\t})\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":161} {"diff_hunk":"@@ -6,6 +6,7 @@ package full\n \n import (\n \t\"context\"\n+\t\"errors\"\n \t\"math\/rand\"\n \t\"sync\"\n \t\"time\"","source_code":"\/\/ Copyright 2020 The Swarm Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage full\n\nimport (\n\t\"context\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ethersphere\/bee\/pkg\/addressbook\"\n\t\"github.com\/ethersphere\/bee\/pkg\/discovery\"\n\t\"github.com\/ethersphere\/bee\/pkg\/logging\"\n\t\"github.com\/ethersphere\/bee\/pkg\/p2p\"\n\t\"github.com\/ethersphere\/bee\/pkg\/swarm\"\n\t\"github.com\/ethersphere\/bee\/pkg\/topology\"\n)\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\nvar _ topology.Driver = (*Driver)(nil)\n\n\/\/ Driver drives the connectivity between nodes. It is a basic implementation of a connectivity Driver\n\/\/ that enables full connectivity in the sense that:\n\/\/ - Every peer which is added to the Driver gets broadcasted to every other peer regardless of its address.\n\/\/ - A random peer is picked when asking for a peer to retrieve an arbitrary chunk (Peerer interface).\ntype Driver struct {\n\tdiscovery discovery.Driver\n\taddressBook addressbook.GetPutter\n\tp2pService p2p.Service\n\treceivedPeers map[string]struct{} \/\/ track already received peers. Note: implement cleanup or expiration if needed to stop infinite growth\n\tmtx sync.Mutex \/\/ guards received peers\n\tlogger logging.Logger\n}\n\nfunc New(disc discovery.Driver, addressBook addressbook.GetPutter, p2pService p2p.Service, logger logging.Logger) *Driver {\n\treturn &Driver{\n\t\tdiscovery: disc,\n\t\taddressBook: addressBook,\n\t\tp2pService: p2pService,\n\t\treceivedPeers: make(map[string]struct{}),\n\t\tlogger: logger,\n\t}\n}\n\n\/\/ AddPeer adds a new peer to the topology driver.\n\/\/ The peer would be subsequently broadcasted to all connected peers.\n\/\/ All connected peers are also broadcasted to the new peer.\nfunc (d *Driver) AddPeer(ctx context.Context, addr swarm.Address) error {\n\td.mtx.Lock()\n\tif _, ok := d.receivedPeers[addr.ByteString()]; ok {\n\t\td.mtx.Unlock()\n\t\treturn nil\n\t}\n\n\td.receivedPeers[addr.ByteString()] = struct{}{}\n\td.mtx.Unlock()\n\n\tconnectedPeers := d.p2pService.Peers()\n\tma, exists := d.addressBook.Get(addr)\n\tif !exists {\n\t\treturn topology.ErrNotFound\n\t}\n\n\tif !isConnected(addr, connectedPeers) {\n\t\tpeerAddr, err := d.p2pService.Connect(ctx, ma)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ update addr if it is wrong or it has been changed\n\t\tif !addr.Equal(peerAddr) {\n\t\t\taddr = peerAddr\n\t\t\td.addressBook.Put(peerAddr, ma)\n\t\t}\n\t}\n\n\tconnectedAddrs := []swarm.Address{}\n\tfor _, addressee := range connectedPeers {\n\t\t\/\/ skip newly added peer\n\t\tif addressee.Address.Equal(addr) {\n\t\t\tcontinue\n\t\t}\n\n\t\tconnectedAddrs = append(connectedAddrs, addressee.Address)\n\t\tif err := d.discovery.BroadcastPeers(context.Background(), addressee.Address, addr); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(connectedAddrs) == 0 {\n\t\treturn nil\n\t}\n\n\tif err := d.discovery.BroadcastPeers(context.Background(), addr, connectedAddrs...); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ ChunkPeer is used to suggest a peer to ask a certain chunk from.\nfunc (d *Driver) ChunkPeer(addr swarm.Address) (peerAddr swarm.Address, err error) {\n\tconnectedPeers := d.p2pService.Peers()\n\tif len(connectedPeers) == 0 {\n\t\treturn swarm.Address{}, topology.ErrNotFound\n\t}\n\n\titemIdx := rand.Intn(len(connectedPeers))\n\ti := 0\n\tfor _, v := range connectedPeers {\n\t\tif i == itemIdx {\n\t\t\treturn v.Address, 
nil\n\t\t}\n\t\ti++\n\t}\n\n\treturn swarm.Address{}, topology.ErrNotFound\n}\n\nfunc isConnected(addr swarm.Address, connectedPeers []p2p.Peer) bool {\n\tfor _, p := range connectedPeers {\n\t\tif p.Address.Equal(addr) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":162} {"diff_hunk":"@@ -96,17 +96,17 @@ func (api *API) ConfigGet(dottedPath string) (interface{}, error) {\n \n \/\/ ChainHead returns the head tipset\n func (api *API) ChainHead(ctx context.Context) types.TipSet {\n-\treturn api.chain.Head(ctx)\n+\treturn api.chain.Head()\n }\n \n \/\/ ChainLs returns a channel of tipsets from head to genesis\n func (api *API) ChainLs(ctx context.Context) <-chan interface{} {\n-\treturn api.chain.Ls(ctx)\n+\treturn api.chain.BlockHistory(ctx, api.chain.Head())\n }\n \n \/\/ BlockGet gets a block by CID\n func (api *API) BlockGet(ctx context.Context, id cid.Cid) (*types.Block, error) {\n-\treturn api.chain.BlockGet(ctx, id)\n+\treturn api.chain.GetBlock(ctx, id)\n }\n \n \/\/ MessagePoolRemove removes a message from the message pool","source_code":"package plumbing\n\nimport (\n\t\"context\"\n\n\t\"gx\/ipfs\/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw\/go-cid\"\n\t\"gx\/ipfs\/QmTu65MVbemtUxJEWgsTtzv9Zv9P8rvmqNA4eG9TrTRGYc\/go-libp2p-peer\"\n\tlogging \"gx\/ipfs\/QmbkT7eMTyXfpeyB3ZMxxcxg7XH8t6uXp49jqzz4HB7BGF\/go-log\"\n\n\t\"github.com\/filecoin-project\/go-filecoin\/address\"\n\t\"github.com\/filecoin-project\/go-filecoin\/core\"\n\t\"github.com\/filecoin-project\/go-filecoin\/exec\"\n\t\"github.com\/filecoin-project\/go-filecoin\/plumbing\/cfg\"\n\t\"github.com\/filecoin-project\/go-filecoin\/plumbing\/chn\"\n\t\"github.com\/filecoin-project\/go-filecoin\/plumbing\/msg\"\n\t\"github.com\/filecoin-project\/go-filecoin\/plumbing\/mthdsig\"\n\t\"github.com\/filecoin-project\/go-filecoin\/plumbing\/ntwk\"\n\t\"github.com\/filecoin-project\/go-filecoin\/types\"\n\t\"github.com\/filecoin-project\/go-filecoin\/wallet\"\n)\n\n\/\/ API is the plumbing implementation, the irreducible set of calls required\n\/\/ to implement protocols and user\/network-facing features. 
You probably should\n\/\/ depend on the higher level porcelain.API instead of this api, as it includes\n\/\/ these calls in addition to higher level convenience calls to make them more\n\/\/ ergonomic.\ntype API struct {\n\tlogger logging.EventLogger\n\n\tchain *chn.Reader\n\tconfig *cfg.Config\n\tmessagePool *core.MessagePool\n\tmsgPreviewer *msg.Previewer\n\tmsgQueryer *msg.Queryer\n\tmsgSender *msg.Sender\n\tmsgWaiter *msg.Waiter\n\tnetwork *ntwk.Network\n\tsigGetter *mthdsig.Getter\n\twallet *wallet.Wallet\n}\n\n\/\/ APIDeps contains all the API's dependencies\ntype APIDeps struct {\n\tChain *chn.Reader\n\tConfig *cfg.Config\n\tMessagePool *core.MessagePool\n\tMsgPreviewer *msg.Previewer\n\tMsgQueryer *msg.Queryer\n\tMsgSender *msg.Sender\n\tMsgWaiter *msg.Waiter\n\tNetwork *ntwk.Network\n\tSigGetter *mthdsig.Getter\n\tWallet *wallet.Wallet\n}\n\n\/\/ New constructs a new instance of the API.\nfunc New(deps *APIDeps) *API {\n\treturn &API{\n\t\tlogger: logging.Logger(\"porcelain\"),\n\n\t\tchain: deps.Chain,\n\t\tconfig: deps.Config,\n\t\tmessagePool: deps.MessagePool,\n\t\tmsgPreviewer: deps.MsgPreviewer,\n\t\tmsgQueryer: deps.MsgQueryer,\n\t\tmsgSender: deps.MsgSender,\n\t\tmsgWaiter: deps.MsgWaiter,\n\t\tnetwork: deps.Network,\n\t\tsigGetter: deps.SigGetter,\n\t\twallet: deps.Wallet,\n\t}\n}\n\n\/\/ ActorGetSignature returns the signature of the given actor's given method.\n\/\/ The function signature is typically used to enable a caller to decode the\n\/\/ output of an actor method call (message).\nfunc (api *API) ActorGetSignature(ctx context.Context, actorAddr address.Address, method string) (_ *exec.FunctionSignature, err error) {\n\treturn api.sigGetter.Get(ctx, actorAddr, method)\n}\n\n\/\/ ConfigSet sets the given parameters at the given path in the local config.\n\/\/ The given path may be either a single field name, or a dotted path to a field.\n\/\/ The JSON value may be either a single value or a whole data structure to be replaced.\n\/\/ For example:\n\/\/ ConfigSet(\"datastore.path\", \"dev\/null\") and ConfigSet(\"datastore\", \"{\\\"path\\\":\\\"dev\/null\\\"}\")\n\/\/ are the same operation.\nfunc (api *API) ConfigSet(dottedPath string, paramJSON string) error {\n\treturn api.config.Set(dottedPath, paramJSON)\n}\n\n\/\/ ConfigGet gets config parameters from the given path.\n\/\/ The path may be either a single field name, or a dotted path to a field.\nfunc (api *API) ConfigGet(dottedPath string) (interface{}, error) {\n\treturn api.config.Get(dottedPath)\n}\n\n\/\/ ChainHead returns the head tipset\nfunc (api *API) ChainHead(ctx context.Context) types.TipSet {\n\treturn api.chain.Head(ctx)\n}\n\n\/\/ ChainLs returns a channel of tipsets from head to genesis\nfunc (api *API) ChainLs(ctx context.Context) <-chan interface{} {\n\treturn api.chain.Ls(ctx)\n}\n\n\/\/ BlockGet gets a block by CID\nfunc (api *API) BlockGet(ctx context.Context, id cid.Cid) (*types.Block, error) {\n\treturn api.chain.BlockGet(ctx, id)\n}\n\n\/\/ MessagePoolRemove removes a message from the message pool\nfunc (api *API) MessagePoolRemove(cid cid.Cid) {\n\tapi.messagePool.Remove(cid)\n}\n\n\/\/ MessagePreview previews the Gas cost of a message by running it locally on the client and\n\/\/ recording the amount of Gas used.\nfunc (api *API) MessagePreview(ctx context.Context, from, to address.Address, method string, params ...interface{}) (types.GasUnits, error) {\n\treturn api.msgPreviewer.Preview(ctx, from, to, method, params...)\n}\n\n\/\/ MessageQuery calls an actor's method using the most 
recent chain state. It is read-only,\n\/\/ it does not change any state. It is used to interrogate actor state. The from address\n\/\/ is optional; if not provided, an address will be chosen from the node's wallet.\nfunc (api *API) MessageQuery(ctx context.Context, optFrom, to address.Address, method string, params ...interface{}) ([][]byte, *exec.FunctionSignature, error) {\n\treturn api.msgQueryer.Query(ctx, optFrom, to, method, params...)\n}\n\n\/\/ MessageSend sends a message. It uses the default from address if none is given and signs the\n\/\/ message using the wallet. This call \"sends\" in the sense that it enqueues the\n\/\/ message in the msg pool and broadcasts it to the network; it does not wait for the\n\/\/ message to go on chain. Note that no default from address is provided. If you need\n\/\/ a default address, use MessageSendWithDefaultAddress instead.\nfunc (api *API) MessageSend(ctx context.Context, from, to address.Address, value *types.AttoFIL, gasPrice types.AttoFIL, gasLimit types.GasUnits, method string, params ...interface{}) (cid.Cid, error) {\n\treturn api.msgSender.Send(ctx, from, to, value, gasPrice, gasLimit, method, params...)\n}\n\n\/\/ MessageWait invokes the callback when a message with the given cid appears on chain.\n\/\/ It will find the message in both the case that it is already on chain and\n\/\/ the case that it appears in a newly mined block. An error is returned if one is\n\/\/ encountered or if the context is canceled. Otherwise, it waits forever for the message\n\/\/ to appear on chain.\nfunc (api *API) MessageWait(ctx context.Context, msgCid cid.Cid, cb func(*types.Block, *types.SignedMessage, *types.MessageReceipt) error) error {\n\treturn api.msgWaiter.Wait(ctx, msgCid, cb)\n}\n\n\/\/ NetworkGetPeerID gets the current peer id from Util\nfunc (api *API) NetworkGetPeerID() peer.ID {\n\treturn api.network.GetPeerID()\n}\n\n\/\/ SignBytes uses private key information associated with the given address to sign the given bytes.\nfunc (api *API) SignBytes(data []byte, addr address.Address) (types.Signature, error) {\n\treturn api.wallet.SignBytes(data, addr)\n}\n\n\/\/ WalletAddresses gets addresses from the wallet\nfunc (api *API) WalletAddresses() []address.Address {\n\treturn api.wallet.Addresses()\n}\n\n\/\/ WalletFind finds addresses on the wallet\nfunc (api *API) WalletFind(address address.Address) (wallet.Backend, error) {\n\treturn api.wallet.Find(address)\n}\n\n\/\/ WalletNewAddress generates a new wallet address\nfunc (api *API) WalletNewAddress() (address.Address, error) {\n\treturn wallet.NewAddress(api.wallet)\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":163} {"diff_hunk":"@@ -26,6 +26,10 @@ import (\n )\n \n const cloneStepID = \"CLONE\"\n+const checkoutStepID = \"CHECKOUT\"\n+const javaArtifactsID = \"JAVA-ARTIFACTS\"\n+const preStepID = \"PREPROCESS\"\n+const extractStepID = \"EXTRACT\"\n \n \/\/ commonSteps returns cloudbuild BuildSteps for copying a repo and creating\n \/\/ an output directory.","source_code":"\/*\n * Copyright 2018 The Kythe Authors. 
All rights reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\n\t\"kythe.io\/kythe\/go\/extractors\/constants\"\n\n\t\"google.golang.org\/api\/cloudbuild\/v1\"\n)\n\nconst cloneStepID = \"CLONE\"\n\n\/\/ commonSteps returns cloudbuild BuildSteps for copying a repo and creating\n\/\/ an output directory.\n\/\/\n\/\/ The BuildStep for the repo copy uses id cloneStepID, as described in\n\/\/ https:\/\/cloud.google.com\/cloud-build\/docs\/build-config#id, for any future\n\/\/ steps that need to depend on the repo clone step. The repo copy step puts\n\/\/ the code into \/workspace\/code.\n\/\/\n\/\/ The output directory is \/workspace\/out.\nfunc commonSteps() []*cloudbuild.BuildStep {\n\treturn []*cloudbuild.BuildStep{\n\t\t&cloudbuild.BuildStep{\n\t\t\tName: constants.GCRGitImage, \/\/ This triggers with command 'git'.\n\t\t\tArgs: []string{\"clone\", repoName, \"\/workspace\/code\"},\n\t\t\tId: cloneStepID,\n\t\t\tWaitFor: []string{\"-\"},\n\t\t},\n\t\t&cloudbuild.BuildStep{\n\t\t\tName: \"ubuntu\", \/\/ This, however, has no entrypoint command.\n\t\t\tArgs: []string{\"mkdir\", \"\/workspace\/out\"},\n\t\t\tWaitFor: []string{\"-\"},\n\t\t},\n\t}\n}\n\nfunc preprocessorStep(build string) *cloudbuild.BuildStep {\n\treturn &cloudbuild.BuildStep{\n\t\tName: constants.KytheBuildPreprocessorImage,\n\t\tArgs: []string{build},\n\t\tWaitFor: []string{cloneStepID},\n\t}\n}\n\n\/\/ TODO(#3095): This step needs to be configurable by the java version used for\n\/\/ a given BuildTarget.\nfunc javaExtractorsStep() *cloudbuild.BuildStep {\n\treturn &cloudbuild.BuildStep{\n\t\tName: constants.KytheJavacExtractorArtifactsImage,\n\t\tVolumes: []*cloudbuild.Volume{\n\t\t\t&cloudbuild.Volume{\n\t\t\t\tName: javaVolumeName,\n\t\t\t\tPath: constants.DefaultExtractorsDir,\n\t\t\t},\n\t\t},\n\t\tWaitFor: []string{\"-\"},\n\t}\n}\n\nfunc zipMergeStep() *cloudbuild.BuildStep {\n\treturn &cloudbuild.BuildStep{\n\t\tName: constants.KytheKzipToolsImage,\n\t\tEntrypoint: \"bash\",\n\t\tArgs: []string{\n\t\t\t\"-c\",\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"%s merge --output %s %s\/*.kzip\",\n\t\t\t\tconstants.DefaultKzipToolLocation,\n\t\t\t\tpath.Join(outputDirectory, outputFilePattern),\n\t\t\t\toutputDirectory),\n\t\t},\n\t}\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":164} {"diff_hunk":"@@ -74,6 +74,20 @@ var configSetCmd = &cobra.Command{\n \t},\n }\n \n+\/\/ configResetCmd represents the config reset command\n+var configResetCmd = &cobra.Command{\n+\tUse: \"reset\",\n+\tShort: \"Reset config to default\",\n+\tRunE: func(cmd *cobra.Command, args []string) error {\n+\t\tcmd.SilenceUsage = true\n+\t\toutput, err := reset()\n+\t\tif err == nil {\n+\t\t\tfmt.Println(output)\n+\t\t}\n+\t\treturn err\n+\t},\n+}\n+\n func init() {\n \tconfigSetCmd.Flags().BoolVar(&Insecure, \"insecure\", false,\n \t\t\"set insecure connection as default\")","source_code":"\/\/ Copyright (c) 2019 IoTeX\n\/\/ This is an alpha (internal) release and is 
not suitable for production. This source code is provided 'as is' and no\n\/\/ warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent\n\/\/ permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache\n\/\/ License 2.0 that can be found in the LICENSE file.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/iotexproject\/iotex-core\/ioctl\/validator\"\n)\n\nconst (\n\tipPattern = `((25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.){3}(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)`\n\tdomainPattern = `[a-zA-Z0-9][a-zA-Z0-9_-]{0,62}(\\.[a-zA-Z0-9][a-zA-Z0-9_-]{0,62})*(\\.[a-zA-Z][a-zA-Z0-9]{0,10}){1}`\n\tlocalPattern = \"localhost\"\n\tendpointPattern = \"(\" + ipPattern + \"|(\" + domainPattern + \")\" + \"|(\" + localPattern + \"))\" + `(:\\d{1,5})?`\n)\n\nvar (\n\tvalidArgs = []string{\"endpoint\", \"wallet\", \"currentcontext\"}\n\tendpointCompile = regexp.MustCompile(\"^\" + endpointPattern + \"$\")\n)\n\n\/\/ configGetCmd represents the config get command\nvar configGetCmd = &cobra.Command{\n\tUse: \"get VARIABLE\",\n\tShort: \"Get config from ioctl\",\n\tValidArgs: validArgs,\n\tArgs: func(cmd *cobra.Command, args []string) error {\n\t\tif len(args) != 1 {\n\t\t\treturn fmt.Errorf(\"accepts 1 arg(s), received %d,\"+\n\t\t\t\t\" valid arg(s): %s\", len(args), validArgs)\n\t\t}\n\t\treturn cobra.OnlyValidArgs(cmd, args)\n\t},\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tcmd.SilenceUsage = true\n\t\toutput, err := Get(args[0])\n\t\tif err == nil {\n\t\t\tfmt.Println(output)\n\t\t}\n\t\treturn err\n\t},\n}\n\n\/\/ configSetCmd represents the config set command\nvar configSetCmd = &cobra.Command{\n\tUse: \"set VARIABLE VALUE\",\n\tShort: \"Set config for ioctl\",\n\tValidArgs: validArgs,\n\tArgs: func(cmd *cobra.Command, args []string) error {\n\t\tif len(args) != 2 {\n\t\t\treturn fmt.Errorf(\"accepts 2 arg(s), received %d,\"+\n\t\t\t\t\" valid arg(s): %s\", len(args), validArgs)\n\t\t}\n\t\treturn cobra.OnlyValidArgs(cmd, args[:1])\n\t},\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tcmd.SilenceUsage = true\n\t\toutput, err := set(args)\n\t\tif err == nil {\n\t\t\tfmt.Println(output)\n\t\t}\n\t\treturn err\n\t},\n}\n\nfunc init() {\n\tconfigSetCmd.Flags().BoolVar(&Insecure, \"insecure\", false,\n\t\t\"set insecure connection as default\")\n}\n\n\/\/ Get gets config variable\nfunc Get(arg string) (string, error) {\n\tswitch arg {\n\tdefault:\n\t\treturn \"\", ErrConfigNotMatch\n\tcase \"endpoint\":\n\t\tif ReadConfig.Endpoint == \"\" {\n\t\t\treturn \"\", ErrEmptyEndpoint\n\t\t}\n\t\treturn fmt.Sprint(ReadConfig.Endpoint, \" secure connect(TLS):\",\n\t\t\tReadConfig.SecureConnect), nil\n\tcase \"wallet\":\n\t\treturn ReadConfig.Wallet, nil\n\tcase \"currentcontext\":\n\t\treturn fmt.Sprint(ReadConfig.CurrentContext), nil\n\t}\n}\n\n\/\/ GetContextAddressOrAlias gets current context\nfunc GetContextAddressOrAlias() (string, error) {\n\tcurrentcontext := ReadConfig.CurrentContext\n\tif strings.EqualFold(currentcontext.AddressOrAlias, \"\") {\n\t\treturn \"\", fmt.Errorf(`use \"ioctl config set currentcontext address or alias\" to config current account first`)\n\t}\n\treturn currentcontext.AddressOrAlias, nil\n}\n\n\/\/ GetAddressOrAlias gets address from args or context\nfunc GetAddressOrAlias(args []string) (address string, err error) {\n\tif len(args) == 1 && 
!strings.EqualFold(args[0], \"\") {\n\t\taddress = args[0]\n\t} else {\n\t\taddress, err = GetContextAddressOrAlias()\n\t}\n\treturn\n}\n\n\/\/ make sure endpoint matches the pattern\nfunc isMatch(endpoint string) bool {\n\treturn endpointCompile.MatchString(endpoint)\n}\n\n\/\/ set sets config variable\nfunc set(args []string) (string, error) {\n\tswitch args[0] {\n\tdefault:\n\t\treturn \"\", ErrConfigNotMatch\n\tcase \"endpoint\":\n\t\tif !isMatch(args[1]) {\n\t\t\treturn \"\", fmt.Errorf(\"Endpoint %s is not valid\", args[1])\n\t\t}\n\t\tReadConfig.Endpoint = args[1]\n\t\tReadConfig.SecureConnect = !Insecure\n\tcase \"wallet\":\n\t\tReadConfig.Wallet = args[1]\n\tcase \"currentcontext\":\n\t\terr1 := validator.ValidateAlias(args[1])\n\t\terr2 := validator.ValidateAddress(args[1])\n\t\tif err1 != nil && err2 != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to validate alias or address:%s %s\", err1, err2)\n\t\t}\n\t\tReadConfig.CurrentContext.AddressOrAlias = args[1]\n\t}\n\tout, err := yaml.Marshal(&ReadConfig)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif err := ioutil.WriteFile(DefaultConfigFile, out, 0600); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to write to config file %s\", DefaultConfigFile)\n\t}\n\treturn args[0] + \" is set to \" + args[1], nil\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":165} {"diff_hunk":"@@ -112,11 +112,10 @@ func (s *Store) GetObject(ctx context.Context, path string) (object filestore.Ob\n \n \tobject.Path = path\n \tobject.Content = content\n-\tobject.Size = int64(len(content))\n \treturn\n }\n \n-func (s *Store) PutObject(ctx context.Context, path string, content []byte) error {\n+func (s *Store) Put(ctx context.Context, path string, content []byte) error {\n \twc := s.client.Bucket(s.bucket).Object(path).NewWriter(ctx)\n \tif _, err := wc.Write(content); err != nil {\n \t\twc.Close()","source_code":"\/\/ Copyright 2020 The PipeCD Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gcs\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"cloud.google.com\/go\/storage\"\n\t\"go.uber.org\/zap\"\n\t\"google.golang.org\/api\/iterator\"\n\t\"google.golang.org\/api\/option\"\n\n\t\"github.com\/pipe-cd\/pipe\/pkg\/filestore\"\n)\n\ntype Store struct {\n\tclient *storage.Client\n\tbucket string\n\tcredentialsFile string\n\thttpClient *http.Client\n\tlogger *zap.Logger\n}\n\ntype Option func(*Store)\n\nfunc WithCredentialsFile(path string) Option {\n\treturn func(s *Store) {\n\t\ts.credentialsFile = path\n\t}\n}\n\nfunc WithHTTPClient(client *http.Client) Option {\n\treturn func(s *Store) {\n\t\ts.httpClient = client\n\t}\n}\n\nfunc WithLogger(logger *zap.Logger) Option {\n\treturn func(s *Store) {\n\t\ts.logger = logger.Named(\"gcs\")\n\t}\n}\n\nfunc NewStore(ctx context.Context, bucket string, opts ...Option) (*Store, error) {\n\ts := &Store{\n\t\tbucket: bucket,\n\t\tlogger: zap.NewNop(),\n\t}\n\tfor _, opt := range opts {\n\t\topt(s)\n\t}\n\n\tvar 
options []option.ClientOption\n\tif s.credentialsFile != \"\" {\n\t\toptions = append(options, option.WithCredentialsFile(s.credentialsFile))\n\t}\n\tif s.httpClient != nil {\n\t\toptions = append(options, option.WithHTTPClient(s.httpClient))\n\t}\n\tclient, err := storage.NewClient(ctx, options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.client = client\n\treturn s, nil\n}\n\nfunc (s *Store) NewReader(ctx context.Context, path string) (rc io.ReadCloser, err error) {\n\trc, err = s.client.Bucket(s.bucket).Object(path).NewReader(ctx)\n\tswitch err {\n\tcase nil:\n\tcase storage.ErrObjectNotExist:\n\t\terr = filestore.ErrNotFound\n\t\treturn\n\tdefault:\n\t\ts.logger.Error(\"failed to create GCS object reader\", zap.String(\"path\", path), zap.Error(err))\n\t\treturn\n\t}\n\treturn\n}\n\nfunc (s *Store) GetObject(ctx context.Context, path string) (object filestore.Object, err error) {\n\trc, err := s.NewReader(ctx, path)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif err := rc.Close(); err != nil {\n\t\t\ts.logger.Error(\"failed to close object reader\")\n\t\t}\n\t}()\n\n\tcontent, err := ioutil.ReadAll(rc)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tobject.Path = path\n\tobject.Content = content\n\tobject.Size = int64(len(content))\n\treturn\n}\n\nfunc (s *Store) PutObject(ctx context.Context, path string, content []byte) error {\n\twc := s.client.Bucket(s.bucket).Object(path).NewWriter(ctx)\n\tif _, err := wc.Write(content); err != nil {\n\t\twc.Close()\n\t\treturn err\n\t}\n\tif err := wc.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s *Store) ListObjects(ctx context.Context, prefix string) ([]filestore.Object, error) {\n\tvar objects []filestore.Object\n\tquery := &storage.Query{\n\t\tPrefix: prefix,\n\t}\n\tit := s.client.Bucket(s.bucket).Objects(ctx, query)\n\tfor {\n\t\tattrs, err := it.Next()\n\t\tif err == iterator.Done {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\ts.logger.Error(\"failed to iterate to the next object\",\n\t\t\t\tzap.String(\"prefix\", prefix),\n\t\t\t\tzap.Error(err),\n\t\t\t)\n\t\t\treturn nil, err\n\t\t}\n\t\tobject := filestore.Object{\n\t\t\tPath: attrs.Name,\n\t\t\tSize: attrs.Size,\n\t\t\tContent: []byte{},\n\t\t}\n\t\tobjects = append(objects, object)\n\t}\n\treturn objects, nil\n}\n\nfunc (s *Store) Close() error {\n\treturn s.client.Close()\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":166} {"diff_hunk":"@@ -1,3 +1,4 @@\n+\/\/go:build cgo\n \/\/ +build cgo\n \n package sqlstore","source_code":"\/\/ +build cgo\n\npackage sqlstore\n\nimport (\n\t\"errors\"\n\t\"net\/url\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/mattn\/go-sqlite3\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\/\/ gorm sqlite dialect init registration\n\t_ \"github.com\/jinzhu\/gorm\/dialects\/sqlite\"\n)\n\ntype sqliteDB struct {\n\tlog logrus.FieldLogger\n}\n\nfunc (s sqliteDB) connect(cfg *configuration, isReadOnly bool) (db *gorm.DB, version string, supportsCTE bool, err error) {\n\tif isReadOnly {\n\t\ts.log.Warn(\"Read-only connection is not applicable for sqlite3. 
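
A short, hedged sketch of reading an object through the GCS Store record above (id 166); the context, bucket, path, and credentials values are illustrative assumptions:

	// ctx is assumed to be an existing context.Context.
	store, err := NewStore(ctx, "example-bucket", WithCredentialsFile("/etc/gcs-creds.json"))
	if err != nil {
		return err
	}
	obj, err := store.GetObject(ctx, "deployments/plan.json")
	// On success, obj.Content holds the object's raw bytes.
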
Falling back to primary connection\")\n\t}\n\n\tdb, err = openSQLite3(cfg.ConnectionString)\n\tif err != nil {\n\t\treturn nil, \"\", false, err\n\t}\n\n\tversion, err = queryVersion(db, \"SELECT sqlite_version()\")\n\tif err != nil {\n\t\treturn nil, \"\", false, err\n\t}\n\n\t\/\/ The embedded version of SQLite3 unconditionally supports CTE.\n\treturn db, version, true, nil\n}\n\nfunc (s sqliteDB) isConstraintViolation(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\tvar e sqlite3.Error\n\tok := errors.As(err, &e)\n\treturn ok && e.Code == sqlite3.ErrConstraint\n}\n\nfunc openSQLite3(connString string) (*gorm.DB, error) {\n\tembellished, err := embellishSQLite3ConnString(connString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdb, err := gorm.Open(\"sqlite3\", embellished)\n\tif err != nil {\n\t\treturn nil, sqlError.Wrap(err)\n\t}\n\treturn db, nil\n}\n\n\/\/ embellishSQLite3ConnString adds query values supported by\n\/\/ github.com\/mattn\/go-sqlite3 to enable journal mode and foreign key support.\n\/\/ These query values MUST be part of the connection string in order to be\n\/\/ enabled for *each* connection opened by db\/sql. If the connection string is\n\/\/ not already a file: URI, it is converted first.\nfunc embellishSQLite3ConnString(connectionString string) (string, error) {\n\tu, err := url.Parse(connectionString)\n\tif err != nil {\n\t\treturn \"\", sqlError.Wrap(err)\n\t}\n\n\tswitch {\n\tcase u.Scheme == \"\":\n\t\t\/\/ connection string is a path. move the path section into the\n\t\t\/\/ opaque section so it renders properly for sqlite3, for example:\n\t\t\/\/ data.db = file:data.db\n\t\t\/\/ .\/data.db = file:.\/data.db\n\t\t\/\/ \/data.db = file:\/data.db\n\t\tu.Scheme = \"file\"\n\t\tu.Opaque, u.Path = u.Path, \"\"\n\tcase u.Scheme != \"file\":\n\t\t\/\/ only no scheme (i.e. file path) or file scheme is supported\n\t\treturn \"\", sqlError.New(\"unsupported scheme %q\", u.Scheme)\n\t}\n\n\tq := u.Query()\n\tq.Set(\"_foreign_keys\", \"ON\")\n\tq.Set(\"_journal_mode\", \"WAL\")\n\tu.RawQuery = q.Encode()\n\treturn u.String(), nil\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":167} {"diff_hunk":"@@ -5,6 +5,14 @@\n \/\/ This file defines service deployment resources.\n package deploy\n \n+import (\n+\t\"fmt\"\n+\t\"strings\"\n+)\n+\n+\/\/ FmtTaskECRRepoName is the pattern used to generate the ECR repository's name\n+const FmtTaskECRRepoName = \"copilot-%s\"\n+\n \/\/ CreateTaskResourcesInput holds the fields required to create a task stack.\n type CreateTaskResourcesInput struct {\n \tName string","source_code":"\/\/ Copyright Amazon.com, Inc. 
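
To make the rewriting done by embellishSQLite3ConnString (record 167 above) concrete, a sketch of the outputs implied by its doc comment; query-parameter order depends on url.Values encoding, so treat the strings as illustrative:

	// "data.db"    -> "file:data.db?_foreign_keys=ON&_journal_mode=WAL"
	// "./data.db"  -> "file:./data.db?_foreign_keys=ON&_journal_mode=WAL"
	// "postgres:x" -> error: unsupported scheme "postgres"
	embellished, err := embellishSQLite3ConnString("data.db")
	_, _ = embellished, err
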
All Rights Reserved.\n\/\/ SPDX-License-Identifier: Apache-2.0\n\n\/\/ Package deploy holds the structures to deploy infrastructure resources.\n\/\/ This file defines service deployment resources.\npackage deploy\n\n\/\/ CreateTaskResourcesInput holds the fields required to create a task stack.\ntype CreateTaskResourcesInput struct {\n\tName string\n\tCPU int\n\tMemory int\n\n\tImage string\n\tTaskRole string\n\tExecutionRole string\n\tCommand []string\n\tEnvVars map[string]string\n\n\tApp string\n\tEnv string\n\n\tAdditionalTags map[string]string\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":168} {"diff_hunk":"@@ -1,12 +1,14 @@\n package action\n \n import (\n+\t\"math\/big\"\n \t\"testing\"\n \n \t\"github.com\/iotexproject\/iotex-proto\/golang\/iotextypes\"\n \t\"github.com\/stretchr\/testify\/require\"\n \n \t\"github.com\/iotexproject\/iotex-core\/pkg\/unit\"\n+\t\"github.com\/iotexproject\/iotex-core\/state\"\n \t\"github.com\/iotexproject\/iotex-core\/test\/identityset\"\n )\n ","source_code":"package action\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/iotexproject\/iotex-proto\/golang\/iotextypes\"\n\t\"github.com\/stretchr\/testify\/require\"\n\n\t\"github.com\/iotexproject\/iotex-core\/pkg\/unit\"\n\t\"github.com\/iotexproject\/iotex-core\/test\/identityset\"\n)\n\nfunc TestEnvelope_Basic(t *testing.T) {\n\treq := require.New(t)\n\tevlp, tsf := createEnvelope()\n\treq.Equal(uint32(1), evlp.Version())\n\treq.Equal(uint64(10), evlp.Nonce())\n\treq.Equal(uint64(20010), evlp.GasLimit())\n\treq.Equal(\"11000000000000000000\", evlp.GasPrice().String())\n\tc, err := evlp.Cost()\n\treq.NoError(err)\n\treq.Equal(\"111010000000000000000000\", c.String())\n\tg, err := evlp.IntrinsicGas()\n\treq.NoError(err)\n\treq.Equal(uint64(10000), g)\n\td, ok := evlp.Destination()\n\treq.True(ok)\n\treq.Equal(\"io1jh0ekmccywfkmj7e8qsuzsupnlk3w5337hjjg2\", d)\n\ttsf2, ok := evlp.Action().(*Transfer)\n\treq.True(ok)\n\treq.Equal(tsf, tsf2)\n}\nfunc TestEnvelope_Proto(t *testing.T) {\n\treq := require.New(t)\n\teb, tsf := createEnvelope()\n\tevlp, ok := eb.(*envelope)\n\treq.True(ok)\n\n\tproto := evlp.Proto()\n\tactCore := &iotextypes.ActionCore{\n\t\tVersion: evlp.version,\n\t\tNonce: evlp.nonce,\n\t\tGasLimit: evlp.gasLimit,\n\t}\n\tactCore.GasPrice = evlp.gasPrice.String()\n\tactCore.Action = &iotextypes.ActionCore_Transfer{Transfer: tsf.Proto()}\n\treq.Equal(actCore, proto)\n\n\treq.NoError(evlp.LoadProto(proto))\n\ttsf2, ok := evlp.Action().(*Transfer)\n\treq.True(ok)\n\treq.Equal(tsf.amount, tsf2.amount)\n\treq.Equal(tsf.recipient, tsf2.recipient)\n\treq.Equal(tsf.payload, tsf2.payload)\n}\n\nfunc createEnvelope() (Envelope, *Transfer) {\n\ttsf, _ := NewTransfer(\n\t\tuint64(10),\n\t\tunit.ConvertIotxToRau(1000+int64(10)),\n\t\tidentityset.Address(10%identityset.Size()).String(),\n\t\tnil,\n\t\t20000+uint64(10),\n\t\tunit.ConvertIotxToRau(1+int64(10)),\n\t)\n\teb := EnvelopeBuilder{}\n\tevlp := eb.\n\t\tSetAction(tsf).\n\t\tSetGasLimit(tsf.GasLimit()).\n\t\tSetGasPrice(tsf.GasPrice()).\n\t\tSetNonce(tsf.Nonce()).\n\t\tSetVersion(1).\n\t\tBuild()\n\treturn evlp, tsf\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":169} {"diff_hunk":"@@ -35,7 +35,7 @@ func (b *builder) createGoInstruction(funcPtr llvm.Value, params []llvm.Value, p\n \t\t\t\/\/ The stack size is fixed at compile time. 
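
A small illustrative literal for the CreateTaskResourcesInput struct from record 168 above; every field value is a made-up placeholder, not a recommendation:

	in := CreateTaskResourcesInput{
		Name:   "my-task",
		CPU:    256,
		Memory: 512,
		Image:  "alpine:3.12", // placeholder image reference
		App:    "demo",
		Env:    "test",
	}
	_ = in
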
By emitting it here as a\n \t\t\t\/\/ constant, it can be optimized.\n \t\t\tif b.Scheduler == \"tasks\" && b.DefaultStackSize == 0 {\n-\t\t\t\tb.addError(pos, \"default stack size for goroutines is not set\")\n+\t\t\t\tb.addError(instr.Pos(), \"default stack size for goroutines is not set\")\n \t\t\t}\n \t\t\tstackSize = llvm.ConstInt(b.uintptrType, b.DefaultStackSize, false)\n \t\t}","source_code":"package compiler\n\n\/\/ This file implements the 'go' keyword to start a new goroutine. See\n\/\/ goroutine-lowering.go for more details.\n\nimport (\n\t\"go\/token\"\n\n\t\"github.com\/tinygo-org\/tinygo\/compiler\/llvmutil\"\n\t\"golang.org\/x\/tools\/go\/ssa\"\n\t\"tinygo.org\/x\/go-llvm\"\n)\n\n\/\/ createGoInstruction starts a new goroutine with the provided function pointer\n\/\/ and parameters.\n\/\/ In general, you should pass all regular parameters plus the context parameter.\n\/\/ There is one exception: the task-based scheduler needs to have the function\n\/\/ pointer passed in as a parameter too in addition to the context.\n\/\/\n\/\/ Because a go statement doesn't return anything, return undef.\nfunc (b *builder) createGoInstruction(funcPtr llvm.Value, params []llvm.Value, prefix string, pos token.Pos) llvm.Value {\n\tparamBundle := b.emitPointerPack(params)\n\tvar callee, stackSize llvm.Value\n\tswitch b.Scheduler {\n\tcase \"none\", \"tasks\":\n\t\tcallee = b.createGoroutineStartWrapper(funcPtr, prefix, pos)\n\t\tif b.AutomaticStackSize {\n\t\t\t\/\/ The stack size is not known until after linking. Call a dummy\n\t\t\t\/\/ function that will be replaced with a load from a special ELF\n\t\t\t\/\/ section that contains the stack size (and is modified after\n\t\t\t\/\/ linking).\n\t\t\tstackSizeFn := b.getFunction(b.program.ImportedPackage(\"internal\/task\").Members[\"getGoroutineStackSize\"].(*ssa.Function))\n\t\t\tstackSize = b.createCall(stackSizeFn, []llvm.Value{callee, llvm.Undef(b.i8ptrType), llvm.Undef(b.i8ptrType)}, \"stacksize\")\n\t\t} else {\n\t\t\t\/\/ The stack size is fixed at compile time. By emitting it here as a\n\t\t\t\/\/ constant, it can be optimized.\n\t\t\tif b.Scheduler == \"tasks\" && b.DefaultStackSize == 0 {\n\t\t\t\tb.addError(pos, \"default stack size for goroutines is not set\")\n\t\t\t}\n\t\t\tstackSize = llvm.ConstInt(b.uintptrType, b.DefaultStackSize, false)\n\t\t}\n\tcase \"coroutines\":\n\t\tcallee = b.CreatePtrToInt(funcPtr, b.uintptrType, \"\")\n\t\t\/\/ There is no goroutine stack size: coroutines are used instead of\n\t\t\/\/ stacks.\n\t\tstackSize = llvm.Undef(b.uintptrType)\n\tdefault:\n\t\tpanic(\"unreachable\")\n\t}\n\tstart := b.getFunction(b.program.ImportedPackage(\"internal\/task\").Members[\"start\"].(*ssa.Function))\n\tb.createCall(start, []llvm.Value{callee, paramBundle, stackSize, llvm.Undef(b.i8ptrType), llvm.ConstPointerNull(b.i8ptrType)}, \"\")\n\treturn llvm.Undef(funcPtr.Type().ElementType().ReturnType())\n}\n\n\/\/ createGoroutineStartWrapper creates a wrapper for the task-based\n\/\/ implementation of goroutines. For example, to call a function like this:\n\/\/\n\/\/ func add(x, y int) int { ... }\n\/\/\n\/\/ It creates a wrapper like this:\n\/\/\n\/\/ func add$gowrapper(ptr *unsafe.Pointer) {\n\/\/ args := (*struct{\n\/\/ x, y int\n\/\/ })(ptr)\n\/\/ add(args.x, args.y)\n\/\/ }\n\/\/\n\/\/ This is useful because the task-based goroutine start implementation only\n\/\/ allows a single (pointer) argument to the newly started goroutine. 
Also, it\n\/\/ ignores the return value because newly started goroutines do not have a\n\/\/ return value.\nfunc (c *compilerContext) createGoroutineStartWrapper(fn llvm.Value, prefix string, pos token.Pos) llvm.Value {\n\tvar wrapper llvm.Value\n\n\tbuilder := c.ctx.NewBuilder()\n\tdefer builder.Dispose()\n\n\tif !fn.IsAFunction().IsNil() {\n\t\t\/\/ See whether this wrapper has already been created. If so, return it.\n\t\tname := fn.Name()\n\t\twrapper = c.mod.NamedFunction(name + \"$gowrapper\")\n\t\tif !wrapper.IsNil() {\n\t\t\treturn llvm.ConstPtrToInt(wrapper, c.uintptrType)\n\t\t}\n\n\t\t\/\/ Create the wrapper.\n\t\twrapperType := llvm.FunctionType(c.ctx.VoidType(), []llvm.Type{c.i8ptrType}, false)\n\t\twrapper = llvm.AddFunction(c.mod, name+\"$gowrapper\", wrapperType)\n\t\twrapper.SetLinkage(llvm.LinkOnceODRLinkage)\n\t\twrapper.SetUnnamedAddr(true)\n\t\twrapper.AddAttributeAtIndex(-1, c.ctx.CreateStringAttribute(\"tinygo-gowrapper\", name))\n\t\tentry := c.ctx.AddBasicBlock(wrapper, \"entry\")\n\t\tbuilder.SetInsertPointAtEnd(entry)\n\n\t\tif c.Debug {\n\t\t\tpos := c.program.Fset.Position(pos)\n\t\t\tdiFuncType := c.dibuilder.CreateSubroutineType(llvm.DISubroutineType{\n\t\t\t\tFile: c.getDIFile(pos.Filename),\n\t\t\t\tParameters: nil, \/\/ do not show parameters in debugger\n\t\t\t\tFlags: 0, \/\/ ?\n\t\t\t})\n\t\t\tdifunc := c.dibuilder.CreateFunction(c.getDIFile(pos.Filename), llvm.DIFunction{\n\t\t\t\tName: \"\",\n\t\t\t\tFile: c.getDIFile(pos.Filename),\n\t\t\t\tLine: pos.Line,\n\t\t\t\tType: diFuncType,\n\t\t\t\tLocalToUnit: true,\n\t\t\t\tIsDefinition: true,\n\t\t\t\tScopeLine: 0,\n\t\t\t\tFlags: llvm.FlagPrototyped,\n\t\t\t\tOptimized: true,\n\t\t\t})\n\t\t\twrapper.SetSubprogram(difunc)\n\t\t\tbuilder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), difunc, llvm.Metadata{})\n\t\t}\n\n\t\t\/\/ Create the list of params for the call.\n\t\tparamTypes := fn.Type().ElementType().ParamTypes()\n\t\tparams := llvmutil.EmitPointerUnpack(builder, c.mod, wrapper.Param(0), paramTypes[:len(paramTypes)-1])\n\t\tparams = append(params, llvm.Undef(c.i8ptrType))\n\n\t\t\/\/ Create the call.\n\t\tbuilder.CreateCall(fn, params, \"\")\n\n\t} else {\n\t\t\/\/ For a function pointer like this:\n\t\t\/\/\n\t\t\/\/ var funcPtr func(x, y int) int\n\t\t\/\/\n\t\t\/\/ A wrapper like the following is created:\n\t\t\/\/\n\t\t\/\/ func .gowrapper(ptr *unsafe.Pointer) {\n\t\t\/\/ args := (*struct{\n\t\t\/\/ x, y int\n\t\t\/\/ fn func(x, y int) int\n\t\t\/\/ })(ptr)\n\t\t\/\/ args.fn(x, y)\n\t\t\/\/ }\n\t\t\/\/\n\t\t\/\/ With a bit of luck, identical wrapper functions like these can be\n\t\t\/\/ merged into one.\n\n\t\t\/\/ Create the wrapper.\n\t\twrapperType := llvm.FunctionType(c.ctx.VoidType(), []llvm.Type{c.i8ptrType}, false)\n\t\twrapper = llvm.AddFunction(c.mod, prefix+\".gowrapper\", wrapperType)\n\t\twrapper.SetLinkage(llvm.LinkOnceODRLinkage)\n\t\twrapper.SetUnnamedAddr(true)\n\t\twrapper.AddAttributeAtIndex(-1, c.ctx.CreateStringAttribute(\"tinygo-gowrapper\", \"\"))\n\t\tentry := c.ctx.AddBasicBlock(wrapper, \"entry\")\n\t\tbuilder.SetInsertPointAtEnd(entry)\n\n\t\tif c.Debug {\n\t\t\tpos := c.program.Fset.Position(pos)\n\t\t\tdiFuncType := c.dibuilder.CreateSubroutineType(llvm.DISubroutineType{\n\t\t\t\tFile: c.getDIFile(pos.Filename),\n\t\t\t\tParameters: nil, \/\/ do not show parameters in debugger\n\t\t\t\tFlags: 0, \/\/ ?\n\t\t\t})\n\t\t\tdifunc := c.dibuilder.CreateFunction(c.getDIFile(pos.Filename), llvm.DIFunction{\n\t\t\t\tName: \"\",\n\t\t\t\tFile: 
c.getDIFile(pos.Filename),\n\t\t\t\tLine: pos.Line,\n\t\t\t\tType: diFuncType,\n\t\t\t\tLocalToUnit: true,\n\t\t\t\tIsDefinition: true,\n\t\t\t\tScopeLine: 0,\n\t\t\t\tFlags: llvm.FlagPrototyped,\n\t\t\t\tOptimized: true,\n\t\t\t})\n\t\t\twrapper.SetSubprogram(difunc)\n\t\t\tbuilder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), difunc, llvm.Metadata{})\n\t\t}\n\n\t\t\/\/ Get the list of parameters, with the extra parameters at the end.\n\t\tparamTypes := fn.Type().ElementType().ParamTypes()\n\t\tparamTypes[len(paramTypes)-1] = fn.Type() \/\/ the last element is the function pointer\n\t\tparams := llvmutil.EmitPointerUnpack(builder, c.mod, wrapper.Param(0), paramTypes)\n\n\t\t\/\/ Get the function pointer.\n\t\tfnPtr := params[len(params)-1]\n\n\t\t\/\/ Ignore the last param, which isn't used anymore.\n\t\t\/\/ TODO: avoid this extra \"parent handle\" parameter in most functions.\n\t\tparams[len(params)-1] = llvm.Undef(c.i8ptrType)\n\n\t\t\/\/ Create the call.\n\t\tbuilder.CreateCall(fnPtr, params, \"\")\n\t}\n\n\t\/\/ Finish the function. Every basic block must end in a terminator, and\n\t\/\/ because goroutines never return a value we can simply return void.\n\tbuilder.CreateRetVoid()\n\n\t\/\/ Return a ptrtoint of the wrapper, not the function itself.\n\treturn builder.CreatePtrToInt(wrapper, c.uintptrType, \"\")\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":170} {"diff_hunk":"@@ -50,6 +50,7 @@ var (\n \t\tName: \"agreed-terms-and-conditions\",\n \t\tUsage: \"Agree with terms & conditions\",\n \t}\n+\n \t\/\/ FlagAccessPolicyAddress Trust oracle URL for retrieving access policies.\n \tFlagAccessPolicyAddress = cli.StringFlag{\n \t\tName: \"access-policy.address\",","source_code":"\/*\n * Copyright (C) 2019 The \"MysteriumNetwork\/node\" Authors.\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. If not, see .\n *\/\n\npackage config\n\nimport (\n\t\"time\"\n\n\t\"github.com\/mysteriumnetwork\/node\/metadata\"\n\t\"github.com\/urfave\/cli\/v2\"\n)\n\n\/\/ ServicesOptions describes options shared among multiple services\ntype ServicesOptions struct {\n\tAccessPolicyAddress string\n\tAccessPolicyList []string\n\tAccessPolicyFetchInterval time.Duration\n\tShaperEnabled bool\n}\n\nvar (\n\t\/\/ FlagIdentity keystore's identity.\n\tFlagIdentity = cli.StringFlag{\n\t\tName: \"identity\",\n\t\tUsage: \"Keystore's identity used to provide service. 
If not given identity will be created automatically\",\n\t\tValue: \"\",\n\t}\n\t\/\/ FlagIdentityPassphrase passphrase to unlock the identity.\n\tFlagIdentityPassphrase = cli.StringFlag{\n\t\tName: \"identity.passphrase\",\n\t\tUsage: \"Used to unlock keystore's identity\",\n\t\tValue: \"\",\n\t}\n\t\/\/ FlagAgreedTermsConditions agree with terms & conditions.\n\tFlagAgreedTermsConditions = cli.BoolFlag{\n\t\tName: \"agreed-terms-and-conditions\",\n\t\tUsage: \"Agree with terms & conditions\",\n\t}\n\t\/\/ FlagAccessPolicyAddress Trust oracle URL for retrieving access policies.\n\tFlagAccessPolicyAddress = cli.StringFlag{\n\t\tName: \"access-policy.address\",\n\t\tUsage: \"URL of trust oracle endpoint for retrieving lists of access policies\",\n\t\tValue: metadata.DefaultNetwork.AccessPolicyOracleAddress,\n\t}\n\t\/\/ FlagAccessPolicyList a comma-separated list of access policies that determines allowed identities to use the service.\n\tFlagAccessPolicyList = cli.StringFlag{\n\t\tName: \"access-policy.list\",\n\t\tUsage: \"Comma separated list that determines the access policies applied to provide service.\",\n\t\tValue: \"\",\n\t}\n\t\/\/ FlagAccessPolicyFetchInterval policy list fetch interval.\n\tFlagAccessPolicyFetchInterval = cli.DurationFlag{\n\t\tName: \"access-policy.fetch\",\n\t\tUsage: `Proposal fetch interval { \"30s\", \"3m\", \"1h20m30s\" }`,\n\t\tValue: 10 * time.Minute,\n\t}\n\t\/\/ FlagShaperEnabled enables bandwidth limitation.\n\tFlagShaperEnabled = cli.BoolFlag{\n\t\tName: \"shaper.enabled\",\n\t\tUsage: \"Limit service bandwidth\",\n\t}\n\t\/\/ FlagNoopPriceMinute sets the price per minute for provided noop service.\n\tFlagNoopPriceMinute = cli.Float64Flag{\n\t\tName: \"noop.price-minute\",\n\t\tUsage: \"Sets the price of the noop service per minute.\",\n\t\tValue: 0.0001,\n\t\tHidden: true,\n\t}\n)\n\n\/\/ RegisterFlagsServiceShared registers shared service CLI flags\nfunc RegisterFlagsServiceShared(flags *[]cli.Flag) {\n\t*flags = append(*flags,\n\t\t&FlagIdentity,\n\t\t&FlagIdentityPassphrase,\n\t\t&FlagAgreedTermsConditions,\n\t\t&FlagAccessPolicyAddress,\n\t\t&FlagAccessPolicyList,\n\t\t&FlagAccessPolicyFetchInterval,\n\t\t&FlagShaperEnabled,\n\t\t&FlagNoopPriceMinute,\n\t)\n}\n\n\/\/ ParseFlagsServiceShared parses shared service CLI flags and registers values to the configuration\nfunc ParseFlagsServiceShared(ctx *cli.Context) {\n\tCurrent.ParseStringFlag(ctx, FlagIdentity)\n\tCurrent.ParseStringFlag(ctx, FlagIdentityPassphrase)\n\tCurrent.ParseBoolFlag(ctx, FlagAgreedTermsConditions)\n\tCurrent.ParseStringFlag(ctx, FlagAccessPolicyAddress)\n\tCurrent.ParseStringFlag(ctx, FlagAccessPolicyList)\n\tCurrent.ParseDurationFlag(ctx, FlagAccessPolicyFetchInterval)\n\tCurrent.ParseBoolFlag(ctx, FlagShaperEnabled)\n\tCurrent.ParseFloat64Flag(ctx, FlagNoopPriceMinute)\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":171} {"diff_hunk":"@@ -22,6 +22,14 @@ type Bounds struct {\n \tindex int\n }\n \n+\/\/ NewBounds create a new Bounds given start and stop values\n+func NewBounds(start, stop values.Time) Bounds {\n+\treturn Bounds{\n+\t\tstart: start,\n+\t\tstop: stop,\n+\t}\n+}\n+\n func (b Bounds) Start() values.Time {\n \treturn b.start\n }","source_code":"package interval\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\n\t\"github.com\/influxdata\/flux\/values\"\n)\n\nconst (\n\tMaxTime = math.MaxInt64\n\tMinTime = math.MinInt64\n)\n\ntype Bounds struct {\n\tstart values.Time\n\tstop values.Time\n\t\/\/ index keeps track of how many windows have been added or subtracted 
as additional\n\t\/\/ windows are added to or subtracted from the initial bounds. In essence, it tracks the\n\t\/\/ offset from the original bounds in order to keep operations more straightforward.\n\t\/\/ See the Window struct and the window tests for additional info.\n\tindex int\n}\n\nfunc (b Bounds) Start() values.Time {\n\treturn b.start\n}\n\nfunc (b Bounds) Stop() values.Time {\n\treturn b.stop\n}\n\nfunc (b Bounds) IsEmpty() bool {\n\treturn b.start >= b.stop\n}\n\nfunc (b Bounds) String() string {\n\treturn fmt.Sprintf(\"[%v, %v)\", b.start, b.stop)\n}\n\nfunc (b Bounds) Contains(t values.Time) bool {\n\treturn t >= b.start && t < b.stop\n}\n\nfunc (b Bounds) Overlaps(o Bounds) bool {\n\treturn b.Contains(o.start) || (b.Contains(o.stop) && o.stop > b.start) || o.Contains(b.start)\n}\n\nfunc (b Bounds) Equal(o Bounds) bool {\n\treturn b == o\n}\n\nfunc (b Bounds) Length() values.Duration {\n\tif b.IsEmpty() {\n\t\treturn values.ConvertDurationNsecs(0)\n\t}\n\treturn b.stop.Sub(b.start)\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":172} {"diff_hunk":"@@ -5,7 +5,6 @@ import (\n \t\"crypto\/tls\"\n \t\"fmt\"\n \t\"net\"\n-\t\"sync\"\n \t\"time\"\n \n \t\"github.com\/ethereum\/go-ethereum\/common\"","source_code":"package locator\n\nimport (\n\t\"crypto\/ecdsa\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\tlog \"github.com\/noxiouz\/zapctx\/ctxlog\"\n\t\"github.com\/pkg\/errors\"\n\tpb \"github.com\/sonm-io\/core\/proto\"\n\t\"github.com\/sonm-io\/core\/util\"\n\t\"go.uber.org\/zap\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/peer\"\n\t\"google.golang.org\/grpc\/status\"\n)\n\nvar errNodeNotFound = errors.New(\"node with given Eth address cannot be found\")\n\ntype node struct {\n\tethAddr common.Address\n\tipAddr []string\n\tts time.Time\n}\n\ntype Locator struct {\n\tmx sync.Mutex\n\n\tconf *LocatorConfig\n\tdb map[common.Address]*node\n\tctx context.Context\n\tethKey *ecdsa.PrivateKey\n\tgrpc *grpc.Server\n\tcertRotator util.HitlessCertRotator\n\tcreds credentials.TransportCredentials\n}\n\nfunc (l *Locator) Announce(ctx context.Context, req *pb.AnnounceRequest) (*pb.Empty, error) {\n\tethAddr, err := l.extractEthAddr(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.G(l.ctx).Info(\"handling Announce request\",\n\t\tzap.Stringer(\"eth\", ethAddr), zap.Strings(\"ips\", req.IpAddr))\n\n\tl.putAnnounce(&node{\n\t\tethAddr: ethAddr,\n\t\tipAddr: req.IpAddr,\n\t})\n\n\treturn &pb.Empty{}, nil\n}\n\nfunc (l *Locator) Resolve(ctx context.Context, req *pb.ResolveRequest) (*pb.ResolveReply, error) {\n\tlog.G(l.ctx).Info(\"handling Resolve request\", zap.String(\"eth\", req.EthAddr))\n\n\tif !common.IsHexAddress(req.EthAddr) {\n\t\treturn nil, fmt.Errorf(\"invalid ethaddress %s\", req.EthAddr)\n\t}\n\n\tn, err := l.getResolve(common.HexToAddress(req.EthAddr))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &pb.ResolveReply{IpAddr: n.ipAddr}, nil\n}\n\nfunc (l *Locator) Serve() error {\n\tlis, err := net.Listen(\"tcp\", l.conf.ListenAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn l.grpc.Serve(lis)\n}\n\nfunc (l *Locator) extractEthAddr(ctx context.Context) (common.Address, error) {\n\tpr, ok := peer.FromContext(ctx)\n\tif !ok {\n\t\treturn common.Address{}, status.Error(codes.DataLoss, \"failed to get peer from ctx\")\n\t}\n\n\tswitch info := 
pr.AuthInfo.(type) {\n\tcase util.EthAuthInfo:\n\t\treturn info.Wallet, nil\n\tdefault:\n\t\treturn common.Address{}, status.Error(codes.Unauthenticated, \"wrong AuthInfo type\")\n\t}\n}\n\nfunc (l *Locator) putAnnounce(n *node) {\n\tl.mx.Lock()\n\tdefer l.mx.Unlock()\n\n\tn.ts = time.Now()\n\tl.db[n.ethAddr] = n\n}\n\nfunc (l *Locator) getResolve(ethAddr common.Address) (*node, error) {\n\tl.mx.Lock()\n\tdefer l.mx.Unlock()\n\n\tn, ok := l.db[ethAddr]\n\tif !ok {\n\t\treturn nil, errNodeNotFound\n\t}\n\n\treturn n, nil\n}\n\nfunc (l *Locator) cleanExpiredNodes() {\n\tt := time.NewTicker(l.conf.CleanupPeriod)\n\tdefer t.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\tl.traverseAndClean()\n\t\t}\n\t}\n}\n\nfunc (l *Locator) traverseAndClean() {\n\tdeadline := time.Now().Add(-1 * l.conf.NodeTTL)\n\n\tl.mx.Lock()\n\tdefer l.mx.Unlock()\n\n\tvar (\n\t\ttotal = len(l.db)\n\t\tdel uint64\n\t\tkeep uint64\n\t)\n\tfor addr, node := range l.db {\n\t\tif node.ts.Before(deadline) {\n\t\t\tdelete(l.db, addr)\n\t\t\tdel++\n\t\t} else {\n\t\t\tkeep++\n\t\t}\n\t}\n\n\tlog.G(l.ctx).Debug(\"expired nodes cleaned\",\n\t\tzap.Int(\"total\", total), zap.Uint64(\"keep\", keep), zap.Uint64(\"del\", del))\n}\n\nfunc NewLocator(ctx context.Context, conf *LocatorConfig, key *ecdsa.PrivateKey) (l *Locator, err error) {\n\tif key == nil {\n\t\treturn nil, errors.Wrap(err, \"private key should be provided\")\n\t}\n\n\tl = &Locator{\n\t\tdb: make(map[common.Address]*node),\n\t\tconf: conf,\n\t\tctx: ctx,\n\t\tethKey: key,\n\t}\n\n\tvar TLSConfig *tls.Config\n\tl.certRotator, TLSConfig, err = util.NewHitlessCertRotator(ctx, l.ethKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tl.creds = util.NewTLS(TLSConfig)\n\tsrv := util.MakeGrpcServer(l.creds)\n\tl.grpc = srv\n\n\tgo l.cleanExpiredNodes()\n\n\tpb.RegisterLocatorServer(srv, l)\n\n\treturn l, nil\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":173} {"diff_hunk":"@@ -5,6 +5,7 @@ import (\n \n \t\"google.golang.org\/grpc\/codes\"\n \t\"google.golang.org\/grpc\/status\"\n+\t\"google.golang.org\/protobuf\/types\/known\/structpb\"\n \tv1 \"k8s.io\/api\/core\/v1\"\n \tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n ","source_code":"package k8s\n\nimport (\n\t\"context\"\n\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/status\"\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\tk8sapiv1 \"github.com\/lyft\/clutch\/backend\/api\/k8s\/v1\"\n)\n\nfunc (s *svc) DescribeConfigMap(ctx context.Context, clientset, cluster, namespace, name string) (*k8sapiv1.ConfigMap, error) {\n\tcs, err := s.manager.GetK8sClientset(ctx, clientset, cluster, namespace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfigMapList, err := cs.CoreV1().ConfigMaps(cs.Namespace()).List(ctx, metav1.ListOptions{\n\t\tFieldSelector: \"metadata.name=\" + name,\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(configMapList.Items) == 1 {\n\t\treturn protoForConfigMap(cs.Cluster(), &configMapList.Items[0]), nil\n\t} else if len(configMapList.Items) > 1 {\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"located multiple config maps with name '%s'\", name)\n\t}\n\treturn nil, status.Error(codes.NotFound, \"unable to locate specified config map\")\n}\n\nfunc (s *svc) DeleteConfigMap(ctx context.Context, clientset, cluster, namespace, name string) error {\n\tcs, err := s.manager.GetK8sClientset(ctx, clientset, cluster, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\topts := 
metav1.DeleteOptions{}\n\n\treturn cs.CoreV1().ConfigMaps(cs.Namespace()).Delete(ctx, name, opts)\n}\n\nfunc (s *svc) ListConfigMaps(ctx context.Context, clientset, cluster, namespace string, listOptions *k8sapiv1.ListOptions) ([]*k8sapiv1.ConfigMap, error) {\n\tcs, err := s.manager.GetK8sClientset(ctx, clientset, cluster, namespace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts := ApplyListOptions(listOptions)\n\n\tconfigMapList, err := cs.CoreV1().ConfigMaps(cs.Namespace()).List(ctx, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar configMaps []*k8sapiv1.ConfigMap\n\tfor _, cm := range configMapList.Items {\n\t\tconfigMap := cm\n\t\tconfigMaps = append(configMaps, protoForConfigMap(cs.Cluster(), &configMap))\n\t}\n\n\treturn configMaps, nil\n}\n\nfunc protoForConfigMap(cluster string, k8sconfigMap *v1.ConfigMap) *k8sapiv1.ConfigMap {\n\tclusterName := k8sconfigMap.ClusterName\n\tif clusterName == \"\" {\n\t\tclusterName = cluster\n\t}\n\n\treturn &k8sapiv1.ConfigMap{\n\t\tCluster: clusterName,\n\t\tNamespace: k8sconfigMap.Namespace,\n\t\tName: k8sconfigMap.Name,\n\t\tLabels: k8sconfigMap.Labels,\n\t\tAnnotations: k8sconfigMap.Annotations,\n\t}\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":174} {"diff_hunk":"@@ -2,6 +2,7 @@ package cfg\n \n import (\n \t\"github.com\/filecoin-project\/go-filecoin\/repo\"\n+\t\"github.com\/pkg\/errors\"\n \t\"sync\"\n )\n ","source_code":"package cfg\n\nimport (\n\t\"github.com\/filecoin-project\/go-filecoin\/repo\"\n\t\"sync\"\n)\n\n\/\/ Config is plumbing implementation for setting and retrieving values from local config.\ntype Config struct {\n\trepo repo.Repo\n\tlock sync.Mutex\n}\n\n\/\/ NewConfig returns a new Config.\nfunc NewConfig(repo repo.Repo) *Config {\n\treturn &Config{repo: repo}\n}\n\n\/\/ Set sets a value in config\nfunc (s *Config) Set(dottedKey string, jsonString string) error {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\tcfg := s.repo.Config()\n\tif err := cfg.Set(dottedKey, jsonString); err != nil {\n\t\treturn err\n\t}\n\n\treturn s.repo.ReplaceConfig(cfg)\n}\n\n\/\/ Get gets a value from config\nfunc (s *Config) Get(dottedKey string) (interface{}, error) {\n\treturn s.repo.Config().Get(dottedKey)\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":175} {"diff_hunk":"@@ -130,7 +130,7 @@ func ListBuilderForYamls(yamls ...string) *ListBuilder {\n func ListBuilderForObjects(objs ...*unstructured.Unstructured) *ListBuilder {\n \tlb := &ListBuilder{list: &UnstructList{}}\n \tfor _, obj := range objs {\n-\t\tlb.list.items = append(lb.list.items, &Unstruct{obj})\n+\t\tlb.list.Items = append(lb.list.Items, &Unstruct{obj})\n \t}\n \treturn lb\n }","source_code":"\/*\nCopyright 2019 The OpenEBS Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v1alpha2\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/ghodss\/yaml\"\n\t\"github.com\/pkg\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n)\n\n\/\/ Unstruct holds an object of Unstructured\ntype Unstruct struct 
{\n\tobject *unstructured.Unstructured\n}\n\n\/\/ GetUnstructured converts Unstruct object\n\/\/ to API's Unstructured\nfunc (u *Unstruct) GetUnstructured() *unstructured.Unstructured {\n\treturn u.object\n}\n\n\/\/ Builder enables building of an\n\/\/ Unstructured instance\ntype Builder struct {\n\tunstruct *Unstruct\n\terrs []error\n}\n\n\/\/ NewBuilder returns a new instance of\n\/\/ empty Builder\nfunc NewBuilder() *Builder {\n\treturn &Builder{\n\t\tunstruct: &Unstruct{\n\t\t\t&unstructured.Unstructured{\n\t\t\t\tObject: map[string]interface{}{},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ BuilderForYaml returns a new instance of\n\/\/ Unstruct Builder by making use of the provided\n\/\/ YAML\nfunc BuilderForYaml(doc string) *Builder {\n\tb := NewBuilder()\n\terr := yaml.Unmarshal([]byte(doc), &b.unstruct.object)\n\tif err != nil {\n\t\tb.errs = append(b.errs, err)\n\t}\n\treturn b\n}\n\n\/\/ BuilderForObject returns a new instance of\n\/\/ Unstruct Builder by making use of the provided object\nfunc BuilderForObject(obj *unstructured.Unstructured) *Builder {\n\tb := NewBuilder()\n\tb.unstruct.object = obj\n\treturn b\n}\n\n\/\/ Build returns the Unstruct object created by\n\/\/ the Builder\nfunc (b *Builder) Build() (*Unstruct, error) {\n\tif len(b.errs) != 0 {\n\t\treturn nil, errors.Errorf(\"errors {%+v}\", b.errs)\n\t}\n\treturn b.unstruct, nil\n}\n\n\/\/ BuildAPIUnstructured returns the Unstruct object created by\n\/\/ the Builder\nfunc (b *Builder) BuildAPIUnstructured() (*unstructured.Unstructured, error) {\n\tif len(b.errs) != 0 {\n\t\treturn nil, errors.Errorf(\"errors {%+v}\", b.errs)\n\t}\n\treturn b.unstruct.object, nil\n}\n\n\/\/ UnstructList contains a list of Unstructured\n\/\/ items\ntype UnstructList struct {\n\titems []*Unstruct\n}\n\n\/\/ ListBuilder enables building a list\n\/\/ of an Unstruct instance\ntype ListBuilder struct {\n\tlist *UnstructList\n\terrs []error\n}\n\n\/\/ ListBuilderForYamls returns a new instance of\n\/\/ list Unstruct Builder by making use of the provided YAMLs\nfunc ListBuilderForYamls(yamls ...string) *ListBuilder {\n\tlb := &ListBuilder{list: &UnstructList{}}\n\tfor _, yaml := range yamls {\n\t\ty := strings.Split(strings.Trim(yaml, \"---\"), \"---\")\n\t\tfor _, f := range y {\n\t\t\tf = strings.TrimSpace(f)\n\t\t\ta, err := BuilderForYaml(f).Build()\n\t\t\tif err != nil {\n\t\t\t\tlb.errs = append(lb.errs, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlb.list.items = append(lb.list.items, a)\n\t\t}\n\t}\n\treturn lb\n}\n\n\/\/ ListBuilderForObjects returns a new instance of\n\/\/ list Unstruct Builder by making use of the provided\n\/\/ Unstructured object\nfunc ListBuilderForObjects(objs ...*unstructured.Unstructured) *ListBuilder {\n\tlb := &ListBuilder{list: &UnstructList{}}\n\tfor _, obj := range objs {\n\t\tlb.list.items = append(lb.list.items, &Unstruct{obj})\n\t}\n\treturn lb\n}\n\n\/\/ Build returns the list of Unstruct objects created by\n\/\/ the Builder\nfunc (l *ListBuilder) Build() ([]*Unstruct, error) {\n\tif len(l.errs) > 0 {\n\t\treturn nil, errors.Errorf(\"errors {%+v}\", l.errs)\n\t}\n\treturn l.list.items, nil\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":176} {"diff_hunk":"@@ -39,7 +39,8 @@ type ArtifactStore cp.ArtifactStore\n \/\/ New returns a CodePipeline client configured against the input session.\n func New(s *session.Session) *CodePipeline {\n \treturn &CodePipeline{\n-\t\tclient: cp.New(s),\n+\t\tclient: cp.New(s),\n+\t\trgClient: rg.New(s),\n \t}\n }","source_code":"\/\/ Copyright Amazon.com, Inc. 
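
A hedged sketch of the list builder from record 176 above applied to a multi-document YAML string; the documents are made-up placeholders:

	// Two documents separated by "---"; Build should yield two Unstruct items.
	docs := "kind: ConfigMap\napiVersion: v1\n---\nkind: Secret\napiVersion: v1"
	items, err := ListBuilderForYamls(docs).Build()
	if err != nil {
		// one or more documents failed to unmarshal
	}
	_ = items
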
or its affiliates. All Rights Reserved.\n\/\/ SPDX-License-Identifier: Apache-2.0\n\n\/\/ Package codepipeline provides a client to make API requests to AWS CodePipeline.\npackage codepipeline\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\tcp \"github.com\/aws\/aws-sdk-go\/service\/codepipeline\"\n)\n\ntype api interface {\n\tGetPipeline(*cp.GetPipelineInput) (*cp.GetPipelineOutput, error)\n\tListPipelines(*cp.ListPipelinesInput) (*cp.ListPipelinesOutput, error)\n}\n\n\/\/ CodePipeline wraps the AWS CodePipeline client.\ntype CodePipeline struct {\n\tclient api\n}\n\n\/\/ Pipeline contains information about the pipeline\n\/\/ TODO wrap nested resources or just use what the SDK provides?\ntype Pipeline struct {\n\tName string `json:\"name\"`\n\t\/\/ Stages []Stage `json:\"stages\"`\n\t\/\/ ArtifactStore ArtifactStore `json:\"artifactStore\"`\n}\n\n\/\/ Stage wraps the codepipeline pipeline stage\ntype Stage cp.StageDeclaration\n\n\/\/ ArtifactStore wraps the artifact store for the pipeline\ntype ArtifactStore cp.ArtifactStore\n\n\/\/ New returns a CodePipeline client configured against the input session.\nfunc New(s *session.Session) *CodePipeline {\n\treturn &CodePipeline{\n\t\tclient: cp.New(s),\n\t}\n}\n\n\/\/ GetPipeline retrieves information from a given pipeline\nfunc (c *CodePipeline) GetPipeline(pipelineName string) (*Pipeline, error) {\n\tinput := &cp.GetPipelineInput{\n\t\tName: aws.String(pipelineName),\n\t}\n\tresp, err := c.client.GetPipeline(input)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"get pipeline %s: %w\", pipelineName, err)\n\t}\n\tpipeline := &Pipeline{\n\t\tName: aws.StringValue(resp.Pipeline.Name),\n\t}\n\n\treturn pipeline, nil\n}\n\n\/\/ ListPipelines retrieves summaries of all pipelines for a project\nfunc (c *CodePipeline) ListPipelines() ([]string, error) {\n\tinput := &cp.ListPipelinesInput{}\n\tresp, err := c.client.ListPipelines(input)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"list pipelines: %w\", err)\n\t}\n\n\tvar pipelines []string\n\n\tfor _, ps := range resp.Pipelines {\n\t\tp := aws.StringValue(ps.Name)\n\t\tpipelines = append(pipelines, p)\n\t}\n\n\treturn pipelines, nil\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":177} {"diff_hunk":"@@ -183,8 +183,13 @@ func (d decoder) DecodeMap(f func(key string, d2 driver.Decoder) bool) {\n }\n \n func (d decoder) AsSpecial(v reflect.Value) (bool, interface{}, error) {\n-\tif bin, ok := d.val.(primitive.Binary); ok {\n-\t\treturn true, bin.Data, nil\n+\tswitch v := d.val.(type) {\n+\tcase primitive.Binary:\n+\t\treturn true, v.Data, nil\n+\tcase primitive.DateTime:\n+\t\t\/\/ A DateTime represents milliseconds since the Unix epoch.\n+\t\treturn true, time.Unix(int64(v)\/1000, int64(v)%1000*1e6), nil\n+\tdefault:\n+\t\treturn false, nil, nil\n \t}\n-\treturn false, nil, nil\n }","source_code":"\/\/ Copyright 2019 The Go Cloud Development Kit Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions 
and\n\/\/ limitations under the License.\n\npackage mongodocstore\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"go.mongodb.org\/mongo-driver\/bson\/primitive\"\n\t\"gocloud.dev\/internal\/docstore\/driver\"\n)\n\n\/\/ Encode and decode to map[string]interface{}.\n\/\/ This isn't ideal, because the mongo client encodes\/decodes a second time.\n\/\/ TODO(jba): find a way to do only one encode\/decode.\n\n\/\/ This code is copied from memdocstore\/codec.go, with some changes:\n\/\/ - special treatment for primitive.Binary\n\nfunc encodeDoc(doc driver.Document) (map[string]interface{}, error) {\n\tvar e encoder\n\tif err := doc.Encode(&e); err != nil {\n\t\treturn nil, err\n\t}\n\treturn e.val.(map[string]interface{}), nil\n}\n\nfunc encodeValue(x interface{}) (interface{}, error) {\n\tvar e encoder\n\tif err := driver.Encode(reflect.ValueOf(x), &e); err != nil {\n\t\treturn nil, err\n\t}\n\treturn e.val, nil\n}\n\ntype encoder struct {\n\tval interface{}\n}\n\nfunc (e *encoder) EncodeNil() { e.val = nil }\nfunc (e *encoder) EncodeBool(x bool) { e.val = x }\nfunc (e *encoder) EncodeInt(x int64) { e.val = x }\nfunc (e *encoder) EncodeUint(x uint64) { e.val = int64(x) }\nfunc (e *encoder) EncodeBytes(x []byte) { e.val = x }\nfunc (e *encoder) EncodeFloat(x float64) { e.val = x }\nfunc (e *encoder) EncodeComplex(x complex128) { e.val = x }\nfunc (e *encoder) EncodeString(x string) { e.val = x }\nfunc (e *encoder) ListIndex(int) { panic(\"impossible\") }\nfunc (e *encoder) MapKey(string) { panic(\"impossible\") }\nfunc (e *encoder) EncodeSpecial(reflect.Value) (bool, error) { return false, nil } \/\/ no special handling\n\nfunc (e *encoder) EncodeList(n int) driver.Encoder {\n\t\/\/ All slices and arrays are encoded as []interface{}\n\ts := make([]interface{}, n)\n\te.val = s\n\treturn &listEncoder{s: s}\n}\n\ntype listEncoder struct {\n\ts []interface{}\n\tencoder\n}\n\nfunc (e *listEncoder) ListIndex(i int) { e.s[i] = e.val }\n\ntype mapEncoder struct {\n\tm map[string]interface{}\n\tencoder\n}\n\nfunc (e *encoder) EncodeMap(n int) driver.Encoder {\n\tm := make(map[string]interface{}, n)\n\te.val = m\n\treturn &mapEncoder{m: m}\n}\n\nfunc (e *mapEncoder) MapKey(k string) { e.m[k] = e.val }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ decodeDoc decodes m into ddoc.\nfunc decodeDoc(m map[string]interface{}, ddoc driver.Document) error {\n\treturn ddoc.Decode(decoder{m})\n}\n\ntype decoder struct {\n\tval interface{}\n}\n\nfunc (d decoder) String() string {\n\treturn fmt.Sprint(d.val)\n}\n\nfunc (d decoder) AsNull() bool {\n\treturn d.val == nil\n}\n\nfunc (d decoder) AsBool() (bool, bool) {\n\tb, ok := d.val.(bool)\n\treturn b, ok\n}\n\nfunc (d decoder) AsString() (string, bool) {\n\ts, ok := d.val.(string)\n\treturn s, ok\n}\n\nfunc (d decoder) AsInt() (int64, bool) {\n\ti, ok := d.val.(int64)\n\treturn i, ok\n}\n\nfunc (d decoder) AsUint() (uint64, bool) {\n\ti, ok := d.val.(int64)\n\treturn uint64(i), ok\n}\n\nfunc (d decoder) AsFloat() (float64, bool) {\n\tf, ok := d.val.(float64)\n\treturn f, ok\n}\n\nfunc (d decoder) AsComplex() (complex128, bool) {\n\tc, ok := d.val.(complex128)\n\treturn c, ok\n}\n\nfunc (d decoder) AsBytes() ([]byte, bool) {\n\tswitch v := d.val.(type) {\n\tcase []byte:\n\t\treturn v, true\n\tcase primitive.Binary:\n\t\treturn v.Data, true\n\tdefault:\n\t\treturn nil, false\n\t}\n}\n\nfunc (d decoder) AsInterface() (interface{}, error) {\n\treturn d.val, nil\n}\n\nfunc (d decoder) 
ListLen() (int, bool) {\n\tif s, ok := d.val.([]interface{}); ok {\n\t\treturn len(s), true\n\t}\n\treturn 0, false\n}\n\nfunc (d decoder) DecodeList(f func(i int, d2 driver.Decoder) bool) {\n\tfor i, e := range d.val.([]interface{}) {\n\t\tif !f(i, decoder{e}) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (d decoder) MapLen() (int, bool) {\n\tif m, ok := d.val.(map[string]interface{}); ok {\n\t\treturn len(m), true\n\t}\n\treturn 0, false\n}\n\nfunc (d decoder) DecodeMap(f func(key string, d2 driver.Decoder) bool) {\n\tfor k, v := range d.val.(map[string]interface{}) {\n\t\tif !f(k, decoder{v}) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (d decoder) AsSpecial(v reflect.Value) (bool, interface{}, error) {\n\tif bin, ok := d.val.(primitive.Binary); ok {\n\t\treturn true, bin.Data, nil\n\t}\n\treturn false, nil, nil\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":178} {"diff_hunk":"@@ -8,6 +8,7 @@ import (\n \t\"time\"\n \n \t\"crypto\/ecdsa\"\n+\t\"fmt\"\n \t\"github.com\/spiffe\/spire\/pkg\/common\/util\"\n \t\"github.com\/spiffe\/spire\/proto\/api\/node\"\n \t\"github.com\/spiffe\/spire\/proto\/common\"","source_code":"package cache\n\nimport (\n\t\"crypto\/sha256\"\n\t\"hash\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"crypto\/ecdsa\"\n\t\"github.com\/spiffe\/spire\/pkg\/common\/util\"\n\t\"github.com\/spiffe\/spire\/proto\/api\/node\"\n\t\"github.com\/spiffe\/spire\/proto\/common\"\n)\n\ntype selectors []*common.Selector\n\ntype CacheEntry struct {\n\tRegistrationEntry *common.RegistrationEntry\n\tSVID *node.Svid\n\tPrivateKey *ecdsa.PrivateKey\n\tExpiry time.Time\n\n\t\/\/ Bundles stores the ID => Bundle map for\n\t\/\/ federated bundles. The registration entry\n\t\/\/ only stores references to the keys here.\n\tBundles map[string][]byte\n}\n\ntype Cache interface {\n\tEntry([]*common.Selector) (entry []CacheEntry)\n\tSetEntry(cacheEntry CacheEntry)\n\tDeleteEntry([]*common.Selector) (deleted bool)\n}\n\ntype cacheImpl struct {\n\tcache map[string][]CacheEntry\n\tm sync.Mutex\n}\n\nfunc NewCache() *cacheImpl {\n\treturn &cacheImpl{cache: make(map[string][]CacheEntry)}\n}\n\nfunc (c *cacheImpl) Entry(selectors []*common.Selector) (entry []CacheEntry) {\n\tkey := deriveCacheKey(selectors)\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\treturn c.cache[key]\n}\n\nfunc (c *cacheImpl) SetEntry(cacheEntry CacheEntry) {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\tkey := deriveCacheKey(cacheEntry.RegistrationEntry.Selectors)\n\tc.cache[key] = append(c.cache[key], cacheEntry)\n\treturn\n\n}\n\nfunc (c *cacheImpl) DeleteEntry(selectors []*common.Selector) (deleted bool) {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\tkey := deriveCacheKey(selectors)\n\tif _, exists := c.cache[key]; exists == true {\n\t\tdelete(c.cache, key)\n\t\tdeleted = true\n\t}\n\treturn\n}\n\nfunc deriveCacheKey(s selectors) (key string) {\n\tvar concatSelectors string\n\tsort.Slice(s, util.SelectorsSortFunction(s))\n\n\tfor _, selector := range s {\n\t\tconcatSelectors = concatSelectors + \"::\" + selector.Type + \":\" + selector.Value\n\t}\n\thashedSelectors := hash.Hash.Sum(sha256.New(), []byte(concatSelectors))\n\n\treturn string(hashedSelectors)\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":179} {"diff_hunk":"@@ -18,6 +18,8 @@ package deployment\n \n import (\n \t\"net\/http\"\n+\t\"os\/exec\"\n+\t\"path\"\n \t\"time\"\n \n \t. 
\"github.com\/onsi\/ginkgo\"","source_code":"\/*\nCopyright 2019 The KubeEdge Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage deployment\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\tappsv1 \"k8s.io\/api\/apps\/v1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\n\t\"github.com\/kubeedge\/kubeedge\/tests\/e2e\/constants\"\n\t. \"github.com\/kubeedge\/kubeedge\/tests\/e2e\/testsuite\"\n\t\"github.com\/kubeedge\/kubeedge\/tests\/e2e\/utils\"\n)\n\nvar DeploymentTestTimerGroup *utils.TestTimerGroup = utils.NewTestTimerGroup()\n\n\/\/Run Test cases\nvar _ = Describe(\"Application deployment test in E2E scenario\", func() {\n\tvar UID string\n\tvar testTimer *utils.TestTimer\n\tvar testDescription GinkgoTestDescription\n\tContext(\"Test application deployment and delete deployment using deployment spec\", func() {\n\t\tBeforeEach(func() {\n\t\t\t\/\/ Get current test description\n\t\t\ttestDescription = CurrentGinkgoTestDescription()\n\t\t\t\/\/ Start test timer\n\t\t\ttestTimer = DeploymentTestTimerGroup.NewTestTimer(testDescription.TestText)\n\t\t})\n\t\tAfterEach(func() {\n\t\t\t\/\/ End test timer\n\t\t\ttestTimer.End()\n\t\t\t\/\/ Print result\n\t\t\ttestTimer.PrintResult()\n\t\t\tvar podlist corev1.PodList\n\t\t\tvar deploymentList appsv1.DeploymentList\n\t\t\terr := utils.GetDeployments(&deploymentList, ctx.Cfg.K8SMasterForKubeEdge+constants.DeploymentHandler)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tfor _, deployment := range deploymentList.Items {\n\t\t\t\tif deployment.Name == UID {\n\t\t\t\t\tlabel := nodeName\n\t\t\t\t\tpodlist, err = utils.GetPods(ctx.Cfg.K8SMasterForKubeEdge+constants.AppHandler, label)\n\t\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\t\tStatusCode := utils.DeleteDeployment(ctx.Cfg.K8SMasterForKubeEdge+constants.DeploymentHandler, deployment.Name)\n\t\t\t\t\tExpect(StatusCode).Should(Equal(http.StatusOK))\n\t\t\t\t}\n\t\t\t}\n\t\t\tutils.CheckPodDeleteState(ctx.Cfg.K8SMasterForKubeEdge+constants.AppHandler, podlist)\n\t\t\tutils.PrintTestcaseNameandStatus()\n\t\t})\n\n\t\tIt(\"E2E_APP_DEPLOYMENT_1: Create deployment and check the pods are coming up correctly\", func() {\n\t\t\treplica := 1\n\t\t\t\/\/Generate the random string and assign as a UID\n\t\t\tUID = \"edgecore-depl-app-\" + utils.GetRandomString(5)\n\t\t\tCreateDeploymentTest(replica, UID, nodeName, nodeSelector, ctx)\n\t\t})\n\t\tIt(\"E2E_APP_DEPLOYMENT_2: Create deployment with replicas and check the pods are coming up correctly\", func() {\n\t\t\treplica := 3\n\t\t\t\/\/Generate the random string and assign as a UID\n\t\t\tUID = \"edgecore-depl-app-\" + utils.GetRandomString(5)\n\t\t\tCreateDeploymentTest(replica, UID, nodeName, nodeSelector, ctx)\n\t\t})\n\n\t\tIt(\"E2E_APP_DEPLOYMENT_3: Create deployment and check deployment ctrler re-creating pods when user deletes the pods manually\", func() {\n\t\t\treplica := 3\n\t\t\t\/\/Generate the random string and assign as a UID\n\t\t\tUID = \"edgecore-depl-app-\" + 
utils.GetRandomString(5)\n\t\t\tpodlist := CreateDeploymentTest(replica, UID, nodeName, nodeSelector, ctx)\n\t\t\tfor _, pod := range podlist.Items {\n\t\t\t\t_, StatusCode := utils.DeletePods(ctx.Cfg.K8SMasterForKubeEdge + constants.AppHandler + \"\/\" + pod.Name)\n\t\t\t\tExpect(StatusCode).Should(Equal(http.StatusOK))\n\t\t\t}\n\t\t\tutils.CheckPodDeleteState(ctx.Cfg.K8SMasterForKubeEdge+constants.AppHandler, podlist)\n\t\t\tlabel := nodeName\n\t\t\tpodlist, err := utils.GetPods(ctx.Cfg.K8SMasterForKubeEdge+constants.AppHandler, label)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(len(podlist.Items)).Should(Equal(replica))\n\t\t\tutils.WaitforPodsRunning(ctx.Cfg.KubeConfigPath, podlist, 240*time.Second)\n\t\t})\n\n\t})\n\tContext(\"Test application deployment using Pod spec\", func() {\n\t\tBeforeEach(func() {\n\t\t\t\/\/ Get current test description\n\t\t\ttestDescription = CurrentGinkgoTestDescription()\n\t\t\t\/\/ Start test timer\n\t\t\ttestTimer = DeploymentTestTimerGroup.NewTestTimer(testDescription.TestText)\n\t\t})\n\t\tAfterEach(func() {\n\t\t\t\/\/ End test timer\n\t\t\ttestTimer.End()\n\t\t\t\/\/ Print result\n\t\t\ttestTimer.PrintResult()\n\t\t\tvar podlist corev1.PodList\n\t\t\tlabel := nodeName\n\t\t\tpodlist, err := utils.GetPods(ctx.Cfg.K8SMasterForKubeEdge+constants.AppHandler, label)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tfor _, pod := range podlist.Items {\n\t\t\t\t_, StatusCode := utils.DeletePods(ctx.Cfg.K8SMasterForKubeEdge + constants.AppHandler + \"\/\" + pod.Name)\n\t\t\t\tExpect(StatusCode).Should(Equal(http.StatusOK))\n\t\t\t}\n\t\t\tutils.CheckPodDeleteState(ctx.Cfg.K8SMasterForKubeEdge+constants.AppHandler, podlist)\n\t\t\tutils.PrintTestcaseNameandStatus()\n\t\t})\n\n\t\tIt(\"E2E_POD_DEPLOYMENT_1: Create a pod and check the pod is coming up correctly\", func() {\n\t\t\t\/\/Generate the random string and assign as podName\n\t\t\tpodName := \"pod-app-\" + utils.GetRandomString(5)\n\t\t\tpod := utils.NewPodObj(podName, ctx.Cfg.AppImageURL[0], nodeSelector)\n\n\t\t\tCreatePodTest(nodeName, podName, ctx, pod)\n\t\t})\n\n\t\tIt(\"E2E_POD_DEPLOYMENT_2: Create the pod and delete pod happening successfully\", func() {\n\t\t\t\/\/Generate the random string and assign as podName\n\t\t\tpodName := \"pod-app-\" + utils.GetRandomString(5)\n\t\t\tpod := utils.NewPodObj(podName, ctx.Cfg.AppImageURL[0], nodeSelector)\n\n\t\t\tpodlist := CreatePodTest(nodeName, podName, ctx, pod)\n\t\t\tfor _, pod := range podlist.Items {\n\t\t\t\t_, StatusCode := utils.DeletePods(ctx.Cfg.K8SMasterForKubeEdge + constants.AppHandler + \"\/\" + pod.Name)\n\t\t\t\tExpect(StatusCode).Should(Equal(http.StatusOK))\n\t\t\t}\n\t\t\tutils.CheckPodDeleteState(ctx.Cfg.K8SMasterForKubeEdge+constants.AppHandler, podlist)\n\t\t})\n\t\tIt(\"E2E_POD_DEPLOYMENT_3: Create pod and delete the pod successfully, and delete already deleted pod and check the behaviour\", func() {\n\t\t\t\/\/Generate the random string and assign as podName\n\t\t\tpodName := \"pod-app-\" + utils.GetRandomString(5)\n\t\t\tpod := utils.NewPodObj(podName, ctx.Cfg.AppImageURL[0], nodeSelector)\n\n\t\t\tpodlist := CreatePodTest(nodeName, podName, ctx, pod)\n\t\t\tfor _, pod := range podlist.Items {\n\t\t\t\t_, StatusCode := utils.DeletePods(ctx.Cfg.K8SMasterForKubeEdge + constants.AppHandler + \"\/\" + pod.Name)\n\t\t\t\tExpect(StatusCode).Should(Equal(http.StatusOK))\n\t\t\t}\n\t\t\tutils.CheckPodDeleteState(ctx.Cfg.K8SMasterForKubeEdge+constants.AppHandler, podlist)\n\t\t\t_, StatusCode := utils.DeletePods(ctx.Cfg.K8SMasterForKubeEdge + 
constants.AppHandler + \"\/\" + UID)\n\t\t\tExpect(StatusCode).Should(Equal(http.StatusNotFound))\n\t\t})\n\t\tIt(\"E2E_POD_DEPLOYMENT_4: Create and delete pod multiple times and check all the Pod created and deleted successfully\", func() {\n\t\t\t\/\/Generate the random string and assign as a UID\n\t\t\tfor i := 0; i < 10; i++ {\n\t\t\t\t\/\/Generate the random string and assign as podName\n\t\t\t\tpodName := \"pod-app-\" + utils.GetRandomString(5)\n\t\t\t\tpod := utils.NewPodObj(podName, ctx.Cfg.AppImageURL[0], nodeSelector)\n\n\t\t\t\tpodlist := CreatePodTest(nodeName, podName, ctx, pod)\n\t\t\t\tfor _, pod := range podlist.Items {\n\t\t\t\t\t_, StatusCode := utils.DeletePods(ctx.Cfg.K8SMasterForKubeEdge + constants.AppHandler + \"\/\" + pod.Name)\n\t\t\t\t\tExpect(StatusCode).Should(Equal(http.StatusOK))\n\t\t\t\t}\n\t\t\t\tutils.CheckPodDeleteState(ctx.Cfg.K8SMasterForKubeEdge+constants.AppHandler, podlist)\n\t\t\t}\n\t\t})\n\t\tIt(\"E2E_POD_DEPLOYMENT_5: Create pod with hostpath volume successfully\", func() {\n\t\t\t\/\/Generate the random string and assign as podName\n\t\t\tpodName := \"pod-app-\" + utils.GetRandomString(5)\n\t\t\tpod := utils.NewPodObj(podName, ctx.Cfg.AppImageURL[0], nodeSelector)\n\n\t\t\tpod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{\n\t\t\t\tName: \"hp\",\n\t\t\t\tMountPath: \"\/hp\",\n\t\t\t}}\n\t\t\tpod.Spec.Volumes = []corev1.Volume{{\n\t\t\t\tName: \"hp\",\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tHostPath: &corev1.HostPathVolumeSource{Path: \"\/tmp\"},\n\t\t\t\t},\n\t\t\t}}\n\n\t\t\tpodlist := CreatePodTest(nodeName, podName, ctx, pod)\n\t\t\tfor _, pod := range podlist.Items {\n\t\t\t\t_, StatusCode := utils.DeletePods(ctx.Cfg.K8SMasterForKubeEdge + constants.AppHandler + \"\/\" + pod.Name)\n\t\t\t\tExpect(StatusCode).Should(Equal(http.StatusOK))\n\t\t\t}\n\t\t\tutils.CheckPodDeleteState(ctx.Cfg.K8SMasterForKubeEdge+constants.AppHandler, podlist)\n\t\t})\n\t})\n})\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":180} {"diff_hunk":"@@ -167,5 +167,5 @@ func setup(conf config) (_ *frontend, _ *processor, cleanup func(), err error) {\n \t\tbucket: bucket,\n \t\tcoll: coll,\n \t}\n-\treturn f, p, nil, nil\n+\treturn f, p, cleanup, nil\n }","source_code":"\/\/ Copyright 2019 The Go Cloud Development Kit Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"gocloud.dev\/blob\"\n\t\"gocloud.dev\/docstore\"\n\t\"gocloud.dev\/pubsub\"\n)\n\nvar (\n\trequestTopicURL = flag.String(\"request-topic\", \"mem:\/\/requests\", \"gocloud.dev\/pubsub URL for request topic\")\n\trequestSubURL = flag.String(\"request-sub\", \"mem:\/\/requests\", \"gocloud.dev\/pubsub URL for request subscription\")\n\tresponseTopicURL = flag.String(\"response-topic\", \"mem:\/\/responses\", \"gocloud.dev\/pubsub URL for response topic\")\n\tresponseSubURL = 
flag.String(\"response-sub\", \"mem:\/\/responses\", \"gocloud.dev\/pubsub URL for response subscription\")\n\tbucketURL = flag.String(\"bucket\", \"\", \"gocloud.dev\/blob URL for image bucket\")\n\tcollectionURL = flag.String(\"collection\", \"mem:\/\/orders\/ID\", \"gocloud.dev\/docstore URL for order collection\")\n\n\tport = flag.Int(\"port\", 10538, \"HTTP port for frontend\")\n\trunFrontend = flag.Bool(\"frontend\", true, \"run the frontend\")\n\trunProcessor = flag.Bool(\"processor\", true, \"run the image processor\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tconf := config{\n\t\trequestTopicURL: *requestTopicURL,\n\t\trequestSubURL: *requestSubURL,\n\t\tresponseTopicURL: *responseTopicURL,\n\t\tresponseSubURL: *responseSubURL,\n\t\tbucketURL: *bucketURL,\n\t\tcollectionURL: *collectionURL,\n\t}\n\tfrontend, processor, cleanup, err := setup(conf)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer cleanup()\n\n\t\/\/ Run the frontend, or the processor, or both.\n\t\/\/ When we want to run both, one of them has to run in a goroutine.\n\t\/\/ So it's easier to run both in goroutines, even if we only need\n\t\/\/ to run one.\n\terrc := make(chan error, 2)\n\tif *runFrontend {\n\t\tgo func() { errc <- frontend.run(context.Background(), *port) }()\n\t\tfmt.Printf(\"listening on port %d\\n\", *port)\n\t} else {\n\t\terrc <- nil\n\t}\n\tif *runProcessor {\n\t\tgo func() { errc <- processor.run(context.Background()) }()\n\t} else {\n\t\terrc <- nil\n\t}\n\t\/\/ Each of the goroutines will send once to errc, so receive two values.\n\tfor i := 0; i < 2; i++ {\n\t\tif err := <-errc; err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n\n\/\/ config describes the URLs for the resources used by the order application.\ntype config struct {\n\trequestTopicURL string\n\trequestSubURL string\n\tresponseTopicURL string\n\tresponseSubURL string\n\tbucketURL string\n\tcollectionURL string\n}\n\n\/\/ setup opens all the necessary resources for the application.\nfunc setup(conf config) (_ *frontend, _ *processor, cleanup func(), err error) {\n\t\/\/ TODO(jba): simplify cleanup logic\n\tvar cleanups []func()\n\tdefer func() {\n\t\t\/\/ Clean up on error; return cleanup func on success.\n\t\tf := func() {\n\t\t\tfor _, c := range cleanups {\n\t\t\t\tc()\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tf()\n\t\t\tcleanup = nil\n\t\t} else {\n\t\t\tcleanup = f\n\t\t}\n\t}()\n\n\tctx := context.Background()\n\t\/\/ TODO(jba): This application assumes at-least-once processing. 
Enforce that here if possible.\n\treqTopic, err := pubsub.OpenTopic(ctx, conf.requestTopicURL)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tcleanups = append(cleanups, func() { reqTopic.Shutdown(ctx) })\n\n\treqSub, err := pubsub.OpenSubscription(ctx, conf.requestSubURL)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tcleanups = append(cleanups, func() { reqSub.Shutdown(ctx) })\n\n\tresTopic, err := pubsub.OpenTopic(ctx, conf.responseTopicURL)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tcleanups = append(cleanups, func() { resTopic.Shutdown(ctx) })\n\n\tresSub, err := pubsub.OpenSubscription(ctx, conf.responseSubURL)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tcleanups = append(cleanups, func() { resSub.Shutdown(ctx) })\n\n\tburl := conf.bucketURL\n\tif burl == \"\" {\n\t\tdir, err := ioutil.TempDir(\"\", \"gocdk-order\")\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tburl = \"file:\/\/\" + filepath.ToSlash(dir)\n\t\tcleanups = append(cleanups, func() { os.Remove(dir) })\n\t}\n\tbucket, err := blob.OpenBucket(ctx, burl)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tcleanups = append(cleanups, func() { bucket.Close() })\n\n\tcoll, err := docstore.OpenCollection(ctx, conf.collectionURL)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tcleanups = append(cleanups, func() { coll.Close() })\n\n\tf := &frontend{\n\t\trequestTopic: reqTopic,\n\t\tbucket: bucket,\n\t\tcoll: coll,\n\t}\n\tp := &processor{\n\t\trequestSub: reqSub,\n\t\tbucket: bucket,\n\t\tcoll: coll,\n\t}\n\treturn f, p, nil, nil\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":181} {"diff_hunk":"@@ -66,6 +66,9 @@ func (j journalBlockServer) RemoveBlockReferences(\n \tcontexts map[BlockID][]BlockContext) (\n \tliveCounts map[BlockID]int, err error) {\n \tif tlfJournal, ok := j.jServer.getTLFJournal(tlfID); ok {\n+\t\tdefer func() {\n+\t\t\terr = translateToBlockServerError(err)\n+\t\t}()\n \t\t\/\/ TODO: Get server counts without making a\n \t\t\/\/ RemoveBlockReferences call and merge it.\n \t\treturn tlfJournal.removeBlockReferences(ctx, contexts)","source_code":"\/\/ Copyright 2016 Keybase Inc. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage libkbfs\n\nimport \"golang.org\/x\/net\/context\"\n\ntype journalBlockServer struct {\n\tjServer *JournalServer\n\tBlockServer\n\tenableAddBlockReference bool\n}\n\nvar _ BlockServer = journalBlockServer{}\n\nfunc (j journalBlockServer) Get(\n\tctx context.Context, tlfID TlfID, id BlockID, context BlockContext) (\n\t[]byte, BlockCryptKeyServerHalf, error) {\n\tif tlfJournal, ok := j.jServer.getTLFJournal(tlfID); ok {\n\t\tdata, serverHalf, err := tlfJournal.getBlockDataWithContext(\n\t\t\tid, context)\n\t\tswitch err.(type) {\n\t\tcase nil:\n\t\t\treturn data, serverHalf, nil\n\t\tcase BServerErrorBlockNonExistent:\n\t\t\treturn j.BlockServer.Get(ctx, tlfID, id, context)\n\t\tdefault:\n\t\t\treturn nil, BlockCryptKeyServerHalf{}, err\n\t\t}\n\t}\n\n\treturn j.BlockServer.Get(ctx, tlfID, id, context)\n}\n\nfunc (j journalBlockServer) Put(\n\tctx context.Context, tlfID TlfID, id BlockID, context BlockContext,\n\tbuf []byte, serverHalf BlockCryptKeyServerHalf) error {\n\tif tlfJournal, ok := j.jServer.getTLFJournal(tlfID); ok {\n\t\treturn tlfJournal.putBlockData(ctx, id, context, buf, serverHalf)\n\t}\n\n\treturn j.BlockServer.Put(ctx, tlfID, id, context, buf, serverHalf)\n}\n\nfunc (j journalBlockServer) AddBlockReference(\n\tctx context.Context, tlfID TlfID, id BlockID,\n\tcontext BlockContext) error {\n\tif !j.enableAddBlockReference {\n\t\t\/\/ TODO: Temporarily return an error until KBFS-1149 is\n\t\t\/\/ fixed. This is needed despite\n\t\t\/\/ journalBlockCache.CheckForBlockPtr, since CheckForBlockPtr\n\t\t\/\/ may be called before journaling is turned on for a TLF.\n\t\treturn BServerErrorBlockNonExistent{}\n\t}\n\n\tif tlfJournal, ok := j.jServer.getTLFJournal(tlfID); ok {\n\t\treturn tlfJournal.addBlockReference(ctx, id, context)\n\t}\n\n\treturn j.BlockServer.AddBlockReference(ctx, tlfID, id, context)\n}\n\nfunc (j journalBlockServer) RemoveBlockReferences(\n\tctx context.Context, tlfID TlfID,\n\tcontexts map[BlockID][]BlockContext) (\n\tliveCounts map[BlockID]int, err error) {\n\tif tlfJournal, ok := j.jServer.getTLFJournal(tlfID); ok {\n\t\t\/\/ TODO: Get server counts without making a\n\t\t\/\/ RemoveBlockReferences call and merge it.\n\t\treturn tlfJournal.removeBlockReferences(ctx, contexts)\n\t}\n\n\treturn j.BlockServer.RemoveBlockReferences(ctx, tlfID, contexts)\n}\n\nfunc (j journalBlockServer) ArchiveBlockReferences(\n\tctx context.Context, tlfID TlfID,\n\tcontexts map[BlockID][]BlockContext) error {\n\tif tlfJournal, ok := j.jServer.getTLFJournal(tlfID); ok {\n\t\treturn tlfJournal.archiveBlockReferences(ctx, contexts)\n\t}\n\n\treturn j.BlockServer.ArchiveBlockReferences(ctx, tlfID, contexts)\n}\n\nfunc (j journalBlockServer) Shutdown() {\n\tj.jServer.shutdown()\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":182} {"diff_hunk":"@@ -27,7 +27,11 @@ import (\n \t\"github.com\/iotexproject\/iotex-proto\/golang\/iotextypes\"\n )\n \n+var nativeStakingContractCreator = address.ZeroAddress\n+var nativeStakingContractNonce = uint64(0)\n+\n type stakingCommittee struct {\n+\tcandidatesByHeight CandidatesByHeight\n \tgetEpochHeight GetEpochHeight\n \tgetEpochNum GetEpochNum\n \telectionCommittee committee.Committee","source_code":"\/\/ Copyright (c) 2019 IoTeX Foundation\n\/\/ This is an alpha (internal) release and is not suitable for production. 
This source code is provided 'as is' and no\n\/\/ warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent\n\/\/ permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache\n\/\/ License 2.0 that can be found in the LICENSE file.\n\npackage poll\n\nimport (\n\t\"context\"\n\t\"encoding\/hex\"\n\t\"math\/big\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"go.uber.org\/zap\"\n\n\t\"github.com\/iotexproject\/iotex-core\/action\"\n\t\"github.com\/iotexproject\/iotex-core\/action\/protocol\"\n\t\"github.com\/iotexproject\/iotex-core\/action\/protocol\/rolldpos\"\n\t\"github.com\/iotexproject\/iotex-core\/config\"\n\t\"github.com\/iotexproject\/iotex-core\/pkg\/log\"\n\t\"github.com\/iotexproject\/iotex-core\/state\"\n\t\"github.com\/iotexproject\/iotex-election\/committee\"\n\t\"github.com\/iotexproject\/iotex-election\/types\"\n\t\"github.com\/iotexproject\/iotex-election\/util\"\n\t\"github.com\/iotexproject\/iotex-proto\/golang\/iotextypes\"\n)\n\ntype stakingCommittee struct {\n\tgetEpochHeight GetEpochHeight\n\tgetEpochNum GetEpochNum\n\telectionCommittee committee.Committee\n\tgovernanceStaking Protocol\n\tnativeStaking *NativeStaking\n\trp *rolldpos.Protocol\n\tscoreThreshold *big.Int\n\tcurrentNativeBuckets []*types.Bucket\n}\n\n\/\/ NewStakingCommittee creates a staking committee which fetches results from governance chain and native staking\nfunc NewStakingCommittee(\n\tec committee.Committee,\n\tgs Protocol,\n\treadContract ReadContract,\n\tgetEpochHeight GetEpochHeight,\n\tgetEpochNum GetEpochNum,\n\tnativeStakingContractAddress string,\n\tnativeStakingContractCode string,\n\trp *rolldpos.Protocol,\n\tscoreThreshold *big.Int,\n) (Protocol, error) {\n\tif getEpochHeight == nil {\n\t\treturn nil, errors.New(\"failed to create native staking: empty getEpochHeight\")\n\t}\n\tif getEpochNum == nil {\n\t\treturn nil, errors.New(\"failed to create native staking: empty getEpochNum\")\n\t}\n\tvar ns *NativeStaking\n\tif nativeStakingContractAddress != \"\" || nativeStakingContractCode != \"\" {\n\t\tvar err error\n\t\tif ns, err = NewNativeStaking(readContract); err != nil {\n\t\t\treturn nil, errors.New(\"failed to create native staking\")\n\t\t}\n\t\tif nativeStakingContractAddress != \"\" {\n\t\t\tns.SetContract(nativeStakingContractAddress)\n\t\t}\n\t}\n\treturn &stakingCommittee{\n\t\telectionCommittee: ec,\n\t\tgovernanceStaking: gs,\n\t\tnativeStaking: ns,\n\t\tgetEpochHeight: getEpochHeight,\n\t\tgetEpochNum: getEpochNum,\n\t\trp: rp,\n\t\tscoreThreshold: scoreThreshold,\n\t}, nil\n}\n\nfunc (sc *stakingCommittee) Initialize(ctx context.Context, sm protocol.StateManager) error {\n\treturn sc.governanceStaking.Initialize(ctx, sm)\n}\n\nfunc (sc *stakingCommittee) Handle(ctx context.Context, act action.Action, sm protocol.StateManager) (*action.Receipt, error) {\n\treceipt, err := sc.governanceStaking.Handle(ctx, act, sm)\n\tif err := sc.persistNativeBuckets(ctx, receipt, err); err != nil {\n\t\treturn nil, err\n\t}\n\treturn receipt, err\n}\n\nfunc (sc *stakingCommittee) Validate(ctx context.Context, act action.Action) error {\n\treturn validate(ctx, sc, act)\n}\n\nfunc (sc *stakingCommittee) DelegatesByHeight(ctx context.Context, height uint64) (state.CandidateList, error) {\n\tcand, err := sc.governanceStaking.DelegatesByHeight(ctx, height)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvaCtx := protocol.MustGetValidateActionsCtx(ctx)\n\thu := 
config.NewHeightUpgrade(&vaCtx.Genesis)\n\t\/\/ convert to epoch start height\n\tif hu.IsPre(config.Cook, sc.getEpochHeight(sc.getEpochNum(height))) {\n\t\treturn sc.filterDelegates(cand), nil\n\t}\n\t\/\/ native staking starts from Cook\n\tif sc.nativeStaking == nil {\n\t\treturn nil, errors.New(\"native staking was not set after cook height\")\n\t}\n\n\tnativeVotes, err := sc.nativeStaking.Votes(vaCtx.Tip.Height, vaCtx.Tip.Timestamp)\n\tif err == ErrNoData {\n\t\t\/\/ no native staking data\n\t\treturn sc.filterDelegates(cand), nil\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get native chain candidates\")\n\t}\n\tsc.currentNativeBuckets = nativeVotes.Buckets\n\n\treturn sc.mergeDelegates(cand, nativeVotes, vaCtx.Tip.Timestamp), nil\n}\n\nfunc (sc *stakingCommittee) ReadState(ctx context.Context, sm protocol.StateManager, method []byte, args ...[]byte) ([]byte, error) {\n\treturn sc.governanceStaking.ReadState(ctx, sm, method, args...)\n}\n\n\/\/ SetNativeStakingContract sets the address of native staking contract\nfunc (sc *stakingCommittee) SetNativeStakingContract(contract string) {\n\tsc.nativeStaking.SetContract(contract)\n}\n\n\/\/ return candidates whose votes are above threshold\nfunc (sc *stakingCommittee) filterDelegates(candidates state.CandidateList) state.CandidateList {\n\tvar cand state.CandidateList\n\tfor _, c := range candidates {\n\t\tif c.Votes.Cmp(sc.scoreThreshold) >= 0 {\n\t\t\tcand = append(cand, c)\n\t\t}\n\t}\n\treturn cand\n}\n\nfunc (sc *stakingCommittee) mergeDelegates(list state.CandidateList, votes *VoteTally, ts time.Time) state.CandidateList {\n\t\/\/ as of now, native staking does not have register contract, only voting\/staking contract\n\t\/\/ it is assumed that all votes done on native staking target for delegates registered on Ethereum\n\t\/\/ votes cast to all outside address will not be counted and simply ignored\n\tcandidates := make(map[string]*state.Candidate)\n\tcandidateScores := make(map[string]*big.Int)\n\tfor _, cand := range list {\n\t\tclone := cand.Clone()\n\t\tname := to12Bytes(clone.CanName)\n\t\tif v, ok := votes.Candidates[name]; ok {\n\t\t\tclone.Votes.Add(clone.Votes, v.Votes)\n\t\t}\n\t\tif clone.Votes.Cmp(sc.scoreThreshold) >= 0 {\n\t\t\tcandidates[hex.EncodeToString(name[:])] = clone\n\t\t\tcandidateScores[hex.EncodeToString(name[:])] = clone.Votes\n\t\t}\n\t}\n\tsorted := util.Sort(candidateScores, uint64(ts.Unix()))\n\tvar merged state.CandidateList\n\tfor _, name := range sorted {\n\t\tmerged = append(merged, candidates[name])\n\t}\n\treturn merged\n}\n\nfunc (sc *stakingCommittee) persistNativeBuckets(ctx context.Context, receipt *action.Receipt, err error) error {\n\t\/\/ Start to write native buckets archive after cook and only when the action is executed successfully\n\traCtx := protocol.MustGetRunActionsCtx(ctx)\n\tepochHeight := sc.getEpochHeight(sc.getEpochNum(raCtx.BlockHeight))\n\thu := config.NewHeightUpgrade(&raCtx.Genesis)\n\tif hu.IsPre(config.Cook, epochHeight) {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn nil\n\t}\n\tif receipt == nil || receipt.Status != uint64(iotextypes.ReceiptStatus_Success) {\n\t\treturn nil\n\t}\n\tlog.L().Info(\"Store native buckets to election db\", zap.Int(\"size\", len(sc.currentNativeBuckets)))\n\tif err := sc.electionCommittee.PutNativePollByEpoch(\n\t\tsc.rp.GetEpochNum(raCtx.BlockHeight)+1, \/\/ The native buckets recorded in this epoch will be used in the next one\n\t\traCtx.Tip.Timestamp, \/\/ The timestamp of last block is used to represent the 
current buckets timestamp\n\t\tsc.currentNativeBuckets,\n\t); err != nil {\n\t\treturn err\n\t}\n\tsc.currentNativeBuckets = nil\n\treturn nil\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":183} {"diff_hunk":"@@ -53,7 +53,8 @@ var telemetryStatusCmd = &cobra.Command{\n \tShort: \"Print the node's telemetry status\",\n \tLong: `Print the node's telemetry status`,\n \tRun: func(cmd *cobra.Command, args []string) {\n-\t\tcfg, err := logging.EnsureTelemetryConfig(nil, \"\")\n+\t\tmaybeUpdateDataDirFromEnv()\n+\t\tcfg, err := logging.EnsureTelemetryConfig(&dataDir, \"\")\n \n \t\t\/\/ If error loading config, can't disable \/ no need to disable\n \t\tif err != nil {","source_code":"\/\/ Copyright (C) 2019 Algorand, Inc.\n\/\/ This file is part of go-algorand\n\/\/\n\/\/ go-algorand is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as\n\/\/ published by the Free Software Foundation, either version 3 of the\n\/\/ License, or (at your option) any later version.\n\/\/\n\/\/ go-algorand is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with go-algorand. If not, see <https:\/\/www.gnu.org\/licenses\/>.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/algorand\/go-algorand\/logging\"\n)\n\nvar (\n\tnodeName string\n\turi string\n)\n\nfunc init() {\n\ttelemetryCmd.AddCommand(telemetryStatusCmd)\n\ttelemetryCmd.AddCommand(telemetryEnableCmd)\n\ttelemetryCmd.AddCommand(telemetryDisableCmd)\n\ttelemetryCmd.AddCommand(telemetryNameCmd)\n\ttelemetryCmd.AddCommand(telemetryEndpointCmd)\n\n\t\/\/ Enable Logging : node name\n\ttelemetryNameCmd.Flags().StringVarP(&nodeName, \"name\", \"n\", \"\", \"Friendly-name to use for node\")\n\ttelemetryEndpointCmd.Flags().StringVarP(&uri, \"endpoint\", \"e\", \"\", \"Endpoint's URI\")\n}\n\nvar telemetryCmd = &cobra.Command{\n\tUse: \"telemetry\",\n\tShort: \"Control and manage Algorand logging\",\n\tLong: `Enable\/disable and configure Algorand remote logging`,\n\tRun: telemetryStatusCmd.Run,\n}\n\nvar telemetryStatusCmd = &cobra.Command{\n\tUse: \"status\",\n\tShort: \"Print the node's telemetry status\",\n\tLong: `Print the node's telemetry status`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcfg, err := logging.EnsureTelemetryConfig(nil, \"\")\n\n\t\t\/\/ If error loading config, can't disable \/ no need to disable\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tfmt.Println(loggingNotConfigured)\n\t\t} else if cfg.Enable == false {\n\t\t\tfmt.Println(loggingNotEnabled)\n\t\t} else {\n\t\t\tfmt.Printf(loggingEnabled, cfg.Name, cfg.GUID)\n\t\t}\n\t},\n}\n\nvar telemetryEnableCmd = &cobra.Command{\n\tUse: \"enable\",\n\tShort: \"Enable Algorand remote logging\",\n\tLong: `Enable Algorand remote logging`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcfg, err := logging.EnsureTelemetryConfig(nil, \"\")\n\n\t\t\/\/ If error loading config, can't disable \/ no need to disable\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tcfg.Enable = true\n\t\tcfg.Save(cfg.FilePath)\n\t\tfmt.Printf(\"Telemetry logging enabled: Name = %s, Guid = %s\\n\", cfg.Name, cfg.GUID)\n\t},\n}\n\nvar telemetryDisableCmd = &cobra.Command{\n\tUse: \"disable\",\n\tShort: \"Disable Algorand 
remote logging\",\n\tLong: `Disable Algorand remote logging`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcfg, err := logging.EnsureTelemetryConfig(nil, \"\")\n\n\t\t\/\/ If error loading config, can't disable \/ no need to disable\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tcfg.Enable = false\n\t\tcfg.Save(cfg.FilePath)\n\t\tfmt.Printf(\"Telemetry logging disabled: Name = %s, Guid = %s\\n\", cfg.Name, cfg.GUID)\n\t},\n}\n\nvar telemetryNameCmd = &cobra.Command{\n\tUse: \"name -n nodeName\",\n\tShort: \"Enable Algorand remote logging\",\n\tLong: `Enable Algorand remote logging with specified node name`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcfg, err := logging.EnsureTelemetryConfig(nil, \"\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t\tcfg.Enable = true\n\t\tif len(nodeName) > 0 {\n\t\t\tcfg.Name = nodeName\n\t\t}\n\t\tcfg.Save(cfg.FilePath)\n\t\tfmt.Printf(\"Telemetry logging: Name = %s, Guid = %s\\n\", cfg.Name, cfg.GUID)\n\t},\n}\n\nvar telemetryEndpointCmd = &cobra.Command{\n\tUse: \"endpoint -e \",\n\tShort: \"sets the \\\"URI\\\" property\",\n\tLong: `Sets the \"URI\" property in the telemetry configuration`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcfg, err := logging.EnsureTelemetryConfig(nil, \"\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t\tcfg.URI = uri\n\t\tcfg.Save(cfg.FilePath)\n\t\tfmt.Printf(\"Telemetry logging: Name = %s, Guid = %s, URI = %s\\n\", cfg.Name, cfg.GUID, cfg.URI)\n\t},\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":184} {"diff_hunk":"@@ -84,7 +84,7 @@ func byteOrNil(data []byte) *[]byte {\n \treturn &data\n }\n \n-func computeAssetIndexInPayset(tx node.TxnWithStatus, txnCounter uint64, payset []transactions.SignedTxnWithAD) (aidx *uint64) {\n+func computeCreatableIndexInPayset(tx node.TxnWithStatus, txnCounter uint64, payset []transactions.SignedTxnWithAD) (cidx *uint64) {\n \t\/\/ Compute transaction index in block\n \toffset := -1\n \tfor idx, stxnib := range payset {","source_code":"\/\/ Copyright (C) 2019-2020 Algorand, Inc.\n\/\/ This file is part of go-algorand\n\/\/\n\/\/ go-algorand is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as\n\/\/ published by the Free Software Foundation, either version 3 of the\n\/\/ License, or (at your option) any later version.\n\/\/\n\/\/ go-algorand is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with go-algorand. 
If not, see <https:\/\/www.gnu.org\/licenses\/>.\n\npackage v2\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/algorand\/go-codec\/codec\"\n\t\"github.com\/labstack\/echo\/v4\"\n\n\t\"github.com\/algorand\/go-algorand\/daemon\/algod\/api\/server\/v2\/generated\"\n\t\"github.com\/algorand\/go-algorand\/data\"\n\t\"github.com\/algorand\/go-algorand\/data\/basics\"\n\t\"github.com\/algorand\/go-algorand\/data\/transactions\"\n\t\"github.com\/algorand\/go-algorand\/logging\"\n\t\"github.com\/algorand\/go-algorand\/node\"\n\t\"github.com\/algorand\/go-algorand\/protocol\"\n)\n\n\/\/ returnError logs an internal message while returning the encoded response.\nfunc returnError(ctx echo.Context, code int, internal error, external string, logger logging.Logger) error {\n\tlogger.Info(internal)\n\treturn ctx.JSON(code, generated.ErrorResponse{Message: external})\n}\n\nfunc badRequest(ctx echo.Context, internal error, external string, log logging.Logger) error {\n\treturn returnError(ctx, http.StatusBadRequest, internal, external, log)\n}\n\nfunc serviceUnavailable(ctx echo.Context, internal error, external string, log logging.Logger) error {\n\treturn returnError(ctx, http.StatusServiceUnavailable, internal, external, log)\n}\n\nfunc internalError(ctx echo.Context, internal error, external string, log logging.Logger) error {\n\treturn returnError(ctx, http.StatusInternalServerError, internal, external, log)\n}\n\nfunc notFound(ctx echo.Context, internal error, external string, log logging.Logger) error {\n\treturn returnError(ctx, http.StatusNotFound, internal, external, log)\n}\n\nfunc addrOrNil(addr basics.Address) *string {\n\tif addr.IsZero() {\n\t\treturn nil\n\t}\n\tret := addr.String()\n\treturn &ret\n}\n\nfunc strOrNil(str string) *string {\n\tif str == \"\" {\n\t\treturn nil\n\t}\n\treturn &str\n}\n\nfunc numOrNil(num uint64) *uint64 {\n\tif num == 0 {\n\t\treturn nil\n\t}\n\treturn &num\n}\n\nfunc byteOrNil(data []byte) *[]byte {\n\tif len(data) == 0 {\n\t\treturn nil\n\t}\n\treturn &data\n}\n\nfunc computeAssetIndexInPayset(tx node.TxnWithStatus, txnCounter uint64, payset []transactions.SignedTxnWithAD) (aidx *uint64) {\n\t\/\/ Compute transaction index in block\n\toffset := -1\n\tfor idx, stxnib := range payset {\n\t\tif tx.Txn.Txn.ID() == stxnib.Txn.ID() {\n\t\t\toffset = idx\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Sanity check that txn was in fetched block\n\tif offset < 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ Count into block to get created asset index\n\tidx := txnCounter - uint64(len(payset)) + uint64(offset) + 1\n\treturn &idx\n}\n\n\/\/ computeAssetIndexFromTxn returns the created asset index given a confirmed\n\/\/ transaction whose confirmation block is available in the ledger. 
Note that\n\/\/ 0 is an invalid asset index (they start at 1).\nfunc computeAssetIndexFromTxn(tx node.TxnWithStatus, l *data.Ledger) (aidx *uint64) {\n\t\/\/ Must have ledger\n\tif l == nil {\n\t\treturn nil\n\t}\n\t\/\/ Transaction must be confirmed\n\tif tx.ConfirmedRound == 0 {\n\t\treturn nil\n\t}\n\t\/\/ Transaction must be AssetConfig transaction\n\tif tx.Txn.Txn.AssetConfigTxnFields == (transactions.AssetConfigTxnFields{}) {\n\t\treturn nil\n\t}\n\t\/\/ Transaction must be creating an asset\n\tif tx.Txn.Txn.AssetConfigTxnFields.ConfigAsset != 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ Look up block where transaction was confirmed\n\tblk, err := l.Block(tx.ConfirmedRound)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tpayset, err := blk.DecodePaysetFlat()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn computeAssetIndexInPayset(tx, blk.BlockHeader.TxnCounter, payset)\n}\n\n\/\/ getCodecHandle converts a format string into the encoder + content type\nfunc getCodecHandle(formatPtr *string) (codec.Handle, string, error) {\n\tformat := \"json\"\n\tif formatPtr != nil {\n\t\tformat = strings.ToLower(*formatPtr)\n\t}\n\n\tswitch format {\n\tcase \"json\":\n\t\treturn protocol.JSONHandle, \"application\/json\", nil\n\tcase \"msgpack\":\n\t\tfallthrough\n\tcase \"msgp\":\n\t\treturn protocol.CodecHandle, \"application\/msgpack\", nil\n\tdefault:\n\t\treturn nil, \"\", fmt.Errorf(\"invalid format: %s\", format)\n\t}\n}\n\nfunc encode(handle codec.Handle, obj interface{}) ([]byte, error) {\n\tvar output []byte\n\tenc := codec.NewEncoderBytes(&output, handle)\n\n\terr := enc.Encode(obj)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to encode object: %v\", err)\n\t}\n\treturn output, nil\n}\n\nfunc decode(handle codec.Handle, data []byte, v interface{}) error {\n\tenc := codec.NewDecoderBytes(data, handle)\n\n\terr := enc.Decode(v)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to decode object: %v\", err)\n\t}\n\treturn nil\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":185} {"diff_hunk":"@@ -107,8 +107,8 @@ var orderCreateCmd = &cobra.Command{\n }\n \n var orderCancelCmd = &cobra.Command{\n-\tUse: \"cancel \",\n-\tShort: \"Cancel order on Marketplace\",\n+\tUse: \"cancel ...\",\n+\tShort: \"Cancel given orders on Marketplace\",\n \tArgs: cobra.MinimumNArgs(1),\n \tRunE: func(cmd *cobra.Command, args []string) error {\n \t\tctx, cancel := newTimeoutContext()","source_code":"package commands\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/sonm-io\/core\/cmd\/cli\/task_config\"\n\tpb \"github.com\/sonm-io\/core\/proto\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tordersSearchLimit uint64 = 0\n)\n\nfunc init() {\n\torderListCmd.PersistentFlags().Uint64Var(&ordersSearchLimit, \"limit\", 10, \"Orders count to show\")\n\n\torderRootCmd.AddCommand(\n\t\torderListCmd,\n\t\torderStatusCmd,\n\t\torderCreateCmd,\n\t\torderCancelCmd,\n\t\torderPurgeCmd,\n\t)\n}\n\nvar orderRootCmd = &cobra.Command{\n\tUse: \"order\",\n\tShort: \"Manage orders\",\n\tPersistentPreRunE: loadKeyStoreWrapper,\n}\n\nvar orderListCmd = &cobra.Command{\n\tUse: \"list\",\n\tShort: \"Show your active orders\",\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tctx, cancel := newTimeoutContext()\n\t\tdefer cancel()\n\n\t\tmarket, err := newMarketClient(ctx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot create client connection: %v\", err)\n\t\t}\n\n\t\treq := &pb.Count{Count: ordersSearchLimit}\n\t\treply, err := market.GetOrders(ctx, req)\n\t\tif err != nil {\n\t\t\treturn 
fmt.Errorf(\"cannot receive orders from marketplace: %v\", err)\n\t\t}\n\n\t\tprintOrdersList(cmd, reply.Orders)\n\t\treturn nil\n\t},\n}\n\nvar orderStatusCmd = &cobra.Command{\n\tUse: \"status \",\n\tShort: \"Show order stats\",\n\tArgs: cobra.MinimumNArgs(1),\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tctx, cancel := newTimeoutContext()\n\t\tdefer cancel()\n\n\t\tmarket, err := newMarketClient(ctx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot create client connection: %v\", err)\n\t\t}\n\n\t\torderID := args[0]\n\t\torder, err := market.GetOrderByID(ctx, &pb.ID{Id: orderID})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot get order by ID: %v\", err)\n\t\t}\n\n\t\tprintOrderDetails(cmd, order)\n\t\treturn nil\n\t},\n}\n\nvar orderCreateCmd = &cobra.Command{\n\tUse: \"create \",\n\tShort: \"Place new Bid order on Marketplace\",\n\tArgs: cobra.MinimumNArgs(1),\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tctx, cancel := newTimeoutContext()\n\t\tdefer cancel()\n\n\t\tmarket, err := newMarketClient(ctx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot create client connection: %v\", err)\n\t\t}\n\n\t\tpath := args[0]\n\t\tbid := &pb.BidOrder{}\n\t\tif err := task_config.LoadFromFile(path, bid); err != nil {\n\t\t\treturn fmt.Errorf(\"cannot load order definition: %v\", err)\n\t\t}\n\n\t\tcreated, err := market.CreateOrder(ctx, bid)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot create order on marketplace: %v\", err)\n\t\t}\n\n\t\tprintID(cmd, created.GetId().Unwrap().String())\n\t\treturn nil\n\t},\n}\n\nvar orderCancelCmd = &cobra.Command{\n\tUse: \"cancel \",\n\tShort: \"Cancel order on Marketplace\",\n\tArgs: cobra.MinimumNArgs(1),\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tctx, cancel := newTimeoutContext()\n\t\tdefer cancel()\n\n\t\tmarket, err := newMarketClient(ctx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot create client connection: %v\", err)\n\t\t}\n\n\t\torderID := args[0]\n\t\t_, err = market.CancelOrder(ctx, &pb.ID{Id: orderID})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot cancel order on Marketplace: %v\", err)\n\t\t}\n\n\t\tshowOk(cmd)\n\t\treturn nil\n\t},\n}\n\nvar orderPurgeCmd = &cobra.Command{\n\tUse: \"purge\",\n\tShort: \"Remove all your orders from Marketplace\",\n\tRunE: func(cmd *cobra.Command, _ []string) error {\n\t\tctx, cancel := newTimeoutContext()\n\t\tdefer cancel()\n\n\t\tmarket, err := newMarketClient(ctx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot create client connection: %v\", err)\n\t\t}\n\n\t\tif _, err := market.Purge(ctx, &pb.Empty{}); err != nil {\n\t\t\treturn fmt.Errorf(\"cannot purge orders: %v\", err)\n\t\t}\n\n\t\tshowOk(cmd)\n\t\treturn nil\n\t},\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":186} {"diff_hunk":"@@ -134,6 +134,10 @@ func (e *dErrImpl) Error() string {\n \treturn \"Multiple errors:\\n\" + strings.Join(lines, \"\\n\")\n }\n \n+func (e *dErrImpl) AnonymizedErrs() []string {\n+\treturn e.anonymizedErrs\n+}\n+\n func (e *dErrImpl) len() int {\n \treturn len(e.errs)\n }","source_code":"\/\/ Copyright 2017 Google Inc. 
All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage daisy\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nconst (\n\tuntypedError = \"\"\n\tmultiError = \"MultiError\"\n\tfileIOError = \"FileIOError\"\n\tresourceDNEError = \"ResourceDoesNotExist\"\n\timageObsoleteDeletedError = \"ImageObsoleteOrDeleted\"\n\n\tapiError = \"APIError\"\n\tapiError404 = \"APIError404\"\n)\n\n\/\/ dErr is a Daisy internal error type.\n\/\/ It has:\n\/\/ - optional error typing\n\/\/ - multiple error aggregation\n\/\/\n\/\/ Default implementation:\n\/\/ The default dErr implementation is flat, dErr.add(anotherDErr) will merge the two dErrs\n\/\/ into a single, flat dErr instead of making anotherDErr a child to dErr.\ntype dErr interface {\n\terror\n\n\t\/\/ add shouldn't be called directly, instead call addErrs(dErr, error).\n\t\/\/ This assists with nil dErrs. addErrs(nil, e) will return a new dErr.\n\tadd(error)\n\tType() string\n}\n\n\/\/ addErrs adds an error to a dErr.\n\/\/ The dErr can be nil. If both the dErr and errors are nil, a nil dErr is returned.\n\/\/ If dErr is nil, but errors are not nil, a new dErr is instantiated, the errors are added,\n\/\/ and the new dErr is returned.\n\/\/ Any nil error in errs is disregarded. Therefore, `var e dErr; e = addErrs(e, nil)`\n\/\/ preserves e's nil-ness.\nfunc addErrs(e dErr, errs ...error) dErr {\n\tfor _, err := range errs {\n\t\tif err != nil {\n\t\t\tif e == nil {\n\t\t\t\te = &dErrImpl{}\n\t\t\t}\n\t\t\te.add(err)\n\t\t}\n\t}\n\treturn e\n}\n\nfunc errf(format string, a ...interface{}) dErr {\n\treturn newErr(fmt.Errorf(format, a...))\n}\n\n\/\/ newErr returns a dErr. newErr is used to wrap another error as a dErr.\n\/\/ If e is already a dErr, e is copied and returned.\n\/\/ If e is nil, nil is returned.\nfunc newErr(e error) dErr {\n\tif e == nil {\n\t\treturn nil\n\t}\n\tif dE, ok := e.(*dErrImpl); ok {\n\t\treturn dE\n\t}\n\treturn &dErrImpl{errs: []error{e}}\n}\n\nfunc typedErr(errType string, e error) dErr {\n\tif e == nil {\n\t\treturn nil\n\t}\n\tdE := newErr(e)\n\tdE.(*dErrImpl).errType = errType\n\treturn dE\n}\n\nfunc typedErrf(errType, format string, a ...interface{}) dErr {\n\treturn typedErr(errType, fmt.Errorf(format, a...))\n}\n\ntype dErrImpl struct {\n\terrs []error\n\terrType string\n}\n\nfunc (e *dErrImpl) add(err error) {\n\tif e2, ok := err.(*dErrImpl); ok {\n\t\te.merge(e2)\n\t} else if !ok {\n\t\t\/\/ This is some other error type. 
Add it.\n\t\te.errs = append(e.errs, err)\n\t}\n\tif e.len() > 1 {\n\t\te.errType = multiError\n\t}\n}\n\nfunc (e *dErrImpl) Error() string {\n\tif e.len() == 0 {\n\t\treturn \"\"\n\t}\n\tif e.len() == 1 {\n\t\terrStr := e.errs[0].Error()\n\t\tif e.errType != \"\" {\n\t\t\treturn fmt.Sprintf(\"%s: %s\", e.errType, errStr)\n\t\t}\n\t\treturn errStr\n\t}\n\n\t\/\/ Multiple error handling.\n\tpre := \"* \"\n\tlines := make([]string, e.len())\n\tfor i, err := range e.errs {\n\t\tlines[i] = pre + err.Error()\n\t}\n\n\treturn \"Multiple errors:\\n\" + strings.Join(lines, \"\\n\")\n}\n\nfunc (e *dErrImpl) len() int {\n\treturn len(e.errs)\n}\n\nfunc (e *dErrImpl) merge(e2 *dErrImpl) {\n\tif e2.len() > 0 {\n\t\te.errs = append(e.errs, e2.errs...)\n\t\t\/\/ Take e2's type. This solves the situation of e having 0 errors, and e2 having 1.\n\t\t\/\/ Of course, there is a possibility of len(e) > 0 and len(e2) > 1, in which case,\n\t\t\/\/ the type should be a multiError.\n\t\te.errType = e2.errType\n\t\tif e.len() > 1 {\n\t\t\te.errType = multiError\n\t\t}\n\t}\n}\n\nfunc (e *dErrImpl) Type() string {\n\treturn e.errType\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":187} {"diff_hunk":"@@ -2,7 +2,10 @@ package h2quic\n \n import (\n \t\"bytes\"\n+\t\"errors\"\n+\t\"fmt\"\n \t\"net\/http\"\n+\t\"net\/url\"\n \t\"strconv\"\n \t\"strings\"\n \t\"sync\"","source_code":"package h2quic\n\nimport (\n\t\"bytes\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\tquic \"github.com\/lucas-clemente\/quic-go\"\n\t\"github.com\/lucas-clemente\/quic-go\/internal\/protocol\"\n\t\"github.com\/lucas-clemente\/quic-go\/internal\/utils\"\n\t\"golang.org\/x\/net\/http2\"\n\t\"golang.org\/x\/net\/http2\/hpack\"\n)\n\ntype responseWriter struct {\n\tdataStreamID protocol.StreamID\n\tdataStream quic.Stream\n\n\theaderStream quic.Stream\n\theaderStreamMutex *sync.Mutex\n\n\theader http.Header\n\tstatus int \/\/ status code passed to WriteHeader\n\theaderWritten bool\n\n\tsettings *sessionSettings\n}\n\nfunc newResponseWriter(headerStream quic.Stream, headerStreamMutex *sync.Mutex, dataStream quic.Stream, dataStreamID protocol.StreamID, settings *sessionSettings) *responseWriter {\n\treturn &responseWriter{\n\t\theader: http.Header{},\n\t\theaderStream: headerStream,\n\t\theaderStreamMutex: headerStreamMutex,\n\t\tdataStream: dataStream,\n\t\tdataStreamID: dataStreamID,\n\t\tsettings: settings,\n\t}\n}\n\nfunc (w *responseWriter) Header() http.Header {\n\treturn w.header\n}\n\nfunc (w *responseWriter) WriteHeader(status int) {\n\tif w.headerWritten {\n\t\treturn\n\t}\n\tw.headerWritten = true\n\tw.status = status\n\n\tvar headers bytes.Buffer\n\tenc := hpack.NewEncoder(&headers)\n\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: strconv.Itoa(status)})\n\n\tfor k, v := range w.header {\n\t\tfor index := range v {\n\t\t\tenc.WriteField(hpack.HeaderField{Name: strings.ToLower(k), Value: v[index]})\n\t\t}\n\t}\n\n\tutils.Infof(\"Responding with %d\", status)\n\tw.headerStreamMutex.Lock()\n\tdefer w.headerStreamMutex.Unlock()\n\th2framer := http2.NewFramer(w.headerStream, nil)\n\terr := h2framer.WriteHeaders(http2.HeadersFrameParam{\n\t\tStreamID: uint32(w.dataStreamID),\n\t\tEndHeaders: true,\n\t\tBlockFragment: headers.Bytes(),\n\t})\n\tif err != nil {\n\t\tutils.Errorf(\"could not write h2 header: %s\", err.Error())\n\t}\n}\n\nfunc (w *responseWriter) Write(p []byte) (int, error) {\n\tif !w.headerWritten {\n\t\tw.WriteHeader(200)\n\t}\n\tif !bodyAllowedForStatus(w.status) {\n\t\treturn 0, 
http.ErrBodyNotAllowed\n\t}\n\treturn w.dataStream.Write(p)\n}\n\nfunc (w *responseWriter) Flush() {}\n\n\/\/ This is a NOP. Use http.Request.Context\nfunc (w *responseWriter) CloseNotify() <-chan bool { return make(<-chan bool) }\n\n\/\/ test that we implement http.Flusher\nvar _ http.Flusher = &responseWriter{}\n\n\/\/ test that we implement http.CloseNotifier\nvar _ http.CloseNotifier = &responseWriter{}\n\n\/\/ copied from http2\/http2.go\n\/\/ bodyAllowedForStatus reports whether a given response status code\n\/\/ permits a body. See RFC 2616, section 4.4.\nfunc bodyAllowedForStatus(status int) bool {\n\tswitch {\n\tcase status >= 100 && status <= 199:\n\t\treturn false\n\tcase status == 204:\n\t\treturn false\n\tcase status == 304:\n\t\treturn false\n\t}\n\treturn true\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":188} {"diff_hunk":"@@ -129,7 +129,7 @@ func (pl *RoundRobin) Remove(pid transport.PeerIdentifier) error {\n \tpl.lock.Lock()\n \tdefer pl.lock.Unlock()\n \n-\tif err := pl.pr.Remove(pid); err != nil {\n+\tif err := pl.removeByPeerIdentifier(pid); err != nil {\n \t\t\/\/ The peer has already been removed\n \t\treturn err\n \t}","source_code":"\/\/ Copyright (c) 2016 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage roundrobin\n\nimport (\n\t\"context\"\n\t\"sync\"\n\n\tyerrors \"go.uber.org\/yarpc\/internal\/errors\"\n\t\"go.uber.org\/yarpc\/transport\"\n\t\"go.uber.org\/yarpc\/transport\/internal\/errors\"\n\n\t\"go.uber.org\/atomic\"\n)\n\n\/\/ New creates a new round robin PeerList using\nfunc New(peerIDs []transport.PeerIdentifier, agent transport.Agent) (*RoundRobin, error) {\n\trr := &RoundRobin{\n\t\tpr: NewPeerRing(len(peerIDs)),\n\t\tagent: agent,\n\t\tpeerAvailableEvent: make(chan struct{}, 1),\n\t}\n\n\terr := rr.addAll(peerIDs)\n\treturn rr, err\n}\n\n\/\/ RoundRobin is a PeerList which rotates which peers are to be selected in a circle\ntype RoundRobin struct {\n\tlock sync.Mutex\n\n\tpr *PeerRing\n\tpeerAvailableEvent chan struct{}\n\tagent transport.Agent\n\tstarted atomic.Bool\n}\n\nfunc (pl *RoundRobin) addAll(peerIDs []transport.PeerIdentifier) error {\n\tpl.lock.Lock()\n\tdefer pl.lock.Unlock()\n\n\tvar errs []error\n\n\tfor _, peerID := range peerIDs {\n\t\tif err := pl.addPeer(peerID); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn yerrors.MultiError(errs)\n}\n\n\/\/ Add a peer identifier to the round robin\nfunc (pl *RoundRobin) Add(pid transport.PeerIdentifier) error {\n\tpl.lock.Lock()\n\terr := pl.addPeer(pid)\n\tpl.lock.Unlock()\n\treturn err\n}\n\n\/\/ Must be run inside a mutex.Lock()\nfunc (pl *RoundRobin) addPeer(pid transport.PeerIdentifier) error {\n\tp, err := pl.agent.RetainPeer(pid, pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = pl.pr.Add(p); err != nil {\n\t\treturn err\n\t}\n\n\tpl.notifyPeerAvailable()\n\treturn nil\n}\n\n\/\/ Start notifies the RoundRobin that requests will start coming\nfunc (pl *RoundRobin) Start() error {\n\tif pl.started.Swap(true) {\n\t\treturn errors.ErrPeerListAlreadyStarted(\"RoundRobinList\")\n\t}\n\treturn nil\n}\n\n\/\/ Stop notifies the RoundRobin that requests will stop coming\nfunc (pl *RoundRobin) Stop() error {\n\tif !pl.started.Swap(false) {\n\t\treturn errors.ErrPeerListNotStarted(\"RoundRobinList\")\n\t}\n\treturn pl.clearPeers()\n}\n\nfunc (pl *RoundRobin) clearPeers() error {\n\tpl.lock.Lock()\n\tdefer pl.lock.Unlock()\n\n\tvar errs []error\n\n\tpeers := pl.pr.RemoveAll()\n\tfor _, p := range peers {\n\t\tif err := pl.agent.ReleasePeer(p, pl); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\treturn yerrors.MultiError(errs)\n}\n\n\/\/ Remove a peer identifier from the round robin\nfunc (pl *RoundRobin) Remove(pid transport.PeerIdentifier) error {\n\tpl.lock.Lock()\n\tdefer pl.lock.Unlock()\n\n\tif err := pl.pr.Remove(pid); err != nil {\n\t\t\/\/ The peer has already been removed\n\t\treturn err\n\t}\n\n\treturn pl.agent.ReleasePeer(pid, pl)\n}\n\n\/\/ ChoosePeer selects the next available peer in the round robin\nfunc (pl *RoundRobin) ChoosePeer(ctx context.Context, req *transport.Request) (transport.Peer, error) {\n\tif !pl.started.Load() {\n\t\treturn nil, errors.ErrPeerListNotStarted(\"RoundRobinList\")\n\t}\n\n\tfor {\n\t\tif nextPeer := pl.nextPeer(); nextPeer != nil {\n\t\t\tpl.notifyPeerAvailable()\n\t\t\treturn nextPeer, nil\n\t\t}\n\n\t\tif err := pl.waitForPeerAddedEvent(ctx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n}\n\n\/\/ nextPeer grabs the next 
available peer from the PeerRing and returns it,\n\/\/ if there are no available peers it returns nil\nfunc (pl *RoundRobin) nextPeer() transport.Peer {\n\tpl.lock.Lock()\n\tpeer := pl.pr.Next()\n\tpl.lock.Unlock()\n\treturn peer\n}\n\n\/\/ notifyPeerAvailable writes to a channel indicating that a Peer is currently\n\/\/ available for requests\nfunc (pl *RoundRobin) notifyPeerAvailable() {\n\tselect {\n\tcase pl.peerAvailableEvent <- struct{}{}:\n\tdefault:\n\t}\n}\n\n\/\/ waitForPeerAddedEvent waits until a peer is added to the peer list or the\n\/\/ given context finishes.\n\/\/ Must NOT be run in a mutex.Lock()\nfunc (pl *RoundRobin) waitForPeerAddedEvent(ctx context.Context) error {\n\tif _, ok := ctx.Deadline(); !ok {\n\t\treturn errors.ErrChooseContextHasNoDeadline(\"RoundRobinList\")\n\t}\n\n\tselect {\n\tcase <-pl.peerAvailableEvent:\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}\n\n\/\/ NotifyStatusChanged when the peer's status changes\nfunc (pl *RoundRobin) NotifyStatusChanged(transport.Peer) {}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":189} {"diff_hunk":"@@ -17,9 +17,13 @@ limitations under the License.\n package cainjector\n \n import (\n+\t\"context\"\n+\t\"fmt\"\n \t\"io\/ioutil\"\n \n \tlogf \"github.com\/jetstack\/cert-manager\/pkg\/logs\"\n+\t\"github.com\/pkg\/errors\"\n+\t\"golang.org\/x\/sync\/errgroup\"\n \n \tadmissionreg \"k8s.io\/api\/admissionregistration\/v1beta1\"\n \tapiext \"k8s.io\/apiextensions-apiserver\/pkg\/apis\/apiextensions\/v1beta1\"","source_code":"\/*\nCopyright 2019 The Jetstack cert-manager contributors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cainjector\n\nimport (\n\t\"io\/ioutil\"\n\n\tlogf \"github.com\/jetstack\/cert-manager\/pkg\/logs\"\n\n\tadmissionreg \"k8s.io\/api\/admissionregistration\/v1beta1\"\n\tapiext \"k8s.io\/apiextensions-apiserver\/pkg\/apis\/apiextensions\/v1beta1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\tapireg \"k8s.io\/kube-aggregator\/pkg\/apis\/apiregistration\/v1beta1\"\n\tctrl \"sigs.k8s.io\/controller-runtime\"\n)\n\n\/\/ injectorSet describes a particular setup of the injector controller\ntype injectorSetup struct {\n\tresourceName string\n\tinjector CertInjector\n\tlistType runtime.Object\n}\n\nvar (\n\tMutatingWebhookSetup = injectorSetup{\n\t\tresourceName: \"mutatingwebhookconfiguration\",\n\t\tinjector: mutatingWebhookInjector{},\n\t\tlistType: &admissionreg.MutatingWebhookConfigurationList{},\n\t}\n\n\tValidatingWebhookSetup = injectorSetup{\n\t\tresourceName: \"validatingwebhookconfiguration\",\n\t\tinjector: validatingWebhookInjector{},\n\t\tlistType: &admissionreg.ValidatingWebhookConfigurationList{},\n\t}\n\n\tAPIServiceSetup = injectorSetup{\n\t\tresourceName: \"apiservice\",\n\t\tinjector: apiServiceInjector{},\n\t\tlistType: &apireg.APIServiceList{},\n\t}\n\n\tCRDSetup = injectorSetup{\n\t\tresourceName: \"customresourcedefinition\",\n\t\tinjector: crdConversionInjector{},\n\t\tlistType: 
&apiext.CustomResourceDefinitionList{},\n\t}\n\n\tinjectorSetups = []injectorSetup{MutatingWebhookSetup, ValidatingWebhookSetup, APIServiceSetup, CRDSetup}\n\tControllerNames []string\n)\n\n\/\/ registerAllInjectors registers all injectors and based on the\n\/\/ graduation state of the injector decides how to log no kind\/resource match errors\nfunc registerAllInjectors(mgr ctrl.Manager, sources ...caDataSource) error {\n\tfor _, setup := range injectorSetups {\n\t\tif err := Register(mgr, setup, sources...); err != nil {\n\t\t\tif !meta.IsNoMatchError(err) || !setup.injector.IsAlpha() {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tctrl.Log.V(logf.WarnLevel).Info(\"unable to register injector which is still in an alpha phase.\"+\n\t\t\t\t\" Enable the feature on the API server in order to use this injector\",\n\t\t\t\t\"injector\", setup.resourceName)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Register registers an injection controller with the given manager, and adds relevant indicies.\nfunc Register(mgr ctrl.Manager, setup injectorSetup, sources ...caDataSource) error {\n\ttyp := setup.injector.NewTarget().AsObject()\n\tbuilder := ctrl.NewControllerManagedBy(mgr).For(typ)\n\tfor _, s := range sources {\n\t\tif err := s.ApplyTo(mgr, setup, builder); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn builder.Complete(&genericInjectReconciler{\n\t\tClient: mgr.GetClient(),\n\t\tsources: sources,\n\t\tlog: ctrl.Log.WithName(\"inject-controller\"),\n\t\tresourceName: setup.resourceName,\n\t\tinjector: setup.injector,\n\t})\n}\n\n\/\/ dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,\n\/\/ or an error if an error occurred reading the file\nfunc dataFromSliceOrFile(data []byte, file string) ([]byte, error) {\n\tif len(data) > 0 {\n\t\treturn data, nil\n\t}\n\tif len(file) > 0 {\n\t\tfileData, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\t\treturn fileData, nil\n\t}\n\treturn nil, nil\n}\n\n\/\/ RegisterCertificateBased registers all known injection controllers that\n\/\/ target Certificate resources with the given manager, and adds relevant\n\/\/ indices.\n\/\/ The registered controllers require the cert-manager API to be available\n\/\/ in order to run.\nfunc RegisterCertificateBased(mgr ctrl.Manager) error {\n\tsources := []caDataSource{\n\t\t&certificateDataSource{client: mgr.GetClient()},\n\t}\n\treturn registerAllInjectors(mgr, sources...)\n}\n\n\/\/ RegisterSecretBased registers all known injection controllers that\n\/\/ target Secret resources with the given manager, and adds relevant\n\/\/ indices.\n\/\/ The registered controllers only require the corev1 APi to be available in\n\/\/ order to run.\nfunc RegisterSecretBased(mgr ctrl.Manager) error {\n\tsources := []caDataSource{\n\t\t&secretDataSource{client: mgr.GetClient()},\n\t\t&kubeconfigDataSource{},\n\t}\n\treturn registerAllInjectors(mgr, sources...)\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":190} {"diff_hunk":"@@ -8,7 +8,8 @@ var (\n \tnewlineTabRE = regexp.MustCompile(`\\n\\t`)\n \tcertificateTimeErrorRE = regexp.MustCompile(`: current time \\S+ is after \\S+`)\n \t\/\/ aws\n-\tawsRequestIDRE = regexp.MustCompile(`(, )*(?i)(request id: )(?:[-[:xdigit:]]+)`)\n+\tawsRequestIDRE = regexp.MustCompile(`(, )*(?i)(request id: )(?:[-[:xdigit:]]+)`)\n+\tawsNotAuthorized = regexp.MustCompile(`(User: arn:aws:sts::)\\S+(:assumed-role\/[^\/]+\/)\\S+( is not authorized to perform: \\S+ on resource: arn:aws:iam::)[^:]+(:\\S+)`)\n \t\/\/ azure\n 
\tazureErrorDescriptionRE = regexp.MustCompile(`\\\"error_description\\\":\\\"(.*?)\\\\r\\\\n`)\n )","source_code":"package utils\n\nimport (\n\t\"regexp\"\n)\n\nvar (\n\tnewlineTabRE = regexp.MustCompile(`\\n\\t`)\n\tcertificateTimeErrorRE = regexp.MustCompile(`: current time \\S+ is after \\S+`)\n\t\/\/ aws\n\tawsRequestIDRE = regexp.MustCompile(`(, )*(?i)(request id: )(?:[-[:xdigit:]]+)`)\n\t\/\/ azure\n\tazureErrorDescriptionRE = regexp.MustCompile(`\\\"error_description\\\":\\\"(.*?)\\\\r\\\\n`)\n)\n\n\/\/ ErrorScrub scrubs cloud error messages destined for CRD status to remove things that\n\/\/ change every attempt, such as request IDs, which subsequently cause an infinite update\/reconcile loop.\nfunc ErrorScrub(err error) string {\n\tif err == nil {\n\t\treturn \"\"\n\t}\n\ts := newlineTabRE.ReplaceAllString(err.Error(), \", \")\n\ts = awsRequestIDRE.ReplaceAllString(s, \"\")\n\ts = certificateTimeErrorRE.ReplaceAllString(s, \"\")\n\t\/\/ if Azure error, return just the error description\n\tmatch := azureErrorDescriptionRE.FindStringSubmatch(s)\n\tif len(match) > 0 {\n\t\treturn match[1]\n\t}\n\treturn s\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":191} {"diff_hunk":"@@ -24,11 +24,14 @@ import (\n \tintv1alpha1 \"github.com\/google\/knative-gcp\/pkg\/apis\/intevents\/v1alpha1\"\n \tbcreconciler \"github.com\/google\/knative-gcp\/pkg\/client\/injection\/reconciler\/intevents\/v1alpha1\/brokercell\"\n \t\"github.com\/google\/knative-gcp\/pkg\/reconciler\"\n+\t\"github.com\/google\/knative-gcp\/pkg\/reconciler\/brokercell\/testingdata\"\n \t. \"github.com\/google\/knative-gcp\/pkg\/reconciler\/testing\"\n+\tappsv1 \"k8s.io\/api\/apps\/v1\"\n \tcorev1 \"k8s.io\/api\/core\/v1\"\n \t\"k8s.io\/apimachinery\/pkg\/runtime\"\n \t\"k8s.io\/client-go\/kubernetes\/scheme\"\n \tclientgotesting \"k8s.io\/client-go\/testing\"\n+\tfakekubeclient \"knative.dev\/pkg\/client\/injection\/kube\/client\/fake\"\n \t\"knative.dev\/pkg\/configmap\"\n \t\"knative.dev\/pkg\/controller\"\n \tlogtesting \"knative.dev\/pkg\/logging\/testing\"","source_code":"\/*\nCopyright 2020 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage brokercell\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\n\tintv1alpha1 \"github.com\/google\/knative-gcp\/pkg\/apis\/intevents\/v1alpha1\"\n\tbcreconciler \"github.com\/google\/knative-gcp\/pkg\/client\/injection\/reconciler\/intevents\/v1alpha1\/brokercell\"\n\t\"github.com\/google\/knative-gcp\/pkg\/reconciler\"\n\t. \"github.com\/google\/knative-gcp\/pkg\/reconciler\/testing\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/client-go\/kubernetes\/scheme\"\n\tclientgotesting \"k8s.io\/client-go\/testing\"\n\t\"knative.dev\/pkg\/configmap\"\n\t\"knative.dev\/pkg\/controller\"\n\tlogtesting \"knative.dev\/pkg\/logging\/testing\"\n\t. 
\"knative.dev\/pkg\/reconciler\/testing\"\n)\n\nconst (\n\ttestNS = \"testnamespace\"\n\tbrokerCellName = \"test-brokercell\"\n\n\tbrokerCellFinalizerName = \"brokercells.internal.events.cloud.google.com\"\n)\n\nvar (\n\ttestKey = fmt.Sprintf(\"%s\/%s\", testNS, brokerCellName)\n\n\tbrokerCellFinalizerUpdatedEvent = Eventf(corev1.EventTypeNormal, \"FinalizerUpdate\", `Updated \"test-brokercell\" finalizers`)\n\tbrokerCellReconciledEvent = Eventf(corev1.EventTypeNormal, \"BrokerCellReconciled\", `BrokerCell reconciled: \"testnamespace\/test-brokercell\"`)\n\tbrokerCellFinalizedEvent = Eventf(corev1.EventTypeNormal, \"BrokerCellFinalized\", `BrokerCell finalized: \"testnamespace\/test-brokercell\"`)\n)\n\nfunc init() {\n\t\/\/ Add types to scheme\n\t_ = intv1alpha1.AddToScheme(scheme.Scheme)\n}\n\nfunc TestAllCases(t *testing.T) {\n\ttable := TableTest{{\n\t\tName: \"bad workqueue key\",\n\t\tKey: \"too\/many\/parts\",\n\t}, {\n\t\tName: \"key not found\",\n\t\tKey: testKey,\n\t}, {\n\t\tName: \"BrokerCell is being deleted\",\n\t\tKey: testKey,\n\t\tObjects: []runtime.Object{\n\t\t\tNewBrokerCell(brokerCellName, testNS,\n\t\t\t\tWithInitBrokerCellConditions,\n\t\t\t\tWithBrokerCellDeletionTimestamp),\n\t\t},\n\t\tWantEvents: []string{\n\t\t\tbrokerCellFinalizedEvent,\n\t\t},\n\t}, {\n\t\tName: \"BrokerCell created\",\n\t\tKey: testKey,\n\t\tObjects: []runtime.Object{\n\t\t\tNewBrokerCell(brokerCellName, testNS),\n\t\t},\n\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{{\n\t\t\tObject: NewBrokerCell(brokerCellName, testNS,\n\t\t\t\tWithInitBrokerCellConditions,\n\t\t\t),\n\t\t}},\n\t\tWantEvents: []string{\n\t\t\tbrokerCellFinalizerUpdatedEvent,\n\t\t\tbrokerCellReconciledEvent,\n\t\t},\n\t\tWantPatches: []clientgotesting.PatchActionImpl{\n\t\t\tpatchFinalizers(testNS, brokerCellName, brokerCellFinalizerName),\n\t\t},\n\t}}\n\n\tdefer logtesting.ClearAll()\n\ttable.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher, testData map[string]interface{}) controller.Reconciler {\n\t\tr := &Reconciler{\n\t\t\tBase: reconciler.NewBase(ctx, controllerAgentName, cmw),\n\t\t}\n\t\treturn bcreconciler.NewReconciler(ctx, r.Logger, r.RunClientSet, listers.GetBrokerCellLister(), r.Recorder, r)\n\t}))\n}\n\nfunc patchFinalizers(namespace, name, finalizer string) clientgotesting.PatchActionImpl {\n\taction := clientgotesting.PatchActionImpl{}\n\taction.Name = name\n\taction.Namespace = namespace\n\tpatch := `{\"metadata\":{\"finalizers\":[\"` + finalizer + `\"],\"resourceVersion\":\"\"}}`\n\taction.Patch = []byte(patch)\n\treturn action\n}\n\nfunc patchRemoveFinalizers(namespace, name string) clientgotesting.PatchActionImpl {\n\taction := clientgotesting.PatchActionImpl{}\n\taction.Name = name\n\taction.Namespace = namespace\n\tpatch := `{\"metadata\":{\"finalizers\":[],\"resourceVersion\":\"\"}}`\n\taction.Patch = []byte(patch)\n\treturn action\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":192} {"diff_hunk":"@@ -104,7 +104,7 @@ func EncodeSimulatedMessage(msg Message, timestamp, blockNumber *big.Int, execut\n \ttx := ovmTransaction{\n \t\ttimestamp,\n \t\tblockNumber,\n-\t\tuint8(msg.QueueOrigin().Uint64()),\n+\t\tuint8(msg.QueueOrigin()),\n \t\t*msg.L1MessageSender(),\n \t\t*to,\n \t\tbig.NewInt(int64(msg.Gas())),","source_code":"package core\n\nimport 
(\n\t\"fmt\"\n\t\"math\/big\"\n\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"github.com\/ethereum\/go-ethereum\/core\/types\"\n\t\"github.com\/ethereum\/go-ethereum\/core\/vm\"\n\t\"github.com\/ethereum\/go-ethereum\/rollup\/dump\"\n)\n\nvar ZeroAddress = common.HexToAddress(\"0x0000000000000000000000000000000000000000\")\n\ntype ovmTransaction struct {\n\tTimestamp *big.Int `json:\"timestamp\"`\n\tBlockNumber *big.Int `json:\"blockNumber\"`\n\tL1QueueOrigin uint8 `json:\"l1QueueOrigin\"`\n\tL1TxOrigin common.Address `json:\"l1TxOrigin\"`\n\tEntrypoint common.Address `json:\"entrypoint\"`\n\tGasLimit *big.Int `json:\"gasLimit\"`\n\tData []uint8 `json:\"data\"`\n}\n\nfunc toExecutionManagerRun(evm *vm.EVM, msg Message) (Message, error) {\n\ttx := ovmTransaction{\n\t\tevm.Context.Time,\n\t\tmsg.L1BlockNumber(),\n\t\tuint8(msg.QueueOrigin().Uint64()),\n\t\t*msg.L1MessageSender(),\n\t\t*msg.To(),\n\t\tbig.NewInt(int64(msg.Gas())),\n\t\tmsg.Data(),\n\t}\n\n\tvar abi = evm.Context.OvmExecutionManager.ABI\n\tvar args = []interface{}{\n\t\ttx,\n\t\tevm.Context.OvmStateManager.Address,\n\t}\n\n\tret, err := abi.Pack(\"run\", args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toutputmsg, err := modMessage(\n\t\tmsg,\n\t\tmsg.From(),\n\t\t&evm.Context.OvmExecutionManager.Address,\n\t\tret,\n\t\tevm.Context.GasLimit,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn outputmsg, nil\n}\n\nfunc AsOvmMessage(tx *types.Transaction, signer types.Signer, decompressor common.Address, gasLimit uint64) (Message, error) {\n\tmsg, err := tx.AsMessage(signer)\n\tif err != nil {\n\t\t\/\/ This should only be allowed to pass if the transaction is in the ctc\n\t\t\/\/ already. The presence of `Index` should specify this.\n\t\tindex := tx.GetMeta().Index\n\t\tif index == nil {\n\t\t\treturn msg, fmt.Errorf(\"Cannot convert tx to message in asOvmMessage: %w\", err)\n\t\t}\n\t}\n\n\t\/\/ Queue origin L1ToL2 transactions do not go through the\n\t\/\/ sequencer entrypoint. 
The calldata is expected to be in the\n\t\/\/ correct format when deserialized from the EVM events, see\n\t\/\/ rollup\/sync_service.go.\n\tqo := msg.QueueOrigin()\n\tif qo != nil && qo.Uint64() == uint64(types.QueueOriginL1ToL2) {\n\t\treturn msg, nil\n\t}\n\n\t\/\/ Sequencer transactions get sent to the \"sequencer entrypoint,\" a contract that decompresses\n\t\/\/ the incoming transaction data.\n\toutmsg, err := modMessage(\n\t\tmsg,\n\t\tmsg.From(),\n\t\t&decompressor,\n\t\ttx.GetMeta().RawTransaction,\n\t\tgasLimit,\n\t)\n\n\tif err != nil {\n\t\treturn msg, fmt.Errorf(\"Cannot mod message: %w\", err)\n\t}\n\n\treturn outmsg, nil\n}\n\nfunc EncodeSimulatedMessage(msg Message, timestamp, blockNumber *big.Int, executionManager, stateManager dump.OvmDumpAccount) (Message, error) {\n\tto := msg.To()\n\tif to == nil {\n\t\tto = &common.Address{0}\n\t}\n\n\ttx := ovmTransaction{\n\t\ttimestamp,\n\t\tblockNumber,\n\t\tuint8(msg.QueueOrigin().Uint64()),\n\t\t*msg.L1MessageSender(),\n\t\t*to,\n\t\tbig.NewInt(int64(msg.Gas())),\n\t\tmsg.Data(),\n\t}\n\n\tfrom := msg.From()\n\tvar args = []interface{}{\n\t\ttx,\n\t\tfrom,\n\t\tstateManager.Address,\n\t}\n\n\toutput, err := executionManager.ABI.Pack(\"simulateMessage\", args...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Cannot pack simulateMessage: %w\", err)\n\t}\n\n\treturn modMessage(\n\t\tmsg,\n\t\tcommon.Address{},\n\t\t&executionManager.Address,\n\t\toutput,\n\t\tmsg.Gas(),\n\t)\n}\n\nfunc modMessage(\n\tmsg Message,\n\tfrom common.Address,\n\tto *common.Address,\n\tdata []byte,\n\tgasLimit uint64,\n) (Message, error) {\n\tqueueOrigin, err := getQueueOrigin(msg.QueueOrigin())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toutmsg := types.NewMessage(\n\t\tfrom,\n\t\tto,\n\t\tmsg.Nonce(),\n\t\tcommon.Big0,\n\t\tgasLimit,\n\t\tmsg.GasPrice(),\n\t\tdata,\n\t\tfalse,\n\t\tmsg.L1MessageSender(),\n\t\tmsg.L1BlockNumber(),\n\t\tqueueOrigin,\n\t\tmsg.SignatureHashType(),\n\t)\n\n\treturn outmsg, nil\n}\n\nfunc getQueueOrigin(\n\tqueueOrigin *big.Int,\n) (types.QueueOrigin, error) {\n\tif queueOrigin.Cmp(big.NewInt(0)) == 0 {\n\t\treturn types.QueueOriginSequencer, nil\n\t} else if queueOrigin.Cmp(big.NewInt(1)) == 0 {\n\t\treturn types.QueueOriginL1ToL2, nil\n\t} else if queueOrigin.Cmp(big.NewInt(2)) == 0 {\n\t\treturn types.QueueOriginL1ToL2, nil\n\t} else {\n\t\treturn types.QueueOriginSequencer, fmt.Errorf(\"invalid queue origin: %d\", queueOrigin)\n\t}\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":193} {"diff_hunk":"@@ -97,7 +97,7 @@ func (a *GCPActuator) getZones(region string) ([]string, error) {\n \tpageToken := \"\"\n \n \tfor {\n-\t\tzoneList, err := a.client.ListComputeZones(gcpclient.ListComputeZonesOptions{\n+\t\tzoneList, err := a.gcpClient.ListComputeZones(gcpclient.ListComputeZonesOptions{\n \t\t\tFilter: zoneFilter,\n \t\t\tPageToken: pageToken,\n \t\t})","source_code":"package remotemachineset\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/pkg\/errors\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\tmachineapi \"github.com\/openshift\/cluster-api\/pkg\/apis\/machine\/v1beta1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\n\tinstallgcp \"github.com\/openshift\/installer\/pkg\/asset\/machines\/gcp\"\n\tinstallertypes \"github.com\/openshift\/installer\/pkg\/types\"\n\tinstallertypesgcp \"github.com\/openshift\/installer\/pkg\/types\/gcp\"\n\n\thivev1 \"github.com\/openshift\/hive\/pkg\/apis\/hive\/v1\"\n\t\"github.com\/openshift\/hive\/pkg\/gcpclient\"\n)\n\n\/\/ GCPActuator encapsulates the pieces necessary to be able 
to generate\n\/\/ a list of MachineSets to sync to the remote cluster.\ntype GCPActuator struct {\n\tclient gcpclient.Client\n\tlogger log.FieldLogger\n}\n\nvar _ Actuator = &GCPActuator{}\n\n\/\/ NewGCPActuator is the constructor for building a GCPActuator\nfunc NewGCPActuator(gcpCreds *corev1.Secret, logger log.FieldLogger) (*GCPActuator, error) {\n\tgcpClient, err := gcpclient.NewClientFromSecret(gcpCreds)\n\tif err != nil {\n\t\tlogger.WithError(err).Warn(\"failed to create GCP client with creds in clusterDeployment's secret\")\n\t\treturn nil, err\n\t}\n\tactuator := &GCPActuator{\n\t\tclient: gcpClient,\n\t\tlogger: logger,\n\t}\n\treturn actuator, nil\n}\n\n\/\/ GenerateMachineSets satisfies the Actuator interface and will take a clusterDeployment and return a list of MachineSets\n\/\/ to sync to the remote cluster.\nfunc (a *GCPActuator) GenerateMachineSets(cd *hivev1.ClusterDeployment, pool *hivev1.MachinePool, logger log.FieldLogger) ([]*machineapi.MachineSet, error) {\n\tif cd.Spec.ClusterMetadata == nil {\n\t\treturn nil, errors.New(\"ClusterDeployment does not have cluster metadata\")\n\t}\n\tif cd.Spec.Platform.GCP == nil {\n\t\treturn nil, errors.New(\"ClusterDeployment is not for GCP\")\n\t}\n\tif pool.Spec.Platform.GCP == nil {\n\t\treturn nil, errors.New(\"MachinePool is not for GCP\")\n\t}\n\n\tic := &installertypes.InstallConfig{\n\t\tPlatform: installertypes.Platform{\n\t\t\tGCP: &installertypesgcp.Platform{\n\t\t\t\tRegion: cd.Spec.Platform.GCP.Region,\n\t\t\t},\n\t\t},\n\t}\n\n\tcomputePool := baseMachinePool(pool)\n\tcomputePool.Platform.GCP = &installertypesgcp.MachinePool{\n\t\tZones: pool.Spec.Platform.GCP.Zones,\n\t\tInstanceType: pool.Spec.Platform.GCP.InstanceType,\n\t}\n\n\t\/\/ get image ID for the generated machine sets\n\timageID, err := a.getImageID(cd, logger)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to find image ID for the machine sets\")\n\t}\n\n\tif len(computePool.Platform.GCP.Zones) == 0 {\n\t\tzones, err := a.getZones(cd.Spec.Platform.GCP.Region)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"compute pool not providing list of zones and failed to fetch list of zones\")\n\t\t}\n\t\tif len(zones) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"zero zones returned for region %s\", cd.Spec.Platform.GCP.Region)\n\t\t}\n\t\tcomputePool.Platform.GCP.Zones = zones\n\t}\n\n\tinstallerMachineSets, err := installgcp.MachineSets(cd.Spec.ClusterMetadata.InfraID, ic, computePool, imageID, pool.Spec.Name, \"worker-user-data\")\n\treturn installerMachineSets, errors.Wrap(err, \"failed to generate machinesets\")\n}\n\nfunc (a *GCPActuator) getZones(region string) ([]string, error) {\n\tzones := []string{}\n\n\t\/\/ Filter to regions matching '.*<region>.*' (where the zone is actually UP)\n\tzoneFilter := fmt.Sprintf(\"(region eq '.*%s.*') (status eq UP)\", region)\n\n\tpageToken := \"\"\n\n\tfor {\n\t\tzoneList, err := a.client.ListComputeZones(gcpclient.ListComputeZonesOptions{\n\t\t\tFilter: zoneFilter,\n\t\t\tPageToken: pageToken,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn zones, err\n\t\t}\n\n\t\tfor _, zone := range zoneList.Items {\n\t\t\tzones = append(zones, zone.Name)\n\t\t}\n\n\t\tif zoneList.NextPageToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tpageToken = zoneList.NextPageToken\n\t}\n\n\treturn zones, nil\n}\n\nfunc (a *GCPActuator) getImageID(cd *hivev1.ClusterDeployment, logger log.FieldLogger) (string, error) {\n\tinfra := cd.Spec.ClusterMetadata.InfraID\n\n\t\/\/ find names of the form '<infraID>-.*'\n\tfilter := fmt.Sprintf(\"name eq 
\\\"%s-.*\\\"\", infra)\n\tresult, err := a.client.ListComputeImages(gcpclient.ListComputeImagesOptions{Filter: filter})\n\tif err != nil {\n\t\tlogger.WithError(err).Warnf(\"failed to find a GCP image starting with name: %s\", infra)\n\t\treturn \"\", err\n\t}\n\tswitch len(result.Items) {\n\tcase 0:\n\t\tmsg := fmt.Sprintf(\"found 0 results searching for GCP image starting with name: %s\", infra)\n\t\tlogger.Warnf(msg)\n\t\treturn \"\", errors.New(msg)\n\tcase 1:\n\t\tlogger.Debugf(\"using image with name %s for machine sets\", result.Items[0].Name)\n\t\treturn result.Items[0].Name, nil\n\tdefault:\n\t\tmsg := fmt.Sprintf(\"unexpected number of results when looking for GCP image with name starting with %s\", infra)\n\t\tlogger.Warnf(msg)\n\t\treturn \"\", errors.New(msg)\n\t}\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":194} {"diff_hunk":"@@ -94,5 +94,5 @@ func (f *Factory) FromRole(roleARN string, region string) (*session.Session, err\n \t\treturn nil, err\n \t}\n \tsess.Handlers.Build.PushBackNamed(userAgentHandler())\n-\treturn sess, err\n+\treturn sess, nil\n }","source_code":"\/\/ Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\/\/ SPDX-License-Identifier: Apache-2.0\n\n\/\/ Package session provides functions that return AWS sessions to use in the AWS SDK.\npackage session\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\n\t\"github.com\/aws\/amazon-ecs-cli-v2\/internal\/pkg\/version\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\/stscreds\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/request\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n)\n\nconst userAgentHeader = \"User-Agent\"\n\n\/\/ userAgentHandler returns a http request handler that sets a custom user agent to all aws requests.\nfunc userAgentHandler() request.NamedHandler {\n\treturn request.NamedHandler{\n\t\tName: \"UserAgentHandler\",\n\t\tFn: func(r *request.Request) {\n\t\t\tuserAgent := r.HTTPRequest.Header.Get(userAgentHeader)\n\t\t\tr.HTTPRequest.Header.Set(userAgentHeader,\n\t\t\t\tfmt.Sprintf(\"aws-ecs-cli-v2\/%s (%s) %s\", version.Version, runtime.GOOS, userAgent))\n\t\t},\n\t}\n}\n\n\/\/ Factory holds methods to create sessions.\ntype Factory struct{}\n\n\/\/ Default returns a session configured against the \"default\" AWS profile.\nfunc (f *Factory) Default() (*session.Session, error) {\n\tsess, err := session.NewSessionWithOptions(session.Options{\n\t\tConfig: aws.Config{\n\t\t\tCredentialsChainVerboseErrors: aws.Bool(true),\n\t\t},\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsess.Handlers.Build.PushBackNamed(userAgentHandler())\n\treturn sess, err\n}\n\n\/\/ DefaultWithRegion returns a session configured against the \"default\" AWS profile and the input region.\nfunc (f *Factory) DefaultWithRegion(region string) (*session.Session, error) {\n\tsess, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(region),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsess.Handlers.Build.PushBackNamed(userAgentHandler())\n\treturn sess, err\n}\n\n\/\/ FromProfile returns a session configured against the input profile name.\nfunc (f *Factory) FromProfile(name string) (*session.Session, error) {\n\tsess, err := session.NewSessionWithOptions(session.Options{\n\t\tConfig: aws.Config{\n\t\t\tCredentialsChainVerboseErrors: aws.Bool(true),\n\t\t},\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tProfile: name,\n\t})\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tsess.Handlers.Build.PushBackNamed(userAgentHandler())\n\treturn sess, err\n}\n\n\/\/ FromRole returns a session configured against the input role and region.\nfunc (f *Factory) FromRole(roleARN string, region string) (*session.Session, error) {\n\tdefaultSession, err := f.Default()\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating default session: %w\", err)\n\t}\n\n\tcreds := stscreds.NewCredentials(defaultSession, roleARN)\n\tsess, err := session.NewSession(&aws.Config{\n\t\tCredentialsChainVerboseErrors: aws.Bool(true),\n\t\tCredentials: creds,\n\t\tRegion: &region,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsess.Handlers.Build.PushBackNamed(userAgentHandler())\n\treturn sess, err\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":195} {"diff_hunk":"@@ -45,6 +45,7 @@ import (\n \ttriggerinformer \"github.com\/google\/knative-gcp\/pkg\/client\/injection\/informers\/broker\/v1beta1\/trigger\"\n \ttriggerreconciler \"github.com\/google\/knative-gcp\/pkg\/client\/injection\/reconciler\/broker\/v1beta1\/trigger\"\n \t\"github.com\/google\/knative-gcp\/pkg\/reconciler\"\n+\treconcilerutils \"github.com\/google\/knative-gcp\/pkg\/reconciler\/utils\"\n \t\"github.com\/google\/knative-gcp\/pkg\/utils\"\n )\n ","source_code":"\/*\nCopyright 2020 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage trigger\n\nimport (\n\t\"context\"\n\n\t\"github.com\/google\/knative-gcp\/pkg\/reconciler\/celltenant\"\n\n\t\"cloud.google.com\/go\/pubsub\"\n\t\"go.uber.org\/zap\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\n\t\"github.com\/google\/knative-gcp\/pkg\/logging\"\n\t\"knative.dev\/eventing\/pkg\/apis\/eventing\"\n\teventingv1beta1 \"knative.dev\/eventing\/pkg\/apis\/eventing\/v1beta1\"\n\t\"knative.dev\/eventing\/pkg\/duck\"\n\t\"knative.dev\/pkg\/client\/injection\/ducks\/duck\/v1\/addressable\"\n\t\"knative.dev\/pkg\/client\/injection\/ducks\/duck\/v1\/source\"\n\t\"knative.dev\/pkg\/configmap\"\n\t\"knative.dev\/pkg\/controller\"\n\tpkgcontroller \"knative.dev\/pkg\/controller\"\n\t\"knative.dev\/pkg\/injection\"\n\tpkgreconciler \"knative.dev\/pkg\/reconciler\"\n\t\"knative.dev\/pkg\/resolver\"\n\n\tbrokerv1beta1 \"github.com\/google\/knative-gcp\/pkg\/apis\/broker\/v1beta1\"\n\t\"github.com\/google\/knative-gcp\/pkg\/apis\/configs\/dataresidency\"\n\tbrokerinformer \"github.com\/google\/knative-gcp\/pkg\/client\/injection\/informers\/broker\/v1beta1\/broker\"\n\ttriggerinformer \"github.com\/google\/knative-gcp\/pkg\/client\/injection\/informers\/broker\/v1beta1\/trigger\"\n\ttriggerreconciler \"github.com\/google\/knative-gcp\/pkg\/client\/injection\/reconciler\/broker\/v1beta1\/trigger\"\n\t\"github.com\/google\/knative-gcp\/pkg\/reconciler\"\n\t\"github.com\/google\/knative-gcp\/pkg\/utils\"\n)\n\nconst (\n\t\/\/ controllerAgentName is the string used by this controller to identify\n\t\/\/ itself when creating events.\n\tcontrollerAgentName = \"trigger-controller\"\n\t\/\/ finalizerName is the name of 
the finalizer that this controller adds to the Triggers that it reconciles.\n\tfinalizerName = \"googlecloud\"\n)\n\n\/\/ filterBroker is the function to filter brokers with proper brokerclass.\nvar filterBroker = pkgreconciler.AnnotationFilterFunc(eventingv1beta1.BrokerClassAnnotationKey, brokerv1beta1.BrokerClass, false \/*allowUnset*\/)\n\ntype Constructor injection.ControllerConstructor\n\n\/\/ NewConstructor creates a constructor to make a Trigger controller.\nfunc NewConstructor(dataresidencyss *dataresidency.StoreSingleton) Constructor {\n\treturn func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {\n\t\treturn newController(ctx, cmw, dataresidencyss.Store(ctx, cmw))\n\t}\n}\n\nfunc newController(ctx context.Context, cmw configmap.Watcher, drs *dataresidency.Store) *controller.Impl {\n\ttriggerInformer := triggerinformer.Get(ctx)\n\n\tvar client *pubsub.Client\n\t\/\/ If there is an error, the projectID will be empty. The reconciler will retry\n\t\/\/ to get the projectID during reconciliation.\n\tprojectID, err := utils.ProjectIDOrDefault(\"\")\n\tif err != nil {\n\t\tlogging.FromContext(ctx).Error(\"Failed to get project ID\", zap.Error(err))\n\t} else {\n\t\t\/\/ Attempt to create a pubsub client for all worker threads to use. If this\n\t\t\/\/ fails, pass a nil value to the Reconciler. They will attempt to\n\t\t\/\/ create a client on reconcile.\n\t\tif client, err = pubsub.NewClient(ctx, projectID); err != nil {\n\t\t\tclient = nil\n\t\t\tlogging.FromContext(ctx).Error(\"Failed to create controller-wide Pub\/Sub client\", zap.Error(err))\n\t\t}\n\t}\n\n\tif client != nil {\n\t\tgo func() {\n\t\t\t<-ctx.Done()\n\t\t\tclient.Close()\n\t\t}()\n\t}\n\tr := &Reconciler{\n\t\tBase: reconciler.NewBase(ctx, controllerAgentName, cmw),\n\t\tbrokerLister: brokerinformer.Get(ctx).Lister(),\n\t\ttargetReconciler: &celltenant.TargetReconciler{\n\t\t\tProjectID: projectID,\n\t\t\tPubsubClient: client,\n\t\t\tDataresidencyStore: drs,\n\t\t},\n\t}\n\n\timpl := triggerreconciler.NewImpl(ctx, r, withAgentAndFinalizer)\n\tr.sourceTracker = duck.NewListableTracker(ctx, source.Get, impl.EnqueueKey, controller.GetTrackerLease(ctx))\n\tr.addressableTracker = duck.NewListableTracker(ctx, addressable.Get, impl.EnqueueKey, controller.GetTrackerLease(ctx))\n\tr.uriResolver = resolver.NewURIResolver(ctx, impl.EnqueueKey)\n\n\tr.Logger.Info(\"Setting up event handlers\")\n\n\ttriggerInformer.Informer().AddEventHandlerWithResyncPeriod(controller.HandleAll(impl.Enqueue), reconciler.DefaultResyncPeriod)\n\n\t\/\/ Watch brokers.\n\tbrokerinformer.Get(ctx).Informer().AddEventHandler(\n\t\tcache.FilteringResourceEventHandler{\n\t\t\t\/\/ Only care about brokers with the proper class annotation\n\t\t\tFilterFunc: filterBroker,\n\t\t\tHandler: controller.HandleAll(func(obj interface{}) {\n\t\t\t\tif b, ok := obj.(*brokerv1beta1.Broker); ok {\n\t\t\t\t\ttriggers, err := triggerinformer.Get(ctx).Lister().Triggers(b.Namespace).List(labels.SelectorFromSet(map[string]string{eventing.BrokerLabelKey: b.Name}))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tr.Logger.Warn(\"Failed to list triggers\", zap.String(\"Namespace\", b.Namespace), zap.String(\"Broker\", b.Name))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tfor _, trigger := range triggers {\n\t\t\t\t\t\timpl.Enqueue(trigger)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}),\n\t\t},\n\t)\n\n\treturn impl\n}\n\nfunc withAgentAndFinalizer(_ *pkgcontroller.Impl) pkgcontroller.Options {\n\treturn pkgcontroller.Options{\n\t\tFinalizerName: finalizerName,\n\t\tAgentName: 
controllerAgentName,\n\t}\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":196} {"diff_hunk":"@@ -6,7 +6,8 @@ import (\n \t\"fmt\"\n \t\"html\"\n \t\"net\/http\"\n-\t\"strings\"\n+\t\"net\/url\"\n+\t\"regexp\"\n \n \t\"github.com\/mholt\/caddy\/middleware\"\n )","source_code":"\/\/ Package redirect is middleware for redirecting certain requests\n\/\/ to other locations.\npackage redirect\n\nimport (\n\t\"fmt\"\n\t\"html\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/mholt\/caddy\/middleware\"\n)\n\n\/\/ Redirect is middleware to respond with HTTP redirects\ntype Redirect struct {\n\tNext middleware.Handler\n\tRules []Rule\n}\n\n\/\/ ServeHTTP implements the middleware.Handler interface.\nfunc (rd Redirect) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {\n\tfor _, rule := range rd.Rules {\n\t\tif rule.From == \"\/\" {\n\t\t\t\/\/ Catchall redirect preserves path (TODO: Standardize\/formalize this behavior)\n\t\t\tnewPath := strings.TrimSuffix(rule.To, \"\/\") + r.URL.Path\n\t\t\tif rule.Meta {\n\t\t\t\tfmt.Fprintf(w, metaRedir, html.EscapeString(newPath))\n\t\t\t} else {\n\t\t\t\thttp.Redirect(w, r, newPath, rule.Code)\n\t\t\t}\n\t\t\treturn 0, nil\n\t\t}\n\t\tif r.URL.Path == rule.From {\n\t\t\tif rule.Meta {\n\t\t\t\tfmt.Fprintf(w, metaRedir, html.EscapeString(rule.To))\n\t\t\t} else {\n\t\t\t\thttp.Redirect(w, r, rule.To, rule.Code)\n\t\t\t}\n\t\t\treturn 0, nil\n\t\t}\n\t}\n\treturn rd.Next.ServeHTTP(w, r)\n}\n\n\/\/ Rule describes an HTTP redirect rule.\ntype Rule struct {\n\tFrom, To string\n\tCode int\n\tMeta bool\n}\n\nvar metaRedir = `<html>\n<head>\n <meta http-equiv=\"refresh\" content=\"0;URL='%s'\">\n<\/head>\n<body>redirecting...<\/body>\n<\/html>`\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":197} {"diff_hunk":"@@ -173,8 +173,3 @@ func (o *ocDistAggregator) Count() (uint64, error) {\n func (o *ocDistAggregator) Histogram() (aggregation.Buckets, error) {\n \treturn o.buckets, nil\n }\n-\n-\/\/ end returns the time the histogram was measured.\n-func (o *ocDistAggregator) end() time.Time {\n-\treturn o.endTime\n-}","source_code":"\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage opencensus \/\/ import \"go.opentelemetry.io\/otel\/bridge\/opencensus\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"go.opencensus.io\/metric\/metricdata\"\n\n\t\"go.opentelemetry.io\/otel\/metric\/number\"\n\t\"go.opentelemetry.io\/otel\/sdk\/export\/metric\/aggregation\"\n)\n\nvar (\n\terrIncompatibleType = errors.New(\"incompatible type for aggregation\")\n\terrEmpty = errors.New(\"points may not be empty\")\n\terrBadPoint = errors.New(\"point cannot be converted\")\n)\n\n\/\/ aggregationWithEndTime is an aggregation that can also provide the timestamp\n\/\/ of the last recorded point.\ntype aggregationWithEndTime interface {\n\taggregation.Aggregation\n\tend() time.Time\n}\n\n\/\/ newAggregationFromPoints creates an OpenTelemetry aggregation from\n\/\/ OpenCensus points. 
Points may not be empty and must be either\n\/\/ all (int|float)64 or all *metricdata.Distribution.\nfunc newAggregationFromPoints(points []metricdata.Point) (aggregationWithEndTime, error) {\n\tif len(points) == 0 {\n\t\treturn nil, errEmpty\n\t}\n\tswitch t := points[0].Value.(type) {\n\tcase int64:\n\t\treturn newExactAggregator(points)\n\tcase float64:\n\t\treturn newExactAggregator(points)\n\tcase *metricdata.Distribution:\n\t\treturn newDistributionAggregator(points)\n\tdefault:\n\t\t\/\/ TODO add *metricdata.Summary support\n\t\treturn nil, fmt.Errorf(\"%w: %v\", errIncompatibleType, t)\n\t}\n}\n\nvar _ aggregation.Aggregation = &ocExactAggregator{}\nvar _ aggregation.LastValue = &ocExactAggregator{}\nvar _ aggregation.Points = &ocExactAggregator{}\n\n\/\/ newExactAggregator creates an OpenTelemetry aggreation from OpenCensus points.\n\/\/ Points may not be empty, and must only contain integers or floats.\nfunc newExactAggregator(pts []metricdata.Point) (aggregationWithEndTime, error) {\n\tpoints := make([]aggregation.Point, len(pts))\n\tfor i, pt := range pts {\n\t\tswitch t := pt.Value.(type) {\n\t\tcase int64:\n\t\t\tpoints[i] = aggregation.Point{\n\t\t\t\tNumber: number.NewInt64Number(pt.Value.(int64)),\n\t\t\t\tTime: pt.Time,\n\t\t\t}\n\t\tcase float64:\n\t\t\tpoints[i] = aggregation.Point{\n\t\t\t\tNumber: number.NewFloat64Number(pt.Value.(float64)),\n\t\t\t\tTime: pt.Time,\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%w: %v\", errIncompatibleType, t)\n\t\t}\n\t}\n\treturn &ocExactAggregator{\n\t\tpoints: points,\n\t}, nil\n}\n\ntype ocExactAggregator struct {\n\tpoints []aggregation.Point\n}\n\n\/\/ Kind returns the kind of aggregation this is.\nfunc (o *ocExactAggregator) Kind() aggregation.Kind {\n\treturn aggregation.ExactKind\n}\n\n\/\/ Points returns access to the raw data set.\nfunc (o *ocExactAggregator) Points() ([]aggregation.Point, error) {\n\treturn o.points, nil\n}\n\n\/\/ LastValue returns the last point.\nfunc (o *ocExactAggregator) LastValue() (number.Number, time.Time, error) {\n\tlast := o.points[len(o.points)-1]\n\treturn last.Number, last.Time, nil\n}\n\n\/\/ end returns the timestamp of the last point\nfunc (o *ocExactAggregator) end() time.Time {\n\t_, t, _ := o.LastValue()\n\treturn t\n}\n\nvar _ aggregation.Aggregation = &ocDistAggregator{}\nvar _ aggregation.Histogram = &ocDistAggregator{}\n\n\/\/ newDistributionAggregator creates an OpenTelemetry aggreation from\n\/\/ OpenCensus points. Points may not be empty, and must only contain\n\/\/ Distributions. 
The most recent disribution will be used in the aggregation.\nfunc newDistributionAggregator(pts []metricdata.Point) (aggregationWithEndTime, error) {\n\t\/\/ only use the most recent datapoint for now.\n\tpt := pts[len(pts)-1]\n\tval, ok := pt.Value.(*metricdata.Distribution)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"%w: %v\", errBadPoint, pt.Value)\n\t}\n\tbucketCounts := make([]uint64, len(val.Buckets))\n\tfor i, bucket := range val.Buckets {\n\t\tif bucket.Count < 0 {\n\t\t\treturn nil, fmt.Errorf(\"%w: bucket count may not be negative\", errBadPoint)\n\t\t}\n\t\tbucketCounts[i] = uint64(bucket.Count)\n\t}\n\tif val.Count < 0 {\n\t\treturn nil, fmt.Errorf(\"%w: count may not be negative\", errBadPoint)\n\t}\n\treturn &ocDistAggregator{\n\t\tsum: number.NewFloat64Number(val.Sum),\n\t\tcount: uint64(val.Count),\n\t\tbuckets: aggregation.Buckets{\n\t\t\tBoundaries: val.BucketOptions.Bounds,\n\t\t\tCounts: bucketCounts,\n\t\t},\n\t\tendTime: pts[len(pts)-1].Time,\n\t}, nil\n}\n\ntype ocDistAggregator struct {\n\tsum number.Number\n\tcount uint64\n\tbuckets aggregation.Buckets\n\tendTime time.Time\n}\n\n\/\/ Kind returns the kind of aggregation this is.\nfunc (o *ocDistAggregator) Kind() aggregation.Kind {\n\treturn aggregation.HistogramKind\n}\n\n\/\/ Sum returns the sum of values.\nfunc (o *ocDistAggregator) Sum() (number.Number, error) {\n\treturn o.sum, nil\n}\n\n\/\/ Count returns the number of values.\nfunc (o *ocDistAggregator) Count() (uint64, error) {\n\treturn o.count, nil\n}\n\n\/\/ Histogram returns the count of events in pre-determined buckets.\nfunc (o *ocDistAggregator) Histogram() (aggregation.Buckets, error) {\n\treturn o.buckets, nil\n}\n\n\/\/ end returns the time the histogram was measured.\nfunc (o *ocDistAggregator) end() time.Time {\n\treturn o.endTime\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":198} {"diff_hunk":"@@ -8,10 +8,12 @@ package staking\n \n import (\n \t\"context\"\n+\t\"math\/big\"\n \n \t\"github.com\/gogo\/protobuf\/proto\"\n \t\"github.com\/pkg\/errors\"\n \n+\t\"github.com\/iotexproject\/iotex-address\/address\"\n \t\"github.com\/iotexproject\/iotex-proto\/golang\/iotextypes\"\n \n \t\"github.com\/iotexproject\/iotex-core\/db\"","source_code":"\/\/ Copyright (c) 2020 IoTeX Foundation\n\/\/ This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no\n\/\/ warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent\n\/\/ permitted by law, all liability for your use of the code is disclaimed. 
This source code is governed by Apache\n\/\/ License 2.0 that can be found in the LICENSE file.\n\npackage staking\n\nimport (\n\t\"context\"\n\n\t\"github.com\/gogo\/protobuf\/proto\"\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/iotexproject\/iotex-proto\/golang\/iotextypes\"\n\n\t\"github.com\/iotexproject\/iotex-core\/db\"\n\t\"github.com\/iotexproject\/iotex-core\/pkg\/util\/byteutil\"\n)\n\nconst (\n\t\/\/ StakingCandidatesNamespace is a namespace to store candidates with epoch start height\n\tStakingCandidatesNamespace = \"stakingCandidates\"\n\t\/\/ StakingBucketsNamespace is a namespace to store vote buckets with epoch start height\n\tStakingBucketsNamespace = \"stakingBuckets\"\n)\n\nconst indexerHeightKey = \"latestHeight\"\n\n\/\/ CandidatesBucketsIndexer is an indexer to store candidates by given height\ntype CandidatesBucketsIndexer struct {\n\tlatestCandidatesHeight uint64\n\tlatestBucketsHeight uint64\n\tkvStore db.KVStore\n}\n\n\/\/ NewStakingCandidatesBucketsIndexer creates a new StakingCandidatesIndexer\nfunc NewStakingCandidatesBucketsIndexer(kv db.KVStore) (*CandidatesBucketsIndexer, error) {\n\tif kv == nil {\n\t\treturn nil, ErrMissingField\n\t}\n\treturn &CandidatesBucketsIndexer{\n\t\tkvStore: kv,\n\t}, nil\n}\n\n\/\/ Start starts the indexer\nfunc (cbi *CandidatesBucketsIndexer) Start(ctx context.Context) error {\n\tif err := cbi.kvStore.Start(ctx); err != nil {\n\t\treturn err\n\t}\n\tret, err := cbi.kvStore.Get(StakingCandidatesNamespace, []byte(indexerHeightKey))\n\tswitch errors.Cause(err) {\n\tcase nil:\n\t\tcbi.latestCandidatesHeight = byteutil.BytesToUint64BigEndian(ret)\n\tcase db.ErrNotExist:\n\t\tcbi.latestCandidatesHeight = 0\n\tdefault:\n\t\treturn err\n\t}\n\n\tret, err = cbi.kvStore.Get(StakingBucketsNamespace, []byte(indexerHeightKey))\n\tswitch errors.Cause(err) {\n\tcase nil:\n\t\tcbi.latestBucketsHeight = byteutil.BytesToUint64BigEndian(ret)\n\tcase db.ErrNotExist:\n\t\tcbi.latestBucketsHeight = 0\n\tdefault:\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Stop stops the indexer\nfunc (cbi *CandidatesBucketsIndexer) Stop(ctx context.Context) error {\n\treturn cbi.kvStore.Stop(ctx)\n}\n\n\/\/ PutCandidates puts candidates into indexer\nfunc (cbi *CandidatesBucketsIndexer) PutCandidates(height uint64, candidates *iotextypes.CandidateListV2) error {\n\tcandidatesBytes, err := proto.Marshal(candidates)\n\tif err != nil {\n\t\treturn err\n\t}\n\theightBytes := byteutil.Uint64ToBytesBigEndian(height)\n\tif err := cbi.kvStore.Put(StakingCandidatesNamespace, heightBytes, candidatesBytes); err != nil {\n\t\treturn err\n\t}\n\tif err := cbi.kvStore.Put(StakingCandidatesNamespace, []byte(indexerHeightKey), heightBytes); err != nil {\n\t\treturn err\n\t}\n\tcbi.latestCandidatesHeight = height\n\treturn nil\n}\n\n\/\/ GetCandidates gets candidates from indexer given epoch start height\nfunc (cbi *CandidatesBucketsIndexer) GetCandidates(height uint64, offset, limit uint32) ([]byte, uint64, error) {\n\tif height > cbi.latestCandidatesHeight {\n\t\theight = cbi.latestCandidatesHeight\n\t}\n\tcandidateList := &iotextypes.CandidateListV2{}\n\tret, err := cbi.kvStore.Get(StakingCandidatesNamespace, byteutil.Uint64ToBytesBigEndian(height))\n\tif errors.Cause(err) == db.ErrNotExist {\n\t\td, err := proto.Marshal(candidateList)\n\t\treturn d, height, err\n\t}\n\tif err != nil {\n\t\treturn nil, height, err\n\t}\n\tif err := proto.Unmarshal(ret, candidateList); err != nil {\n\t\treturn nil, height, err\n\t}\n\tlength := uint32(len(candidateList.Candidates))\n\tif 
offset >= length {\n\t\td, err := proto.Marshal(&iotextypes.CandidateListV2{})\n\t\treturn d, height, err\n\t}\n\tend := offset + limit\n\tif end > uint32(len(candidateList.Candidates)) {\n\t\tend = uint32(len(candidateList.Candidates))\n\t}\n\tcandidateList.Candidates = candidateList.Candidates[offset:end]\n\td, err := proto.Marshal(candidateList)\n\treturn d, height, err\n}\n\n\/\/ PutBuckets puts vote buckets into indexer\nfunc (cbi *CandidatesBucketsIndexer) PutBuckets(height uint64, buckets *iotextypes.VoteBucketList) error {\n\tbucketsBytes, err := proto.Marshal(buckets)\n\tif err != nil {\n\t\treturn err\n\t}\n\theightBytes := byteutil.Uint64ToBytesBigEndian(height)\n\tif err := cbi.kvStore.Put(StakingBucketsNamespace, heightBytes, bucketsBytes); err != nil {\n\t\treturn err\n\t}\n\tif err := cbi.kvStore.Put(StakingBucketsNamespace, []byte(indexerHeightKey), heightBytes); err != nil {\n\t\treturn err\n\t}\n\tcbi.latestBucketsHeight = height\n\treturn nil\n}\n\n\/\/ GetBuckets gets vote buckets from indexer given epoch start height\nfunc (cbi *CandidatesBucketsIndexer) GetBuckets(height uint64, offset, limit uint32) ([]byte, uint64, error) {\n\tif height > cbi.latestBucketsHeight {\n\t\theight = cbi.latestBucketsHeight\n\t}\n\tbuckets := &iotextypes.VoteBucketList{}\n\tret, err := cbi.kvStore.Get(StakingBucketsNamespace, byteutil.Uint64ToBytesBigEndian(height))\n\tif errors.Cause(err) == db.ErrNotExist {\n\t\td, err := proto.Marshal(buckets)\n\t\treturn d, height, err\n\t}\n\tif err != nil {\n\t\treturn nil, height, err\n\t}\n\tif err := proto.Unmarshal(ret, buckets); err != nil {\n\t\treturn nil, height, err\n\t}\n\tlength := uint32(len(buckets.Buckets))\n\tif offset >= length {\n\t\td, err := proto.Marshal(&iotextypes.VoteBucketList{})\n\t\treturn d, height, err\n\t}\n\tend := offset + limit\n\tif end > uint32(len(buckets.Buckets)) {\n\t\tend = uint32(len(buckets.Buckets))\n\t}\n\tbuckets.Buckets = buckets.Buckets[offset:end]\n\td, err := proto.Marshal(buckets)\n\treturn d, height, err\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":199} {"diff_hunk":"@@ -1,8 +1,9 @@\n package structs\n \n import (\n-\t\"fmt\"\n+\t\"strings\"\n \n+\t\"github.com\/pborman\/uuid\"\n \t\"github.com\/pkg\/errors\"\n \t\"github.com\/sonm-io\/core\/proto\"\n )","source_code":"package structs\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sonm-io\/core\/proto\"\n)\n\ntype Network interface {\n\t\/\/ ID returns a unique identifier that will be used as a new network name.\n\tID() string\n\t\/\/ NetworkType returns a network driver name used to establish networking.\n\tNetworkType() string\n\t\/\/ NetworkOptions return configuration map, passed directly to network driver, this map should not be mutated.\n\tNetworkOptions() map[string]string\n\t\/\/ Returns network subnet in CIDR notation if applicable.\n\tNetworkCIDR() string\n\t\/\/ Returns specified addr to join the network.\n\tNetworkAddr() string\n}\n\ntype NetworkSpec struct {\n\t*sonm.NetworkSpec\n\tId string\n}\n\nfunc (n *NetworkSpec) ID() string {\n\treturn n.Id\n}\n\nfunc (n *NetworkSpec) NetworkType() string {\n\treturn n.GetType()\n}\n\nfunc (n *NetworkSpec) NetworkOptions() map[string]string {\n\treturn n.GetOptions()\n}\n\nfunc (n *NetworkSpec) NetworkCIDR() string {\n\treturn n.GetSubnet()\n}\n\nfunc (n *NetworkSpec) NetworkAddr() string {\n\treturn n.GetAddr()\n}\n\nfunc validateNetworkSpec(id string, spec *sonm.NetworkSpec) error {\n\tif spec.Type == \"tinc\" {\n\t\tif len(spec.Addr) == 0 || 
len(spec.Subnet) == 0 {\n\t\t\treturn errors.New(\"address and subnet are required for tinc driver\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc NewNetworkSpec(id string, spec *sonm.NetworkSpec) (*NetworkSpec, error) {\n\terr := validateNetworkSpec(id, spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &NetworkSpec{spec, id}, nil\n}\n\nfunc NewNetworkSpecs(idPrefix string, specs []*sonm.NetworkSpec) ([]Network, error) {\n\tresult := make([]Network, 0, len(specs))\n\tfor i, s := range specs {\n\t\tspec, err := NewNetworkSpec(idPrefix+\"__\"+fmt.Sprint(i), s)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult = append(result, spec)\n\t}\n\treturn result, nil\n}\n","lang_cluster":"Go","diff_tag":0,"review_comment":"","id":200} {"diff_hunk":"@@ -29,6 +29,8 @@ import org.springframework.web.reactive.result.method.HandlerMethodArgumentResol\n import org.springframework.web.server.ServerWebExchange;\n import reactor.core.publisher.Mono;\n \n+import java.util.Optional;\n+\n \/**\n * An implementation of a {@link HandlerMethodArgumentResolver} that is capable\n * of resolving a method parameter to an argument value of type {@link OAuth2AuthorizedClient}.","source_code":"\/*\n * Copyright 2002-2018 the original author or authors.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage org.springframework.security.oauth2.client.web.reactive.result.method.annotation;\n\nimport org.springframework.core.MethodParameter;\nimport org.springframework.core.annotation.AnnotatedElementUtils;\nimport org.springframework.security.oauth2.client.OAuth2AuthorizedClient;\nimport org.springframework.security.oauth2.client.annotation.RegisteredOAuth2AuthorizedClient;\nimport org.springframework.security.oauth2.client.registration.ReactiveClientRegistrationRepository;\nimport org.springframework.security.oauth2.client.web.server.ServerOAuth2AuthorizedClientRepository;\nimport org.springframework.util.Assert;\nimport org.springframework.util.StringUtils;\nimport org.springframework.web.reactive.BindingContext;\nimport org.springframework.web.reactive.result.method.HandlerMethodArgumentResolver;\nimport org.springframework.web.server.ServerWebExchange;\nimport reactor.core.publisher.Mono;\n\n\/**\n * An implementation of a {@link HandlerMethodArgumentResolver} that is capable\n * of resolving a method parameter to an argument value of type {@link OAuth2AuthorizedClient}.\n *\n *
 <p>
\n * For example:\n *
 <pre>
\n * @Controller\n * public class MyController {\n *     @GetMapping(\"\/authorized-client\")\n *     public Mono<String> authorizedClient(@RegisteredOAuth2AuthorizedClient(\"login-client\") OAuth2AuthorizedClient authorizedClient) {\n *         \/\/ do something with authorizedClient\n *     }\n * }\n * <\/pre>\n *\n * @author Rob Winch\n * @since 5.1\n * @see RegisteredOAuth2AuthorizedClient\n *\/\npublic final class OAuth2AuthorizedClientArgumentResolver implements HandlerMethodArgumentResolver {\n\n\tprivate final OAuth2AuthorizedClientResolver authorizedClientResolver;\n\n\t\/**\n\t * Constructs an {@code OAuth2AuthorizedClientArgumentResolver} using the provided parameters.\n\t *\n\t * @param authorizedClientRepository the authorized client repository\n\t *\/\n\tpublic OAuth2AuthorizedClientArgumentResolver(ReactiveClientRegistrationRepository clientRegistrationRepository, ServerOAuth2AuthorizedClientRepository authorizedClientRepository) {\n\t\tAssert.notNull(authorizedClientRepository, \"authorizedClientRepository cannot be null\");\n\t\tthis.authorizedClientResolver = new OAuth2AuthorizedClientResolver(clientRegistrationRepository, authorizedClientRepository);\n\t\tthis.authorizedClientResolver.setDefaultOAuth2AuthorizedClient(true);\n\t}\n\n\t@Override\n\tpublic boolean supportsParameter(MethodParameter parameter) {\n\t\treturn AnnotatedElementUtils.findMergedAnnotation(parameter.getParameter(), RegisteredOAuth2AuthorizedClient.class) != null;\n\t}\n\n\t@Override\n\tpublic Mono<Object> resolveArgument(\n\t\t\tMethodParameter parameter, BindingContext bindingContext, ServerWebExchange exchange) {\n\t\treturn Mono.defer(() -> {\n\t\t\tRegisteredOAuth2AuthorizedClient authorizedClientAnnotation = AnnotatedElementUtils\n\t\t\t\t\t.findMergedAnnotation(parameter.getParameter(), RegisteredOAuth2AuthorizedClient.class);\n\n\t\t\tString clientRegistrationId = StringUtils.hasLength(authorizedClientAnnotation.registrationId()) ?\n\t\t\t\t\tauthorizedClientAnnotation.registrationId() : null;\n\n\t\t\treturn this.authorizedClientResolver.createDefaultedRequest(clientRegistrationId, null, exchange)\n\t\t\t\t\t.flatMap(this.authorizedClientResolver::loadAuthorizedClient);\n\t\t});\n\t}\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":201}
{"diff_hunk":"@@ -75,7 +75,7 @@ abstract class BaseDeltaTaskWriter extends BaseTaskWriter {\n \n       case DELETE:\n       case UPDATE_BEFORE:\n-        writer.delete(row);\n+        writer.deleteKey(projectDeleteData(row));\n         break;\n \n       default:","source_code":"\/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *   http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied.  See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\/\n\npackage org.apache.iceberg.flink.sink;\n\nimport java.io.IOException;\nimport java.util.List;\nimport org.apache.flink.table.data.RowData;\nimport org.apache.flink.table.types.logical.RowType;\nimport org.apache.iceberg.FileFormat;\nimport org.apache.iceberg.PartitionKey;\nimport org.apache.iceberg.PartitionSpec;\nimport org.apache.iceberg.Schema;\nimport org.apache.iceberg.StructLike;\nimport org.apache.iceberg.flink.RowDataWrapper;\nimport org.apache.iceberg.io.BaseTaskWriter;\nimport org.apache.iceberg.io.FileAppenderFactory;\nimport org.apache.iceberg.io.FileIO;\nimport org.apache.iceberg.io.OutputFileFactory;\nimport org.apache.iceberg.relocated.com.google.common.collect.Sets;\nimport org.apache.iceberg.types.TypeUtil;\n\nabstract class BaseDeltaTaskWriter extends BaseTaskWriter {\n\n  private final Schema schema;\n  private final Schema deleteSchema;\n  private final RowDataWrapper wrapper;\n\n  BaseDeltaTaskWriter(PartitionSpec spec,\n                      FileFormat format,\n                      FileAppenderFactory appenderFactory,\n                      OutputFileFactory fileFactory,\n                      FileIO io,\n                      long targetFileSize,\n                      Schema schema,\n                      RowType flinkSchema,\n                      List equalityFieldIds) {\n    super(spec, format, appenderFactory, fileFactory, io, targetFileSize);\n    this.schema = schema;\n    this.deleteSchema = TypeUtil.select(schema, Sets.newHashSet(equalityFieldIds));\n    this.wrapper = new RowDataWrapper(flinkSchema, schema.asStruct());\n  }\n\n  abstract RowDataDeltaWriter route(RowData row);\n\n  RowDataWrapper wrapper() {\n    return wrapper;\n  }\n\n  @Override\n  public void write(RowData row) throws IOException {\n    RowDataDeltaWriter writer = route(row);\n\n    switch (row.getRowKind()) {\n      case INSERT:\n      case UPDATE_AFTER:\n        writer.write(row);\n        break;\n\n      case DELETE:\n      case UPDATE_BEFORE:\n        writer.delete(row);\n        break;\n\n      default:\n        throw new UnsupportedOperationException(\"Unknown row kind: \" + row.getRowKind());\n    }\n  }\n\n  protected class RowDataDeltaWriter extends BaseEqualityDeltaWriter {\n    RowDataDeltaWriter(PartitionKey partition) {\n      super(partition, schema, deleteSchema);\n    }\n\n    @Override\n    protected StructLike asStructLike(RowData data) {\n      return wrapper.wrap(data);\n 
   }\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":202}
{"diff_hunk":"@@ -38,15 +38,9 @@ public class TransactionPoolFactory {\n       final Wei minTransactionGasPrice,\n       final TransactionPoolConfiguration transactionPoolConfiguration) {\n \n-    final PendingTransactions pendingTransactions =\n-        new PendingTransactions(\n-            transactionPoolConfiguration.getPendingTxRetentionPeriod(),\n-            transactionPoolConfiguration.getTxPoolMaxSize(),\n-            transactionPoolConfiguration.getPooledTransactionHashesSize(),\n-            clock,\n-            metricsSystem,\n-            protocolContext.getBlockchain()::getChainHeadHeader,\n-            transactionPoolConfiguration.getPriceBump());\n+    final AbstractPendingTransactionsSorter pendingTransactions =\n+        createPendingTransactionsSorter(\n+            protocolSchedule, protocolContext, clock, metricsSystem, transactionPoolConfiguration);\n \n     final PeerTransactionTracker transactionTracker = new PeerTransactionTracker();\n     final TransactionsMessageSender transactionsMessageSender =","source_code":"\/*\n * Copyright ConsenSys AG.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n * the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n * an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n *\/\npackage org.hyperledger.besu.ethereum.eth.transactions;\n\nimport org.hyperledger.besu.ethereum.ProtocolContext;\nimport org.hyperledger.besu.ethereum.core.Wei;\nimport org.hyperledger.besu.ethereum.eth.manager.EthContext;\nimport org.hyperledger.besu.ethereum.eth.messages.EthPV62;\nimport org.hyperledger.besu.ethereum.eth.messages.EthPV65;\nimport org.hyperledger.besu.ethereum.eth.sync.state.SyncState;\nimport org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;\nimport org.hyperledger.besu.metrics.BesuMetricCategory;\nimport org.hyperledger.besu.plugin.services.MetricsSystem;\n\nimport java.time.Clock;\n\npublic class TransactionPoolFactory {\n\n  public static TransactionPool createTransactionPool(\n      final ProtocolSchedule protocolSchedule,\n      final ProtocolContext protocolContext,\n      final EthContext ethContext,\n      final Clock clock,\n      final MetricsSystem metricsSystem,\n      final SyncState syncState,\n      final Wei minTransactionGasPrice,\n      final TransactionPoolConfiguration transactionPoolConfiguration) {\n\n    final PendingTransactions pendingTransactions =\n        new PendingTransactions(\n            transactionPoolConfiguration.getPendingTxRetentionPeriod(),\n            transactionPoolConfiguration.getTxPoolMaxSize(),\n            transactionPoolConfiguration.getPooledTransactionHashesSize(),\n            clock,\n            metricsSystem,\n            protocolContext.getBlockchain()::getChainHeadHeader,\n            transactionPoolConfiguration.getPriceBump());\n\n    final PeerTransactionTracker transactionTracker = new PeerTransactionTracker();\n    final TransactionsMessageSender transactionsMessageSender =\n        new TransactionsMessageSender(transactionTracker);\n\n    final PeerPendingTransactionTracker pendingTransactionTracker =\n        new 
PeerPendingTransactionTracker(pendingTransactions);\n    final PendingTransactionsMessageSender pendingTransactionsMessageSender =\n        new PendingTransactionsMessageSender(pendingTransactionTracker);\n\n    return createTransactionPool(\n        protocolSchedule,\n        protocolContext,\n        ethContext,\n        metricsSystem,\n        syncState,\n        minTransactionGasPrice,\n        transactionPoolConfiguration,\n        pendingTransactions,\n        transactionTracker,\n        transactionsMessageSender,\n        pendingTransactionTracker,\n        pendingTransactionsMessageSender);\n  }\n\n  static TransactionPool createTransactionPool(\n      final ProtocolSchedule protocolSchedule,\n      final ProtocolContext protocolContext,\n      final EthContext ethContext,\n      final MetricsSystem metricsSystem,\n      final SyncState syncState,\n      final Wei minTransactionGasPrice,\n      final TransactionPoolConfiguration transactionPoolConfiguration,\n      final PendingTransactions pendingTransactions,\n      final PeerTransactionTracker transactionTracker,\n      final TransactionsMessageSender transactionsMessageSender,\n      final PeerPendingTransactionTracker pendingTransactionTracker,\n      final PendingTransactionsMessageSender pendingTransactionsMessageSender) {\n    final TransactionPool transactionPool =\n        new TransactionPool(\n            pendingTransactions,\n            protocolSchedule,\n            protocolContext,\n            new TransactionSender(transactionTracker, transactionsMessageSender, ethContext),\n            new PendingTransactionSender(\n                pendingTransactionTracker, pendingTransactionsMessageSender, ethContext),\n            syncState,\n            ethContext,\n            transactionTracker,\n            pendingTransactionTracker,\n            minTransactionGasPrice,\n            metricsSystem,\n            transactionPoolConfiguration);\n    final TransactionsMessageHandler transactionsMessageHandler =\n        new TransactionsMessageHandler(\n            ethContext.getScheduler(),\n            new TransactionsMessageProcessor(\n                transactionTracker,\n                transactionPool,\n                metricsSystem.createCounter(\n                    BesuMetricCategory.TRANSACTION_POOL,\n                    \"transactions_messages_skipped_total\",\n                    \"Total number of transactions messages skipped by the processor.\")),\n            transactionPoolConfiguration.getTxMessageKeepAliveSeconds());\n    ethContext.getEthMessages().subscribe(EthPV62.TRANSACTIONS, transactionsMessageHandler);\n    final PendingTransactionsMessageHandler pooledTransactionsMessageHandler =\n        new PendingTransactionsMessageHandler(\n            ethContext.getScheduler(),\n            new PendingTransactionsMessageProcessor(\n                pendingTransactionTracker,\n                transactionPool,\n                transactionPoolConfiguration,\n                metricsSystem.createCounter(\n                    BesuMetricCategory.TRANSACTION_POOL,\n                    \"pending_transactions_messages_skipped_total\",\n                    \"Total number of pending transactions messages skipped by the processor.\"),\n                ethContext,\n                metricsSystem,\n                syncState),\n            transactionPoolConfiguration.getTxMessageKeepAliveSeconds());\n    ethContext\n        .getEthMessages()\n        .subscribe(EthPV65.NEW_POOLED_TRANSACTION_HASHES, 
pooledTransactionsMessageHandler);\n    ethContext.getEthPeers().subscribeDisconnect(pendingTransactionTracker);\n\n    protocolContext.getBlockchain().observeBlockAdded(transactionPool);\n    ethContext.getEthPeers().subscribeDisconnect(transactionTracker);\n    return transactionPool;\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":203}
{"diff_hunk":"@@ -38,7 +38,6 @@ public class CommonMetrics {\n   public static final String SUBMIT_FLOW_FAIL_METER_NAME = \"submit-flow-fail-meter\";\n   public static final String SUBMIT_FLOW_SKIP_METER_NAME = \"submit-flow-skip-meter\";\n   public static final String OOM_WAITING_JOB_COUNT_NAME = \"OOM-waiting-job-count\";\n-  public static final String QUEUE_WAIT_HISTOGRAM_NAME = \"queue-wait-histogram\";\n   public static final String UPLOAD_FAT_PROJECT_METER_NAME = \"upload-fat-project-meter\";\n   public static final String UPLOAD_THIN_PROJECT_METER_NAME = \"upload-thin-project-meter\";\n ","source_code":"\/*\n * Copyright 2017 LinkedIn Corp.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n * use this file except in compliance with the License. You may obtain a copy of\n * the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n * License for the specific language governing permissions and limitations under\n * the License.\n *\/\n\npackage azkaban.metrics;\n\nimport com.codahale.metrics.Counter;\nimport com.codahale.metrics.Histogram;\nimport com.codahale.metrics.Meter;\nimport javax.inject.Inject;\nimport javax.inject.Singleton;\n\n\/**\n * This singleton class CommonMetrics is in charge of collecting varieties of metrics which are\n * accessed in both web and exec modules. That said, these metrics will be exposed in both Web\n * server and executor.\n *\/\n@Singleton\npublic class CommonMetrics {\n  public static final String FLOW_FAIL_METER_NAME = \"flow-fail-meter\";\n  public static final String DISPATCH_FAIL_METER_NAME = \"dispatch-fail-meter\";\n  public static final String DISPATCH_SUCCESS_METER_NAME = \"dispatch-success-meter\";\n  public static final String SEND_EMAIL_FAIL_METER_NAME = \"send-email-fail-meter\";\n  public static final String SEND_EMAIL_SUCCESS_METER_NAME = \"send-email-success-meter\";\n  public static final String SUBMIT_FLOW_SUCCESS_METER_NAME = \"submit-flow-success-meter\";\n  public static final String SUBMIT_FLOW_FAIL_METER_NAME = \"submit-flow-fail-meter\";\n  public static final String SUBMIT_FLOW_SKIP_METER_NAME = \"submit-flow-skip-meter\";\n  public static final String OOM_WAITING_JOB_COUNT_NAME = \"OOM-waiting-job-count\";\n  public static final String QUEUE_WAIT_HISTOGRAM_NAME = \"queue-wait-histogram\";\n  public static final String UPLOAD_FAT_PROJECT_METER_NAME = \"upload-fat-project-meter\";\n  public static final String UPLOAD_THIN_PROJECT_METER_NAME = \"upload-thin-project-meter\";\n\n  private Counter OOMWaitingJobCount;\n  private final MetricsManager metricsManager;\n  private Meter flowFailMeter;\n  private Meter dispatchFailMeter;\n  private Meter dispatchSuccessMeter;\n  private Meter sendEmailFailMeter;\n  private Meter sendEmailSuccessMeter;\n  private Meter submitFlowSuccessMeter;\n  private Meter submitFlowFailMeter;\n  private Meter submitFlowSkipMeter;\n  private Meter uploadFatProjectMeter;\n  private Meter uploadThinProjectMeter;\n  private Histogram queueWaitMeter;\n\n  @Inject\n  public CommonMetrics(final MetricsManager metricsManager) {\n    this.metricsManager = metricsManager;\n    setupAllMetrics();\n  }\n\n  private void setupAllMetrics() {\n    this.flowFailMeter = this.metricsManager.addMeter(FLOW_FAIL_METER_NAME);\n    
this.dispatchFailMeter = this.metricsManager.addMeter(DISPATCH_FAIL_METER_NAME);\n    this.dispatchSuccessMeter = this.metricsManager.addMeter(DISPATCH_SUCCESS_METER_NAME);\n    this.sendEmailFailMeter = this.metricsManager.addMeter(SEND_EMAIL_FAIL_METER_NAME);\n    this.sendEmailSuccessMeter = this.metricsManager.addMeter(SEND_EMAIL_SUCCESS_METER_NAME);\n    this.submitFlowSuccessMeter = this.metricsManager.addMeter(SUBMIT_FLOW_SUCCESS_METER_NAME);\n    this.submitFlowFailMeter = this.metricsManager.addMeter(SUBMIT_FLOW_FAIL_METER_NAME);\n    this.submitFlowSkipMeter = this.metricsManager.addMeter(SUBMIT_FLOW_SKIP_METER_NAME);\n    this.OOMWaitingJobCount = this.metricsManager.addCounter(OOM_WAITING_JOB_COUNT_NAME);\n    this.queueWaitMeter = this.metricsManager.addHistogram(QUEUE_WAIT_HISTOGRAM_NAME);\n    this.uploadFatProjectMeter = this.metricsManager.addMeter(UPLOAD_FAT_PROJECT_METER_NAME);\n    this.uploadThinProjectMeter = this.metricsManager.addMeter(UPLOAD_THIN_PROJECT_METER_NAME);\n  }\n\n  \/**\n   * Mark flowFailMeter when a flow is considered as FAILED. This method could be called by Web\n   * Server or Executor, as they both detect flow failure.\n   *\/\n  public void markFlowFail() {\n    this.flowFailMeter.mark();\n  }\n\n  \/**\n   * Mark dispatchFailMeter when web server fails to dispatch a flow to executor.\n   *\/\n  public void markDispatchFail() {\n    this.dispatchFailMeter.mark();\n  }\n\n  \/**\n   * Mark dispatchSuccessMeter when web server successfully dispatches a flow to executor.\n   *\/\n  public void markDispatchSuccess() {\n    this.dispatchSuccessMeter.mark();\n  }\n\n  \/**\n   * Mark sendEmailFailMeter when an email fails to be sent out.\n   *\/\n  public void markSendEmailFail() {\n    this.sendEmailFailMeter.mark();\n  }\n\n  \/**\n   * Mark sendEmailSuccessMeter when an email is sent out successfully.\n   *\/\n  public void markSendEmailSuccess() {\n    this.sendEmailSuccessMeter.mark();\n  }\n\n  \/**\n   * Mark submitFlowSuccessMeter when a flow is submitted for execution successfully.\n   *\/\n  public void markSubmitFlowSuccess() {\n    this.submitFlowSuccessMeter.mark();\n  }\n\n  \/**\n   * Mark submitFlowFailMeter when a flow submitted for execution is skipped.\n   *\/\n  public void markSubmitFlowSkip() {\n    this.submitFlowSkipMeter.mark();\n  }\n\n  \/**\n   * Mark submitFlowFailMeter when a flow fails to be submitted for execution.\n   *\/\n  public void markSubmitFlowFail() {\n    this.submitFlowFailMeter.mark();\n  }\n\n  \/**\n   * Mark uploadFatProjectMeter when a fat project zip is uploaded to the web server.\n   *\/\n  public void markUploadFatProject() { this.uploadFatProjectMeter.mark(); }\n\n  \/**\n   * Mark uploadThinProjectMeter when a thin project zip is uploaded to the web server.\n   *\/\n  public void markUploadThinProject() { this.uploadThinProjectMeter.mark(); }\n\n  \/**\n   * Mark the occurrence of an job waiting event due to OOM\n   *\/\n  public void incrementOOMJobWaitCount() {\n    this.OOMWaitingJobCount.inc();\n  }\n\n  \/**\n   * Unmark the occurrence of an job waiting event due to OOM\n   *\/\n  public void decrementOOMJobWaitCount() {\n    this.OOMWaitingJobCount.dec();\n  }\n\n  \/**\n   * Add the queue wait time for a flow to the metrics.\n   *\n   * @param time queue wait time for a flow.\n   *\/\n  public void addQueueWait(final long time) {\n    this.queueWaitMeter.update(time);\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":204}
{"diff_hunk":"@@ -26,7 +26,9 @@ import org.apache.hadoop.conf.Configuration;\n import org.apache.hadoop.mapreduce.InputSplit;\n import org.apache.iceberg.CombinedScanTask;\n import org.apache.iceberg.FileScanTask;\n+import org.apache.iceberg.encryption.EncryptionManager;\n import org.apache.iceberg.hadoop.Util;\n+import org.apache.iceberg.io.FileIO;\n import org.apache.iceberg.mr.InputFormatConfig;\n import org.apache.iceberg.mr.SerializationUtil;\n ","source_code":"\/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *   http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied.  See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\/\n\npackage org.apache.iceberg.mr.mapreduce;\n\nimport java.io.DataInput;\nimport java.io.DataOutput;\nimport java.io.IOException;\nimport org.apache.hadoop.conf.Configuration;\nimport org.apache.hadoop.mapreduce.InputSplit;\nimport org.apache.iceberg.CombinedScanTask;\nimport org.apache.iceberg.FileScanTask;\nimport org.apache.iceberg.hadoop.Util;\nimport org.apache.iceberg.mr.InputFormatConfig;\nimport org.apache.iceberg.mr.SerializationUtil;\n\n\/\/ Since this class extends `mapreduce.InputSplit and implements `mapred.InputSplit`, it can be returned by both MR v1\n\/\/ and v2 file formats.\npublic class IcebergSplit extends InputSplit implements org.apache.hadoop.mapred.InputSplit, IcebergSplitContainer {\n\n  public static final String[] ANYWHERE = new String[]{\"*\"};\n\n  private CombinedScanTask task;\n\n  private transient String[] locations;\n  private transient Configuration conf;\n\n  \/\/ public no-argument constructor for deserialization\n  public IcebergSplit() {\n  }\n\n  IcebergSplit(Configuration conf, CombinedScanTask task) {\n    this.task = task;\n    this.conf = conf;\n  }\n\n  public CombinedScanTask task() {\n    return task;\n  }\n\n  @Override\n  public IcebergSplit icebergSplit() {\n    return this;\n  }\n\n  @Override\n  public long getLength() {\n    return task.files().stream().mapToLong(FileScanTask::length).sum();\n  }\n\n  @Override\n  public String[] getLocations() {\n    if (locations == null) {\n      boolean localityPreferred = conf.getBoolean(InputFormatConfig.LOCALITY, false);\n      locations = localityPreferred ? Util.blockLocations(task, conf) : ANYWHERE;\n    }\n\n    return locations;\n  }\n\n  @Override\n  public void write(DataOutput out) throws IOException {\n    byte[] data = SerializationUtil.serializeToBytes(this.task);\n    out.writeInt(data.length);\n    out.write(data);\n  }\n\n  @Override\n  public void readFields(DataInput in) throws IOException {\n    byte[] data = new byte[in.readInt()];\n    in.readFully(data);\n    this.task = SerializationUtil.deserializeFromBytes(data);\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":205}
{"diff_hunk":"@@ -17,6 +17,7 @@\n \n package org.apache.servicecomb.swagger.generator.core.processor.parameter;\n \n+import org.apache.commons.lang3.StringUtils;\n import org.apache.servicecomb.swagger.generator.core.OperationGenerator;\n import org.apache.servicecomb.swagger.generator.core.ParameterAnnotationProcessor;\n import org.apache.servicecomb.swagger.generator.core.utils.ParamUtils;","source_code":"\/*\n * Licensed to the Apache Software Foundation (ASF) under one or more\n * contributor license agreements.  See the NOTICE file distributed with\n * this work for additional information regarding copyright ownership.\n * The ASF licenses this file to You under the Apache License, Version 2.0\n * (the \"License\"); you may not use this file except in compliance with\n * the License.  You may obtain a copy of the License at\n *\n *     http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage org.apache.servicecomb.swagger.generator.core.processor.parameter;\n\nimport org.apache.servicecomb.swagger.generator.core.OperationGenerator;\nimport org.apache.servicecomb.swagger.generator.core.ParameterAnnotationProcessor;\nimport org.apache.servicecomb.swagger.generator.core.utils.ParamUtils;\n\nimport io.swagger.models.parameters.AbstractSerializableParameter;\n\npublic abstract class AbstractParameterProcessor>\n    implements ParameterAnnotationProcessor {\n  @Override\n  public void process(Object annotation, OperationGenerator operationGenerator, int paramIdx) {\n    T parameter = createParameter();\n\n    fillParameter(annotation, operationGenerator, paramIdx, parameter);\n\n    operationGenerator.addProviderParameter(parameter);\n  }\n\n  protected void fillParameter(Object annotation, OperationGenerator operationGenerator, int paramIdx,\n      T parameter) {\n    setParameterName(annotation, operationGenerator, paramIdx, parameter);\n    setParameterType(operationGenerator, paramIdx, parameter);\n  }\n\n  protected void setParameterType(OperationGenerator operationGenerator, int paramIdx,\n      T parameter) {\n    ParamUtils.setParameterType(operationGenerator.getSwagger(),\n        operationGenerator.getProviderMethod(),\n        paramIdx,\n        parameter);\n  }\n\n  protected void setParameterName(Object annotation, OperationGenerator operationGenerator, int paramIdx,\n      T parameter) {\n    String paramName = getAnnotationParameterName(annotation);\n    paramName = ParamUtils.getParameterName(paramName, operationGenerator.getProviderMethod(), paramIdx);\n    parameter.setName(paramName);\n  }\n\n  protected abstract T createParameter();\n\n  protected abstract String getAnnotationParameterName(Object annotation);\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":206}
{"diff_hunk":"@@ -181,6 +181,9 @@ public abstract class AbstractBlockProcessor implements BlockProcessor {\n \n     worldState.persist(blockHeader.getHash());\n     return AbstractBlockProcessor.Result.successful(receipts);\n+        } finally {\n+        globalProcessBlock.end();\n+        }\n   }\n \n   protected MiningBeneficiaryCalculator getMiningBeneficiaryCalculator() {","source_code":"\/*\n * Copyright ConsenSys AG.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n * the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n * an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n *\/\npackage org.hyperledger.besu.ethereum.mainnet;\n\nimport org.hyperledger.besu.ethereum.chain.Blockchain;\nimport org.hyperledger.besu.ethereum.core.Address;\nimport org.hyperledger.besu.ethereum.core.BlockHeader;\nimport org.hyperledger.besu.ethereum.core.MutableWorldState;\nimport org.hyperledger.besu.ethereum.core.Transaction;\nimport org.hyperledger.besu.ethereum.core.TransactionReceipt;\nimport org.hyperledger.besu.ethereum.core.Wei;\nimport org.hyperledger.besu.ethereum.core.WorldState;\nimport org.hyperledger.besu.ethereum.core.WorldUpdater;\nimport org.hyperledger.besu.ethereum.core.fees.TransactionGasBudgetCalculator;\nimport org.hyperledger.besu.ethereum.privacy.storage.PrivateMetadataUpdater;\nimport org.hyperledger.besu.ethereum.processing.TransactionProcessingResult;\nimport org.hyperledger.besu.ethereum.vm.BlockHashLookup;\nimport org.hyperledger.besu.ethereum.vm.OperationTracer;\nimport org.hyperledger.besu.plugin.data.TransactionType;\n\nimport java.util.ArrayList;\nimport java.util.List;\n\nimport com.google.common.collect.ImmutableList;\nimport org.apache.logging.log4j.LogManager;\nimport org.apache.logging.log4j.Logger;\n\npublic abstract class AbstractBlockProcessor implements BlockProcessor {\n  @FunctionalInterface\n  public interface TransactionReceiptFactory {\n\n    TransactionReceipt create(\n        TransactionType transactionType,\n        TransactionProcessingResult result,\n        WorldState worldState,\n        long gasUsed);\n  }\n\n  private static final Logger LOG = LogManager.getLogger();\n\n  static final int MAX_GENERATION = 6;\n\n  public static class Result implements BlockProcessor.Result {\n\n    private static final AbstractBlockProcessor.Result FAILED =\n        new AbstractBlockProcessor.Result(false, null);\n\n    private final boolean successful;\n\n    private final List receipts;\n\n    public static AbstractBlockProcessor.Result successful(\n        final List receipts) {\n      return new AbstractBlockProcessor.Result(true, ImmutableList.copyOf(receipts));\n    }\n\n    public static AbstractBlockProcessor.Result failed() {\n      return FAILED;\n    }\n\n    Result(final boolean successful, final List receipts) {\n      this.successful = successful;\n      this.receipts = receipts;\n    }\n\n    @Override\n    public List getReceipts() {\n      return receipts;\n    }\n\n    @Override\n    public boolean isSuccessful() {\n      return successful;\n    }\n  }\n\n  private final MainnetTransactionProcessor 
transactionProcessor;\n\n  private final AbstractBlockProcessor.TransactionReceiptFactory transactionReceiptFactory;\n\n  final Wei blockReward;\n\n  private final boolean skipZeroBlockRewards;\n\n  private final MiningBeneficiaryCalculator miningBeneficiaryCalculator;\n\n  private final TransactionGasBudgetCalculator gasBudgetCalculator;\n\n  protected AbstractBlockProcessor(\n      final MainnetTransactionProcessor transactionProcessor,\n      final TransactionReceiptFactory transactionReceiptFactory,\n      final Wei blockReward,\n      final MiningBeneficiaryCalculator miningBeneficiaryCalculator,\n      final boolean skipZeroBlockRewards,\n      final TransactionGasBudgetCalculator gasBudgetCalculator) {\n    this.transactionProcessor = transactionProcessor;\n    this.transactionReceiptFactory = transactionReceiptFactory;\n    this.blockReward = blockReward;\n    this.miningBeneficiaryCalculator = miningBeneficiaryCalculator;\n    this.skipZeroBlockRewards = skipZeroBlockRewards;\n    this.gasBudgetCalculator = gasBudgetCalculator;\n  }\n\n  @Override\n  public AbstractBlockProcessor.Result processBlock(\n      final Blockchain blockchain,\n      final MutableWorldState worldState,\n      final BlockHeader blockHeader,\n      final List transactions,\n      final List ommers,\n      final PrivateMetadataUpdater privateMetadataUpdater) {\n\n    final List receipts = new ArrayList<>();\n    long currentGasUsed = 0;\n    for (final Transaction transaction : transactions) {\n      final long remainingGasBudget = blockHeader.getGasLimit() - currentGasUsed;\n      if (!gasBudgetCalculator.hasBudget(\n          transaction, blockHeader.getNumber(), blockHeader.getGasLimit(), currentGasUsed)) {\n        LOG.info(\n            \"Block processing error: transaction gas limit {} exceeds available block budget\"\n                + \" remaining {}. Block {} Transaction {}\",\n            transaction.getGasLimit(),\n            remainingGasBudget,\n            blockHeader.getHash().toHexString(),\n            transaction.getHash().toHexString());\n        return AbstractBlockProcessor.Result.failed();\n      }\n\n      final WorldUpdater worldStateUpdater = worldState.updater();\n      final BlockHashLookup blockHashLookup = new BlockHashLookup(blockHeader, blockchain);\n      final Address miningBeneficiary =\n          miningBeneficiaryCalculator.calculateBeneficiary(blockHeader);\n\n      final TransactionProcessingResult result =\n          transactionProcessor.processTransaction(\n              blockchain,\n              worldStateUpdater,\n              blockHeader,\n              transaction,\n              miningBeneficiary,\n              OperationTracer.NO_TRACING,\n              blockHashLookup,\n              true,\n              TransactionValidationParams.processingBlock(),\n              privateMetadataUpdater);\n      if (result.isInvalid()) {\n        LOG.info(\n            \"Block processing error: transaction invalid '{}'. 
Block {} Transaction {}\",\n            result.getValidationResult().getInvalidReason(),\n            blockHeader.getHash().toHexString(),\n            transaction.getHash().toHexString());\n        return AbstractBlockProcessor.Result.failed();\n      }\n\n      worldStateUpdater.commit();\n\n      currentGasUsed += transaction.getGasLimit() - result.getGasRemaining();\n\n      final TransactionReceipt transactionReceipt =\n          transactionReceiptFactory.create(\n              transaction.getType(), result, worldState, currentGasUsed);\n      receipts.add(transactionReceipt);\n    }\n\n    if (!rewardCoinbase(worldState, blockHeader, ommers, skipZeroBlockRewards)) {\n      \/\/ no need to log, rewardCoinbase logs the error.\n      return AbstractBlockProcessor.Result.failed();\n    }\n\n    worldState.persist(blockHeader.getHash());\n    return AbstractBlockProcessor.Result.successful(receipts);\n  }\n\n  protected MiningBeneficiaryCalculator getMiningBeneficiaryCalculator() {\n    return miningBeneficiaryCalculator;\n  }\n\n  abstract boolean rewardCoinbase(\n      final MutableWorldState worldState,\n      final BlockHeader header,\n      final List ommers,\n      final boolean skipZeroBlockRewards);\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":207}
{"diff_hunk":"@@ -19,6 +19,7 @@ import com.google.api.codegen.config.PageStreamingConfig;\n import com.google.api.codegen.util.Name;\n import com.google.api.codegen.viewmodel.PageStreamingDescriptorClassView;\n import com.google.api.codegen.viewmodel.PageStreamingDescriptorView;\n+import com.google.api.codegen.viewmodel.PageStreamingFactoryClassView;\n import com.google.api.tools.framework.model.Field;\n import com.google.api.tools.framework.model.Method;\n import com.google.api.tools.framework.model.TypeRef;","source_code":"\/* Copyright 2016 Google Inc\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\npackage com.google.api.codegen.transformer;\n\nimport com.google.api.codegen.config.MethodConfig;\nimport com.google.api.codegen.config.PageStreamingConfig;\nimport com.google.api.codegen.util.Name;\nimport com.google.api.codegen.viewmodel.PageStreamingDescriptorClassView;\nimport com.google.api.codegen.viewmodel.PageStreamingDescriptorView;\nimport com.google.api.tools.framework.model.Field;\nimport com.google.api.tools.framework.model.Method;\nimport com.google.api.tools.framework.model.TypeRef;\nimport java.util.ArrayList;\nimport java.util.List;\n\n\/**\n * PageStreamingTransformer generates view objects for page streaming from a service model.\n *\/\npublic class PageStreamingTransformer {\n\n  public List generateDescriptors(SurfaceTransformerContext context) {\n    List descriptors = new ArrayList<>();\n\n    for (Method method : context.getPageStreamingMethods()) {\n      MethodConfig methodConfig = context.getMethodConfig(method);\n      context.getNamer().addPageStreamingDescriptorImports(context.getTypeTable());\n      PageStreamingConfig pageStreaming = methodConfig.getPageStreaming();\n\n      PageStreamingDescriptorView.Builder descriptor = PageStreamingDescriptorView.newBuilder();\n      descriptor.varName(context.getNamer().getPageStreamingDescriptorName(method));\n      descriptor.requestTokenFieldName(pageStreaming.getRequestTokenField().getSimpleName());\n      if (pageStreaming.hasPageSizeField()) {\n        descriptor.requestPageSizeFieldName(pageStreaming.getPageSizeField().getSimpleName());\n      }\n      descriptor.responseTokenFieldName(pageStreaming.getResponseTokenField().getSimpleName());\n      descriptor.resourcesFieldName(pageStreaming.getResourcesField().getSimpleName());\n      descriptor.methodName(Name.upperCamel(method.getSimpleName()).toLowerCamel());\n\n      descriptors.add(descriptor.build());\n    }\n\n    return descriptors;\n  }\n\n  public List generateDescriptorClasses(\n      SurfaceTransformerContext context) {\n    List descriptors = new ArrayList<>();\n\n    context.getNamer().addPageStreamingDescriptorImports(context.getTypeTable());\n    for (Method method : context.getPageStreamingMethods()) {\n      descriptors.add(generateDescriptorClass(context.asMethodContext(method)));\n    }\n\n    return descriptors;\n  }\n\n  private PageStreamingDescriptorClassView generateDescriptorClass(\n      MethodTransformerContext 
context) {\n    SurfaceNamer namer = context.getNamer();\n    ModelTypeTable typeTable = context.getTypeTable();\n    Method method = context.getMethod();\n    PageStreamingConfig pageStreaming = context.getMethodConfig().getPageStreaming();\n    FeatureConfig featureConfig = context.getFeatureConfig();\n\n    PageStreamingDescriptorClassView.Builder desc = PageStreamingDescriptorClassView.newBuilder();\n\n    Field resourceField = pageStreaming.getResourcesField();\n    TypeRef resourceType = resourceField.getType();\n\n    desc.name(namer.getPageStreamingDescriptorConstName(method));\n    desc.typeName(\n        namer.getAndSavePagedResponseTypeName(\n            featureConfig,\n            typeTable,\n            method.getInputType(),\n            method.getOutputType(),\n            resourceField));\n    desc.requestTypeName(typeTable.getAndSaveNicknameFor(method.getInputType()));\n    desc.responseTypeName(typeTable.getAndSaveNicknameFor(method.getOutputType()));\n    desc.resourceTypeName(\n        namer.getAndSaveElementFieldTypeName(featureConfig, typeTable, resourceField));\n\n    TypeRef tokenType = pageStreaming.getResponseTokenField().getType();\n    desc.tokenTypeName(typeTable.getAndSaveNicknameFor(tokenType));\n    desc.defaultTokenValue(context.getTypeTable().getZeroValueAndSaveNicknameFor(tokenType));\n\n    \/\/ The resource fields are \"repeated\" in the proto.\n    \/\/ We `makeOptional` so that we get the zero value of the resource,\n    \/\/ not the zero value of the array\/list of resources.\n    desc.resourceZeroValue(\n        context.getTypeTable().getZeroValueAndSaveNicknameFor(resourceType.makeOptional()));\n\n    desc.requestTokenSetFunction(\n        namer.getFieldSetFunctionName(featureConfig, pageStreaming.getRequestTokenField()));\n    if (pageStreaming.hasPageSizeField()) {\n      desc.requestPageSizeSetFunction(\n          namer.getFieldSetFunctionName(featureConfig, pageStreaming.getPageSizeField()));\n      desc.requestPageSizeGetFunction(\n          namer.getFieldGetFunctionName(featureConfig, pageStreaming.getPageSizeField()));\n    }\n    desc.responseTokenGetFunction(\n        namer.getFieldGetFunctionName(featureConfig, pageStreaming.getResponseTokenField()));\n    desc.resourcesFieldGetFunction(\n        namer.getFieldGetFunctionName(featureConfig, pageStreaming.getResourcesField()));\n\n    return desc.build();\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":208}
{"diff_hunk":"@@ -1,5 +1,6 @@\n package de.danoeh.antennapod.dialog;\n \n+import android.app.Activity;\n import android.app.Dialog;\n import android.content.Context;\n import android.content.DialogInterface;","source_code":"package de.danoeh.antennapod.dialog;\n\nimport android.app.Dialog;\nimport android.content.Context;\nimport android.content.DialogInterface;\nimport android.os.Bundle;\nimport android.view.View;\nimport android.view.inputmethod.InputMethodManager;\nimport android.widget.ArrayAdapter;\nimport android.widget.Button;\nimport android.widget.CheckBox;\nimport android.widget.EditText;\nimport android.widget.LinearLayout;\nimport android.widget.Spinner;\nimport android.widget.TextView;\nimport androidx.annotation.NonNull;\nimport androidx.appcompat.app.AlertDialog;\nimport androidx.fragment.app.DialogFragment;\nimport com.google.android.material.snackbar.Snackbar;\nimport de.danoeh.antennapod.R;\nimport de.danoeh.antennapod.core.preferences.SleepTimerPreferences;\nimport de.danoeh.antennapod.core.service.playback.PlaybackService;\nimport de.danoeh.antennapod.core.util.Converter;\nimport de.danoeh.antennapod.core.util.playback.PlaybackController;\nimport io.reactivex.Observable;\nimport io.reactivex.android.schedulers.AndroidSchedulers;\nimport io.reactivex.disposables.Disposable;\n\nimport java.util.concurrent.TimeUnit;\n\npublic class SleepTimerDialog extends DialogFragment {\n    private PlaybackController controller;\n    private Disposable timeUpdater;\n\n    private EditText etxtTime;\n    private Spinner spTimeUnit;\n    private LinearLayout timeSetup;\n    private LinearLayout timeDisplay;\n    private TextView time;\n\n    public SleepTimerDialog() {\n\n    }\n\n    @Override\n    public void onStart() {\n        super.onStart();\n        controller = new PlaybackController(getActivity()) {\n            @Override\n            public void setupGUI() {\n                updateTime();\n            }\n\n            @Override\n            public void onSleepTimerUpdate() {\n                updateTime();\n            }\n        };\n        controller.init();\n        timeUpdater = Observable.interval(1, TimeUnit.SECONDS)\n                .observeOn(AndroidSchedulers.mainThread())\n                .subscribe(tick -> updateTime());\n    }\n\n    @Override\n    public void onStop() {\n        super.onStop();\n        if (controller != null) {\n            controller.release();\n        }\n        if (timeUpdater != null) {\n            timeUpdater.dispose();\n        }\n    }\n\n    @NonNull\n    @Override\n    public Dialog onCreateDialog(Bundle savedInstanceState) {\n        View content = View.inflate(getContext(), R.layout.time_dialog, null);\n        AlertDialog.Builder builder = new AlertDialog.Builder(getContext());\n        builder.setTitle(R.string.sleep_timer_label);\n        builder.setView(content);\n        builder.setPositiveButton(R.string.close_label, null);\n\n        etxtTime = content.findViewById(R.id.etxtTime);\n        spTimeUnit = content.findViewById(R.id.spTimeUnit);\n        timeSetup = content.findViewById(R.id.timeSetup);\n        timeDisplay = content.findViewById(R.id.timeDisplay);\n        time = content.findViewById(R.id.time);\n\n        etxtTime.setText(SleepTimerPreferences.lastTimerValue());\n        etxtTime.postDelayed(() -> {\n            InputMethodManager imm = (InputMethodManager) getContext().getSystemService(Context.INPUT_METHOD_SERVICE);\n            imm.showSoftInput(etxtTime, InputMethodManager.SHOW_IMPLICIT);\n        }, 100);\n\n  
      String[] spinnerContent = new String[] {\n                getString(R.string.time_seconds),\n                getString(R.string.time_minutes),\n                getString(R.string.time_hours) };\n        ArrayAdapter spinnerAdapter = new ArrayAdapter<>(getContext(),\n                android.R.layout.simple_spinner_item, spinnerContent);\n        spinnerAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);\n        spTimeUnit.setAdapter(spinnerAdapter);\n        spTimeUnit.setSelection(SleepTimerPreferences.lastTimerTimeUnit());\n\n        CheckBox cbShakeToReset = content.findViewById(R.id.cbShakeToReset);\n        CheckBox cbVibrate = content.findViewById(R.id.cbVibrate);\n        CheckBox chAutoEnable = content.findViewById(R.id.chAutoEnable);\n\n        cbShakeToReset.setChecked(SleepTimerPreferences.shakeToReset());\n        cbVibrate.setChecked(SleepTimerPreferences.vibrate());\n        chAutoEnable.setChecked(SleepTimerPreferences.autoEnable());\n\n        cbShakeToReset.setOnCheckedChangeListener((buttonView, isChecked)\n                -> SleepTimerPreferences.setShakeToReset(isChecked));\n        cbVibrate.setOnCheckedChangeListener((buttonView, isChecked)\n                -> SleepTimerPreferences.setVibrate(isChecked));\n        chAutoEnable.setOnCheckedChangeListener((compoundButton, isChecked)\n                -> SleepTimerPreferences.setAutoEnable(isChecked));\n\n        Button disableButton = content.findViewById(R.id.disableSleeptimerButton);\n        disableButton.setOnClickListener(v -> {\n            if (controller != null) {\n                controller.disableSleepTimer();\n            }\n        });\n        Button setButton = content.findViewById(R.id.setSleeptimerButton);\n        setButton.setOnClickListener(v -> {\n            if (!PlaybackService.isRunning) {\n                Snackbar.make(content, R.string.no_media_playing_label, Snackbar.LENGTH_LONG).show();\n                return;\n            }\n            try {\n                SleepTimerPreferences.setLastTimer(etxtTime.getText().toString(), spTimeUnit.getSelectedItemPosition());\n                long time = SleepTimerPreferences.timerMillis();\n                if (controller != null) {\n                    controller.setSleepTimer(time);\n                }\n            } catch (NumberFormatException e) {\n                e.printStackTrace();\n                Snackbar.make(content, R.string.time_dialog_invalid_input, Snackbar.LENGTH_LONG).show();\n            }\n        });\n        return builder.create();\n    }\n\n    private void updateTime() {\n        if (controller == null) {\n            return;\n        }\n        timeSetup.setVisibility(controller.sleepTimerActive() ? View.GONE : View.VISIBLE);\n        timeDisplay.setVisibility(controller.sleepTimerActive() ? View.VISIBLE : View.GONE);\n        time.setText(Converter.getDurationStringLong((int) controller.getSleepTimerTimeLeft()));\n    }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":209}
{"diff_hunk":"@@ -19,10 +19,14 @@\n \n package org.apache.iceberg.spark.source;\n \n+import java.util.Map;\n+import java.util.Set;\n import org.apache.arrow.vector.NullCheckingForGet;\n import org.apache.iceberg.CombinedScanTask;\n+import org.apache.iceberg.DataFile;\n import org.apache.iceberg.FileFormat;\n import org.apache.iceberg.FileScanTask;\n+import org.apache.iceberg.PartitionSpec;\n import org.apache.iceberg.Schema;\n import org.apache.iceberg.encryption.EncryptionManager;\n import org.apache.iceberg.io.CloseableIterable;","source_code":"\/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *   http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied.  See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\/\n\npackage org.apache.iceberg.spark.source;\n\nimport org.apache.arrow.vector.NullCheckingForGet;\nimport org.apache.iceberg.CombinedScanTask;\nimport org.apache.iceberg.FileFormat;\nimport org.apache.iceberg.FileScanTask;\nimport org.apache.iceberg.Schema;\nimport org.apache.iceberg.encryption.EncryptionManager;\nimport org.apache.iceberg.io.CloseableIterable;\nimport org.apache.iceberg.io.CloseableIterator;\nimport org.apache.iceberg.io.FileIO;\nimport org.apache.iceberg.io.InputFile;\nimport org.apache.iceberg.mapping.NameMappingParser;\nimport org.apache.iceberg.parquet.Parquet;\nimport org.apache.iceberg.relocated.com.google.common.base.Preconditions;\nimport org.apache.iceberg.spark.data.vectorized.VectorizedSparkParquetReaders;\nimport org.apache.spark.sql.vectorized.ColumnarBatch;\n\nclass BatchDataReader extends BaseDataReader {\n  private final Schema expectedSchema;\n  private final String nameMapping;\n  private final boolean caseSensitive;\n  private final int batchSize;\n\n  BatchDataReader(\n      CombinedScanTask task, Schema expectedSchema, String nameMapping, FileIO fileIo,\n      EncryptionManager encryptionManager, boolean caseSensitive, int size) {\n    super(task, fileIo, encryptionManager);\n    this.expectedSchema = expectedSchema;\n    this.nameMapping = nameMapping;\n    this.caseSensitive = caseSensitive;\n    this.batchSize = size;\n  }\n\n  @Override\n  CloseableIterator open(FileScanTask task) {\n    CloseableIterable iter;\n    InputFile location = getInputFile(task);\n    Preconditions.checkNotNull(location, \"Could not find InputFile associated with FileScanTask\");\n    if (task.file().format() == FileFormat.PARQUET) {\n      Parquet.ReadBuilder builder = Parquet.read(location)\n          .project(expectedSchema)\n          .split(task.start(), task.length())\n          .createBatchedReaderFunc(fileSchema -> VectorizedSparkParquetReaders.buildReader(expectedSchema,\n              fileSchema, \/* setArrowValidityVector *\/ NullCheckingForGet.NULL_CHECKING_ENABLED))\n          .recordsPerBatch(batchSize)\n          .filter(task.residual())\n          .caseSensitive(caseSensitive)\n          \/\/ 
Spark eagerly consumes the batches. So the underlying memory allocated could be reused\n          \/\/ without worrying about subsequent reads clobbering over each other. This improves\n          \/\/ read performance as every batch read doesn't have to pay the cost of allocating memory.\n          .reuseContainers();\n\n      if (nameMapping != null) {\n        builder.withNameMapping(NameMappingParser.fromJson(nameMapping));\n      }\n\n      iter = builder.build();\n    } else {\n      throw new UnsupportedOperationException(\n          \"Format: \" + task.file().format() + \" not supported for batched reads\");\n    }\n    return iter.iterator();\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":210}
{"diff_hunk":"@@ -32,6 +32,7 @@ import java.util.UUID;\n public class OpenSamlAuthenticationRequestFactory implements Saml2AuthenticationRequestFactory {\n \tprivate Clock clock = Clock.systemUTC();\n \tprivate final OpenSamlImplementation saml = OpenSamlImplementation.getInstance();\n+\tprivate String protocolBinding = SAMLConstants.SAML2_POST_BINDING_URI;\n \n \t\/**\n \t * {@inheritDoc}","source_code":"\/*\n * Copyright 2002-2019 the original author or authors.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage org.springframework.security.saml2.provider.service.authentication;\n\nimport org.springframework.util.Assert;\n\nimport org.joda.time.DateTime;\nimport org.opensaml.saml.saml2.core.AuthnRequest;\nimport org.opensaml.saml.saml2.core.Issuer;\n\nimport java.time.Clock;\nimport java.time.Instant;\nimport java.util.UUID;\n\n\/**\n * @since 5.2\n *\/\npublic class OpenSamlAuthenticationRequestFactory implements Saml2AuthenticationRequestFactory {\n\tprivate Clock clock = Clock.systemUTC();\n\tprivate final OpenSamlImplementation saml = OpenSamlImplementation.getInstance();\n\n\t\/**\n\t * {@inheritDoc}\n\t *\/\n\t@Override\n\tpublic String createAuthenticationRequest(Saml2AuthenticationRequest request) {\n\t\tAuthnRequest auth = this.saml.buildSAMLObject(AuthnRequest.class);\n\t\tauth.setID(\"ARQ\" + UUID.randomUUID().toString().substring(1));\n\t\tauth.setIssueInstant(new DateTime(this.clock.millis()));\n\t\tauth.setForceAuthn(Boolean.FALSE);\n\t\tauth.setIsPassive(Boolean.FALSE);\n\t\tauth.setProtocolBinding(\"urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect\");\n\t\tIssuer issuer = this.saml.buildSAMLObject(Issuer.class);\n\t\tissuer.setValue(request.getIssuer());\n\t\tauth.setIssuer(issuer);\n\t\tauth.setDestination(request.getDestination());\n\t\tauth.setAssertionConsumerServiceURL(request.getAssertionConsumerServiceUrl());\n\t\treturn this.saml.toXml(\n\t\t\t\tauth,\n\t\t\t\trequest.getCredentials(),\n\t\t\t\trequest.getIssuer()\n\t\t);\n\t}\n\n\t\/**\n\t * '\n\t * Use this {@link Clock} with {@link Instant#now()} for generating\n\t * timestamps\n\t *\n\t * @param clock\n\t *\/\n\tpublic void setClock(Clock clock) {\n\t\tAssert.notNull(clock, \"clock cannot be null\");\n\t\tthis.clock = clock;\n\t}\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":211}
{"diff_hunk":"@@ -15,6 +15,7 @@\n package org.hyperledger.besu.config;\n \n import java.util.Optional;\n+import java.util.OptionalLong;\n \n public interface QbftConfigOptions extends BftConfigOptions {\n ","source_code":"\/*\n * Copyright ConsenSys AG.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n * the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n * an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n *\/\npackage org.hyperledger.besu.config;\n\nimport java.util.Optional;\n\npublic interface QbftConfigOptions extends BftConfigOptions {\n\n  Optional getValidatorContractAddress();\n\n  default boolean isValidatorContractMode() {\n    return getValidatorContractAddress().isPresent();\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":212}
{"diff_hunk":"@@ -18,6 +18,7 @@ import com.google.api.codegen.util.NamePath;\n import com.google.api.codegen.util.TypeAlias;\n import com.google.api.codegen.util.TypeName;\n import com.google.api.codegen.util.TypeTable;\n+import com.google.common.collect.ImmutableSet;\n \n import java.util.ArrayList;\n import java.util.List;","source_code":"\/* Copyright 2016 Google Inc\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\npackage com.google.api.codegen.util.go;\n\nimport com.google.api.codegen.util.NamePath;\nimport com.google.api.codegen.util.TypeAlias;\nimport com.google.api.codegen.util.TypeName;\nimport com.google.api.codegen.util.TypeTable;\n\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.TreeMap;\n\npublic class GoTypeTable implements TypeTable {\n\n  private final TreeMap imports = new TreeMap<>();\n\n  @Override\n  public TypeTable cloneEmpty() {\n    return new GoTypeTable();\n  }\n\n  @Override\n  public TypeName getTypeName(String fullName) {\n    String[] parts = fullName.split(\";\", -1);\n    if (parts.length != 4) {\n      return new TypeName(fullName);\n    }\n    return new TypeName(fullName, parts[3] + parts[1] + \".\" + parts[2]);\n  }\n\n  @Override\n  public NamePath getNamePath(String fullName) {\n    return NamePath.dotted(fullName);\n  }\n\n  @Override\n  public TypeName getContainerTypeName(String containerFullName, String... elementFullNames) {\n    return getTypeName(containerFullName);\n  }\n\n  @Override\n  public String getAndSaveNicknameFor(String fullName) {\n    return getAndSaveNicknameFor(getTypeName(fullName));\n  }\n\n  @Override\n  public String getAndSaveNicknameFor(TypeName typeName) {\n    return typeName.getAndSaveNicknameIn(this);\n  }\n\n  @Override\n  public String getAndSaveNicknameFor(TypeAlias alias) {\n    String[] parts = alias.getFullName().split(\";\", -1);\n    if (parts.length == 4) {\n      imports.put(parts[0], parts[1]);\n    }\n    return alias.getNickname();\n  }\n\n  @Override\n  public Map getImports() {\n    return imports;\n  }\n\n  public static List formatImports(Map imports) {\n    List standard = new ArrayList<>(imports.size());\n    List thirdParty = new ArrayList<>(imports.size());\n\n    for (Map.Entry imp : imports.entrySet()) {\n      String importPath = imp.getKey();\n      String packageRename = imp.getValue();\n      List target = isStandardImport(importPath) ? 
standard : thirdParty;\n      if (packageRename.equals(\"\")) {\n        target.add(String.format(\"\\\"%s\\\"\", importPath));\n      } else {\n        target.add(String.format(\"%s \\\"%s\\\"\", packageRename, importPath));\n      }\n    }\n\n    List merge = new ArrayList<>(standard);\n    if (!standard.isEmpty() && !thirdParty.isEmpty()) {\n      merge.add(\"\");\n    }\n    merge.addAll(thirdParty);\n    return merge;\n  }\n\n  private static boolean isStandardImport(String importPath) {\n    \/\/ TODO(pongad): Some packages in standard library have slashes,\n    \/\/ we might have to special case them.\n    return !importPath.contains(\"\/\");\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":213}
{"diff_hunk":"@@ -57,15 +57,17 @@ public final class ASTMethodDeclaration extends AbstractMethodOrConstructorDecla\n \n     \/**\n      * Returns the simple name of the method.\n+     *\n+     * @deprecated Use {@link #getName()}\n      *\/\n+    @Deprecated\n     public String getMethodName() {\n-        return getFirstChildOfType(ASTMethodDeclarator.class).getImage();\n+        return getName();\n     }\n \n-\n     @Override\n     public String getName() {\n-        return getMethodName();\n+        return getImage();\n     }\n \n ","source_code":"\/**\n * BSD-style license; for more info see http:\/\/pmd.sourceforge.net\/license.html\n *\/\n\npackage net.sourceforge.pmd.lang.java.ast;\n\nimport net.sourceforge.pmd.annotation.InternalApi;\nimport net.sourceforge.pmd.lang.ast.Node;\nimport net.sourceforge.pmd.lang.dfa.DFAGraphMethod;\n\n\n\/**\n * A method declaration, in a class or interface declaration. This cannot\n * be found in {@linkplain ASTAnnotationTypeDeclaration annotation types},\n * which instead have {@linkplain ASTAnnotationMethodDeclaration annotation methods}.\n *\n * 
\n *\n * MethodDeclaration ::= MethodModifier*\n *                       {@link ASTTypeParameters TypeParameters}?\n *                       {@link ASTResultType ResultType}\n *                       {@link ASTMethodDeclarator MethodDeclarator}\n *                       (\"throws\" {@link ASTNameList NameList})?\n *                       ({@link ASTBlock Block} | \";\" )\n *\n *\n * MethodModifier ::= \"public\" | \"private\"  | \"protected\" | \"static\"\n *                  | \"final\"  | \"abstract\" | \"native\"\n *                  | {@linkplain ASTAnnotation Annotation}\n *\n * <\/pre>\n *\/\npublic final class ASTMethodDeclaration extends AbstractMethodOrConstructorDeclaration implements DFAGraphMethod {\n\n\n    @InternalApi\n    @Deprecated\n    public ASTMethodDeclaration(int id) {\n        super(id);\n    }\n\n    ASTMethodDeclaration(JavaParser p, int id) {\n        super(p, id);\n    }\n\n    @Override\n    public Object jjtAccept(JavaParserVisitor visitor, Object data) {\n        return visitor.visit(this, data);\n    }\n\n\n    @Override\n    public  void jjtAccept(SideEffectingVisitor visitor, T data) {\n        visitor.visit(this, data);\n    }\n\n\n    \/**\n     * Returns the simple name of the method.\n     *\/\n    public String getMethodName() {\n        return getFirstChildOfType(ASTMethodDeclarator.class).getImage();\n    }\n\n\n    @Override\n    public String getName() {\n        return getMethodName();\n    }\n\n\n    \/**\n     * Returns true if this method is explicitly modified by\n     * the {@code public} modifier.\n     *\/\n    public boolean isSyntacticallyPublic() {\n        return super.isPublic();\n    }\n\n\n    \/**\n     * Returns true if this method is explicitly modified by\n     * the {@code abstract} modifier.\n     *\/\n    public boolean isSyntacticallyAbstract() {\n        return super.isAbstract();\n    }\n\n\n    \/**\n     * Returns true if this method has public visibility.\n     * Non-private interface members are implicitly public,\n     * whether they declare the {@code public} modifier or\n     * not.\n     *\/\n    @Override\n    public boolean isPublic() {\n        \/\/ interface methods are public by default, but could be private since java9\n        return isInterfaceMember() && !isPrivate() || super.isPublic();\n    }\n\n\n    \/**\n     * Returns true if this method is abstract, so doesn't\n     * declare a body. Interface members are\n     * implicitly abstract, whether they declare the\n     * {@code abstract} modifier or not. Default interface\n     * methods are not abstract though, consistently with the\n     * standard reflection API.\n     *\/\n    @Override\n    public boolean isAbstract() {\n        return isInterfaceMember() && !isDefault() || super.isAbstract();\n    }\n\n\n    \/**\n     * Returns true if this method declaration is a member of an interface type.\n     *\/\n    public boolean isInterfaceMember() {\n        \/\/ for a real class\/interface the 3rd parent is a ClassOrInterfaceDeclaration,\n        \/\/ for anonymous classes, the parent is e.g. 
a AllocationExpression\n        Node potentialTypeDeclaration = getNthParent(3);\n\n        return potentialTypeDeclaration instanceof ASTClassOrInterfaceDeclaration\n            && ((ASTClassOrInterfaceDeclaration) potentialTypeDeclaration).isInterface();\n    }\n\n\n    \/**\n     * Returns true if the result type of this method is {@code void}.\n     *\/\n    public boolean isVoid() {\n        return getResultType().isVoid();\n    }\n\n\n    \/**\n     * Returns the result type node of the method.\n     *\/\n    public ASTResultType getResultType() {\n        return getFirstChildOfType(ASTResultType.class);\n    }\n\n\n    \/**\n     * Returns the block defined by this method, or\n     * null if the method is abstract.\n     *\/\n    public ASTBlock getBlock() {\n        return getFirstChildOfType(ASTBlock.class);\n    }\n\n\n    \/**\n     * Returns the exception names listed in the {@code throws} clause\n     * of this method declaration, or null if there are none.\n     *\/\n    public ASTNameList getThrows() {\n        return getFirstChildOfType(ASTNameList.class);\n    }\n\n\n    @Override\n    public MethodLikeKind getKind() {\n        return MethodLikeKind.METHOD;\n    }\n\n    public ASTTypeParameters getTypeParameters() {\n        return getFirstChildOfType(ASTTypeParameters.class);\n    }\n\n    \/\/@Override \/\/ enable this with PMD 7.0.0 - see interface ASTMethodOrConstructorDeclaration\n    public ASTFormalParameters getFormalParameters() {\n        return getFirstChildOfType(ASTMethodDeclarator.class).getFirstChildOfType(ASTFormalParameters.class);\n    }\n\n\n    \/**\n     * Returns the method declarator. Never null.\n     *\/\n    public ASTMethodDeclarator getMethodDeclarator() {\n        return getFirstChildOfType(ASTMethodDeclarator.class);\n    }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":214}
{"diff_hunk":"@@ -194,7 +194,7 @@ public class ExecutorApiGateway {\n         JSONUtils.toJSON(executionIdsList));\n \n     return callWithExecutionId(executor.getHost(), executor.getPort(),\n-        ConnectorParams.UPDATE_ACTION, null, null, executionIds, updateTimes);\n+        ConnectorParams.UPDATE_ACTION, null, null, null, executionIds, updateTimes);\n   }\n \n }","source_code":"\/*\n * Copyright 2017 LinkedIn Corp.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n * use this file except in compliance with the License. You may obtain a copy of\n * the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n * License for the specific language governing permissions and limitations under\n * the License.\n *\/\n\npackage azkaban.executor;\n\nimport azkaban.Constants;\nimport azkaban.Constants.ConfigurationKeys;\nimport azkaban.DispatchMethod;\nimport azkaban.utils.JSONUtils;\nimport azkaban.utils.Pair;\nimport azkaban.utils.Props;\nimport com.google.common.annotations.VisibleForTesting;\nimport com.google.inject.Inject;\nimport java.io.IOException;\nimport java.net.URI;\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Optional;\nimport java.util.function.BiFunction;\nimport javax.inject.Singleton;\nimport org.codehaus.jackson.map.ObjectMapper;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\n@Singleton\npublic class ExecutorApiGateway {\n  private final static Logger logger = LoggerFactory.getLogger(ExecutorApiGateway.class);\n  public final static String DEFAULT_EXECUTION_RESOURCE = \"executor\";\n  public final static String CONTAINERIZED_EXECUTION_RESOURCE = \"container\";\n\n  \/\/ Default procedure for modifying a resource path that a reverse proxy, such as an\n  \/\/ ingress-controller, can use to route the request to correct endpoint.\n  \/\/   - This is a first-class function to make it easier to switch to a different mechanism of\n  \/\/     creating the path, depending on how the reverse-proxy is configured.\n  \/\/   - In future this implementation could be guice-injected (possibly based on a config property)\n  \/\/   - This implementation simply prefixes resource name with the execution-id and assumes that\n  \/\/     that a reverse proxy can route the request correctly based on this prefix.\n  private final static BiFunction executionResourceNameModifier =\n      ((e,r) -> String.join(\"\/\",  e.toString(), r));\n\n  private final static Executor defaultEmptyExecutor = new Executor(-1, \"\", 1, false);\n  private final ExecutorApiClient apiClient;\n  private final String executionResourceName;\n  private final boolean isReverseProxyEnabled;\n\n  @Inject\n  public ExecutorApiGateway(final ExecutorApiClient apiClient, Props azkProps) {\n    this.apiClient = apiClient;\n    isReverseProxyEnabled =\n        azkProps.getBoolean(ConfigurationKeys.AZKABAN_EXECUTOR_REVERSE_PROXY_ENABLED,\n            false);\n    String executionResourceName = DEFAULT_EXECUTION_RESOURCE;\n    if (DispatchMethod.getDispatchMethod(azkProps\n        .getString(Constants.ConfigurationKeys.AZKABAN_EXECUTION_DISPATCH_METHOD,\n            DispatchMethod.PUSH.name())) == DispatchMethod.CONTAINERIZED) {\n      executionResourceName = 
CONTAINERIZED_EXECUTION_RESOURCE;\n    }\n    this.executionResourceName = executionResourceName;\n  }\n\n  Map callWithExecutable(final ExecutableFlow exflow,\n      final Executor executor, final String action) throws ExecutorManagerException {\n    return callWithExecutionId(executor.getHost(), executor.getPort(), action,\n        exflow.getExecutionId(), null, (Pair[]) null);\n  }\n\n  Map callWithReference(final ExecutionReference ref, final String action,\n      final Pair... params) throws ExecutorManagerException {\n    final Executor executor = (isReverseProxyEnabled ? defaultEmptyExecutor : ref.getExecutor().get());\n    return callWithExecutionId(executor.getHost(), executor.getPort(), action, ref.getExecId(),\n        null, params);\n  }\n\n  public Map callWithReferenceByUser(final ExecutionReference ref,\n      final String action, final String user, final Pair... params)\n      throws ExecutorManagerException {\n    final Executor executor = (isReverseProxyEnabled ? defaultEmptyExecutor : ref.getExecutor().get());\n    return callWithExecutionId(executor.getHost(), executor.getPort(), action,\n        ref.getExecId(), user, params);\n  }\n\n  @VisibleForTesting\n  String createExecutionPath(final Optional executionId) throws ExecutorManagerException {\n    if (!isReverseProxyEnabled) {\n      return \"\/\" + executionResourceName;\n    }\n\n    if(!executionId.isPresent()) {\n      final String errorMessage = \"Execution Id must be provided when reverse-proxy is enabled\";\n      logger.error(errorMessage);\n      throw new ExecutorManagerException(errorMessage);\n    }\n    return \"\/\" + executionResourceNameModifier.apply(executionId.get(), executionResourceName);\n  }\n\n  Map callWithExecutionId(final String host, final int port,\n      final String action, final Integer executionId, final String user,\n      final Pair... 
params) throws ExecutorManagerException {\n    try {\n      final List> paramList = new ArrayList<>();\n\n      if (params != null) {\n        paramList.addAll(Arrays.asList(params));\n      }\n\n      paramList\n          .add(new Pair<>(ConnectorParams.ACTION_PARAM, action));\n      paramList.add(new Pair<>(ConnectorParams.EXECID_PARAM, String\n          .valueOf(executionId)));\n      paramList.add(new Pair<>(ConnectorParams.USER_PARAM, user));\n\n      \/\/ Ideally we should throw an exception if executionId is null but some existing code\n      \/\/ (updateExecutions()) expects to call this method with a null executionId.\n      String executionPath = createExecutionPath(Optional.ofNullable(executionId));\n      return callForJsonObjectMap(host, port, executionPath, paramList);\n    } catch (final IOException e) {\n      throw new ExecutorManagerException(e.getMessage(), e);\n    }\n  }\n\n  \/**\n   * Call executor and parse the JSON response as an instance of the class given as an argument.\n   *\/\n   T callForJsonType(final String host, final int port, final String path,\n      final List> paramList, final Class valueType) throws IOException {\n    final String responseString = callForJsonString(host, port, path, paramList);\n    if (null == responseString || responseString.length() == 0) {\n      return null;\n    }\n    return new ObjectMapper().readValue(responseString, valueType);\n  }\n\n  \/*\n   * Call executor and return json object map.\n   *\/\n  Map callForJsonObjectMap(final String host, final int port,\n      final String path, final List> paramList) throws IOException {\n    final String responseString =\n        callForJsonString(host, port, path, paramList);\n\n    @SuppressWarnings(\"unchecked\") final Map jsonResponse =\n        (Map) JSONUtils.parseJSONFromString(responseString);\n    final String error = (String) jsonResponse.get(ConnectorParams.RESPONSE_ERROR);\n    if (error != null) {\n      throw new IOException(error);\n    }\n    return jsonResponse;\n  }\n\n  \/*\n   * Call executor and return raw json string.\n   *\/\n  private String callForJsonString(final String host, final int port, final String path,\n      List> paramList) throws IOException {\n    if (paramList == null) {\n      paramList = new ArrayList<>();\n    }\n\n    @SuppressWarnings(\"unchecked\") final URI uri =\n        apiClient.buildExecutorUri(host, port, path, true);\n\n    return this.apiClient.httpPost(uri, paramList);\n  }\n\n  public Map updateExecutions(final Executor executor,\n      final List executions) throws ExecutorManagerException {\n    final List updateTimesList = new ArrayList<>();\n    final List executionIdsList = new ArrayList<>();\n    \/\/ We pack the parameters of the same host together before query\n    for (final ExecutableFlow flow : executions) {\n      executionIdsList.add(flow.getExecutionId());\n      updateTimesList.add(flow.getUpdateTime());\n    }\n    final Pair updateTimes = new Pair<>(\n        ConnectorParams.UPDATE_TIME_LIST_PARAM,\n        JSONUtils.toJSON(updateTimesList));\n    final Pair executionIds = new Pair<>(\n        ConnectorParams.EXEC_ID_LIST_PARAM,\n        JSONUtils.toJSON(executionIdsList));\n\n    return callWithExecutionId(executor.getHost(), executor.getPort(),\n        ConnectorParams.UPDATE_ACTION, null, null, executionIds, updateTimes);\n  }\n\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":215}
{"diff_hunk":"@@ -27,6 +27,7 @@ import com.github.javaparser.ast.NodeList;\n import com.github.javaparser.ast.expr.AnnotationExpr;\n import com.github.javaparser.ast.expr.SimpleName;\n import com.github.javaparser.ast.nodeTypes.modifiers.NodeWithAbstractModifier;\n+import com.github.javaparser.ast.type.Type;\n import com.github.javaparser.ast.visitor.CloneVisitor;\n import com.github.javaparser.ast.visitor.GenericVisitor;\n import com.github.javaparser.ast.visitor.VoidVisitor;","source_code":"\/*\n * Copyright (C) 2007-2010 J\u00falio Vilmar Gesser.\n * Copyright (C) 2011, 2013-2016 The JavaParser Team.\n *\n * This file is part of JavaParser.\n *\n * JavaParser can be used either under the terms of\n * a) the GNU Lesser General Public License as published by\n *     the Free Software Foundation, either version 3 of the License, or\n *     (at your option) any later version.\n * b) the terms of the Apache License\n *\n * You should have received a copy of both licenses in LICENCE.LGPL and\n * LICENCE.APACHE. Please refer to those files for details.\n *\n * JavaParser is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n * GNU Lesser General Public License for more details.\n *\/\npackage com.github.javaparser.ast.body;\n\nimport com.github.javaparser.ast.AllFieldsConstructor;\nimport com.github.javaparser.ast.Modifier;\nimport com.github.javaparser.ast.Node;\nimport com.github.javaparser.ast.NodeList;\nimport com.github.javaparser.ast.expr.AnnotationExpr;\nimport com.github.javaparser.ast.expr.SimpleName;\nimport com.github.javaparser.ast.nodeTypes.modifiers.NodeWithAbstractModifier;\nimport com.github.javaparser.ast.visitor.CloneVisitor;\nimport com.github.javaparser.ast.visitor.GenericVisitor;\nimport com.github.javaparser.ast.visitor.VoidVisitor;\nimport com.github.javaparser.metamodel.AnnotationDeclarationMetaModel;\nimport com.github.javaparser.metamodel.JavaParserMetaModel;\nimport com.github.javaparser.TokenRange;\nimport com.github.javaparser.resolution.Resolvable;\nimport com.github.javaparser.resolution.declarations.ResolvedAnnotationDeclaration;\nimport java.util.function.Consumer;\nimport java.util.Optional;\nimport com.github.javaparser.ast.Generated;\n\n\/**\n * An annotation type declaration.@interface X { ... 
}<\/code>\n *\n * @author Julio Vilmar Gesser\n *\/\npublic class AnnotationDeclaration extends TypeDeclaration implements NodeWithAbstractModifier, Resolvable {\n\n    public AnnotationDeclaration() {\n        this(null, new NodeList<>(), new NodeList<>(), new SimpleName(), new NodeList<>());\n    }\n\n    public AnnotationDeclaration(NodeList modifiers, String name) {\n        this(null, modifiers, new NodeList<>(), new SimpleName(name), new NodeList<>());\n    }\n\n    @AllFieldsConstructor\n    public AnnotationDeclaration(NodeList modifiers, NodeList annotations, SimpleName name, NodeList> members) {\n        this(null, modifiers, annotations, name, members);\n    }\n\n    \/**\n     * This constructor is used by the parser and is considered private.\n     *\/\n    @Generated(\"com.github.javaparser.generator.core.node.MainConstructorGenerator\")\n    public AnnotationDeclaration(TokenRange tokenRange, NodeList modifiers, NodeList annotations, SimpleName name, NodeList> members) {\n        super(tokenRange, modifiers, annotations, name, members);\n        customInitialization();\n    }\n\n    @Override\n    @Generated(\"com.github.javaparser.generator.core.node.AcceptGenerator\")\n    public  R accept(final GenericVisitor v, final A arg) {\n        return v.visit(this, arg);\n    }\n\n    @Override\n    @Generated(\"com.github.javaparser.generator.core.node.AcceptGenerator\")\n    public  void accept(final VoidVisitor v, final A arg) {\n        v.visit(this, arg);\n    }\n\n    @Override\n    @Generated(\"com.github.javaparser.generator.core.node.RemoveMethodGenerator\")\n    public boolean remove(Node node) {\n        if (node == null)\n            return false;\n        return super.remove(node);\n    }\n\n    @Override\n    @Generated(\"com.github.javaparser.generator.core.node.CloneGenerator\")\n    public AnnotationDeclaration clone() {\n        return (AnnotationDeclaration) accept(new CloneVisitor(), null);\n    }\n\n    @Override\n    @Generated(\"com.github.javaparser.generator.core.node.GetMetaModelGenerator\")\n    public AnnotationDeclarationMetaModel getMetaModel() {\n        return JavaParserMetaModel.annotationDeclarationMetaModel;\n    }\n\n    @Override\n    @Generated(\"com.github.javaparser.generator.core.node.ReplaceMethodGenerator\")\n    public boolean replace(Node node, Node replacementNode) {\n        if (node == null)\n            return false;\n        return super.replace(node, replacementNode);\n    }\n\n    @Override\n    @Generated(\"com.github.javaparser.generator.core.node.TypeCastingGenerator\")\n    public boolean isAnnotationDeclaration() {\n        return true;\n    }\n\n    @Override\n    @Generated(\"com.github.javaparser.generator.core.node.TypeCastingGenerator\")\n    public AnnotationDeclaration asAnnotationDeclaration() {\n        return this;\n    }\n\n    @Generated(\"com.github.javaparser.generator.core.node.TypeCastingGenerator\")\n    public void ifAnnotationDeclaration(Consumer action) {\n        action.accept(this);\n    }\n\n    @Override\n    public ResolvedAnnotationDeclaration resolve() {\n        return getSymbolResolver().resolveDeclaration(this, ResolvedAnnotationDeclaration.class);\n    }\n\n    @Override\n    @Generated(\"com.github.javaparser.generator.core.node.TypeCastingGenerator\")\n    public Optional toAnnotationDeclaration() {\n        return Optional.of(this);\n    }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":216}
{"diff_hunk":"@@ -111,8 +111,13 @@ public class Stats {\n     }\n     blockCountByPrefixLen[frame.prefix]++;\n     startBlockCount++;\n-    totalBlockSuffixBytes += frame.suffixesReader.length();\n+    totalBlockSuffixBytes += frame.totalSuffixBytes;\n+    totalUncompressedBlockSuffixBytes += frame.suffixesReader.length();\n+    if (frame.suffixesReader != frame.suffixLengthsReader) {\n+      totalUncompressedBlockSuffixBytes += frame.suffixLengthsReader.length();\n+    }\n     totalBlockStatsBytes += frame.statsReader.length();\n+    compressionAlgorithms[frame.compressionAlg.code]++;\n   }\n \n   void endBlock(SegmentTermsEnumFrame frame) {","source_code":"\/*\n * Licensed to the Apache Software Foundation (ASF) under one or more\n * contributor license agreements.  See the NOTICE file distributed with\n * this work for additional information regarding copyright ownership.\n * The ASF licenses this file to You under the Apache License, Version 2.0\n * (the \"License\"); you may not use this file except in compliance with\n * the License.  You may obtain a copy of the License at\n *\n *     http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\npackage org.apache.lucene.codecs.blocktree;\n\n\nimport java.io.ByteArrayOutputStream;\nimport java.io.PrintStream;\nimport java.io.UnsupportedEncodingException;\nimport java.util.Locale;\n\nimport org.apache.lucene.codecs.PostingsReaderBase;\nimport org.apache.lucene.util.ArrayUtil;\nimport org.apache.lucene.util.BytesRef;\nimport org.apache.lucene.util.IOUtils;\n\n\/**\n * BlockTree statistics for a single field \n * returned by {@link FieldReader#getStats()}.\n * @lucene.internal\n *\/\npublic class Stats {\n  \/** Byte size of the index. *\/\n  public long indexNumBytes;\n\n  \/** Total number of terms in the field. *\/\n  public long totalTermCount;\n\n  \/** Total number of bytes (sum of term lengths) across all terms in the field. *\/\n  public long totalTermBytes;\n\n  \/** The number of normal (non-floor) blocks in the terms file. *\/\n  public int nonFloorBlockCount;\n\n  \/** The number of floor blocks (meta-blocks larger than the\n   *  allowed {@code maxItemsPerBlock}) in the terms file. *\/\n  public int floorBlockCount;\n    \n  \/** The number of sub-blocks within the floor blocks. *\/\n  public int floorSubBlockCount;\n\n  \/** The number of \"internal\" blocks (that have both\n   *  terms and sub-blocks). *\/\n  public int mixedBlockCount;\n\n  \/** The number of \"leaf\" blocks (blocks that have only\n   *  terms). *\/\n  public int termsOnlyBlockCount;\n\n  \/** The number of \"internal\" blocks that do not contain\n   *  terms (have only sub-blocks). *\/\n  public int subBlocksOnlyBlockCount;\n\n  \/** Total number of blocks. *\/\n  public int totalBlockCount;\n\n  \/** Number of blocks at each prefix depth. *\/\n  public int[] blockCountByPrefixLen = new int[10];\n  private int startBlockCount;\n  private int endBlockCount;\n\n  \/** Total number of bytes used to store term suffixes. *\/\n  public long totalBlockSuffixBytes;\n\n  \/** Total number of bytes used to store term stats (not\n   *  including what the {@link PostingsReaderBase}\n   *  stores. 
*\/\n  public long totalBlockStatsBytes;\n\n  \/** Total bytes stored by the {@link PostingsReaderBase},\n   *  plus the other few vInts stored in the frame. *\/\n  public long totalBlockOtherBytes;\n\n  \/** Segment name. *\/\n  public final String segment;\n\n  \/** Field name. *\/\n  public final String field;\n\n  Stats(String segment, String field) {\n    this.segment = segment;\n    this.field = field;\n  }\n\n  void startBlock(SegmentTermsEnumFrame frame, boolean isFloor) {\n    totalBlockCount++;\n    if (isFloor) {\n      if (frame.fp == frame.fpOrig) {\n        floorBlockCount++;\n      }\n      floorSubBlockCount++;\n    } else {\n      nonFloorBlockCount++;\n    }\n\n    if (blockCountByPrefixLen.length <= frame.prefix) {\n      blockCountByPrefixLen = ArrayUtil.grow(blockCountByPrefixLen, 1+frame.prefix);\n    }\n    blockCountByPrefixLen[frame.prefix]++;\n    startBlockCount++;\n    totalBlockSuffixBytes += frame.suffixesReader.length();\n    totalBlockStatsBytes += frame.statsReader.length();\n  }\n\n  void endBlock(SegmentTermsEnumFrame frame) {\n    final int termCount = frame.isLeafBlock ? frame.entCount : frame.state.termBlockOrd;\n    final int subBlockCount = frame.entCount - termCount;\n    totalTermCount += termCount;\n    if (termCount != 0 && subBlockCount != 0) {\n      mixedBlockCount++;\n    } else if (termCount != 0) {\n      termsOnlyBlockCount++;\n    } else if (subBlockCount != 0) {\n      subBlocksOnlyBlockCount++;\n    } else {\n      throw new IllegalStateException();\n    }\n    endBlockCount++;\n    final long otherBytes = frame.fpEnd - frame.fp - frame.suffixesReader.length() - frame.statsReader.length();\n    assert otherBytes > 0 : \"otherBytes=\" + otherBytes + \" frame.fp=\" + frame.fp + \" frame.fpEnd=\" + frame.fpEnd;\n    totalBlockOtherBytes += otherBytes;\n  }\n\n  void term(BytesRef term) {\n    totalTermBytes += term.length;\n  }\n\n  void finish() {\n    assert startBlockCount == endBlockCount: \"startBlockCount=\" + startBlockCount + \" endBlockCount=\" + endBlockCount;\n    assert totalBlockCount == floorSubBlockCount + nonFloorBlockCount: \"floorSubBlockCount=\" + floorSubBlockCount + \" nonFloorBlockCount=\" + nonFloorBlockCount + \" totalBlockCount=\" + totalBlockCount;\n    assert totalBlockCount == mixedBlockCount + termsOnlyBlockCount + subBlocksOnlyBlockCount: \"totalBlockCount=\" + totalBlockCount + \" mixedBlockCount=\" + mixedBlockCount + \" subBlocksOnlyBlockCount=\" + subBlocksOnlyBlockCount + \" termsOnlyBlockCount=\" + termsOnlyBlockCount;\n  }\n\n  @Override\n  public String toString() {\n    final ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);\n    PrintStream out;\n    try {\n      out = new PrintStream(bos, false, IOUtils.UTF_8);\n    } catch (UnsupportedEncodingException bogus) {\n      throw new RuntimeException(bogus);\n    }\n      \n    out.println(\"  index FST:\");\n    out.println(\"    \" + indexNumBytes + \" bytes\");\n    out.println(\"  terms:\");\n    out.println(\"    \" + totalTermCount + \" terms\");\n    out.println(\"    \" + totalTermBytes + \" bytes\" + (totalTermCount != 0 ? 
\" (\" + String.format(Locale.ROOT, \"%.1f\", ((double) totalTermBytes)\/totalTermCount) + \" bytes\/term)\" : \"\"));\n    out.println(\"  blocks:\");\n    out.println(\"    \" + totalBlockCount + \" blocks\");\n    out.println(\"    \" + termsOnlyBlockCount + \" terms-only blocks\");\n    out.println(\"    \" + subBlocksOnlyBlockCount + \" sub-block-only blocks\");\n    out.println(\"    \" + mixedBlockCount + \" mixed blocks\");\n    out.println(\"    \" + floorBlockCount + \" floor blocks\");\n    out.println(\"    \" + (totalBlockCount-floorSubBlockCount) + \" non-floor blocks\");\n    out.println(\"    \" + floorSubBlockCount + \" floor sub-blocks\");\n    out.println(\"    \" + totalBlockSuffixBytes + \" term suffix bytes\" + (totalBlockCount != 0 ? \" (\" + String.format(Locale.ROOT, \"%.1f\", ((double) totalBlockSuffixBytes)\/totalBlockCount) + \" suffix-bytes\/block)\" : \"\"));\n    out.println(\"    \" + totalBlockStatsBytes + \" term stats bytes\" + (totalBlockCount != 0 ? \" (\" + String.format(Locale.ROOT, \"%.1f\", ((double) totalBlockStatsBytes)\/totalBlockCount) + \" stats-bytes\/block)\" : \"\"));\n    out.println(\"    \" + totalBlockOtherBytes + \" other bytes\" + (totalBlockCount != 0 ? \" (\" + String.format(Locale.ROOT, \"%.1f\", ((double) totalBlockOtherBytes)\/totalBlockCount) + \" other-bytes\/block)\" : \"\"));\n    if (totalBlockCount != 0) {\n      out.println(\"    by prefix length:\");\n      int total = 0;\n      for(int prefix=0;prefix doesExtend(it, klass));\n    }\n\n    private boolean doesExtend(ASTAnyTypeDeclaration sub, ASTClassOrInterfaceDeclaration superClass) {\n        return sub != superClass && TypeTestUtil.isA(superClass.getTypeMirror(), sub);\n    }\n\n    private boolean hasOnlyPrivateCtors(ASTClassOrInterfaceDeclaration node) {\n        return node.getDeclarations(ASTConstructorDeclaration.class).all(it -> it.getVisibility() == V_PRIVATE)\n            && (node.getVisibility() == V_PRIVATE \/\/ then the default ctor is private\n            || node.getDeclarations(ASTConstructorDeclaration.class).nonEmpty());\n    }\n\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":218}
{"diff_hunk":"@@ -42,22 +42,32 @@ class BoundZmqEventBus implements EventBus {\n   private final ZMQ.Socket xsub;\n   private final ExecutorService executor;\n \n+\n   BoundZmqEventBus(ZContext context, String publishConnection, String subscribeConnection) {\n     String address = new NetworkUtils().getHostAddress();\n     Addresses xpubAddr = deriveAddresses(address, publishConnection);\n     Addresses xsubAddr = deriveAddresses(address, subscribeConnection);\n \n+    Curve curve = new Curve();\n+    String[] serverKeys = curve.keypairZ85();\n+    String[] clientKeys = curve.keypairZ85();\n+\n     LOG.info(String.format(\"XPUB binding to %s, XSUB binding to %s\", xpubAddr, xsubAddr));\n \n     xpub = context.createSocket(SocketType.XPUB);\n     xpub.setIPv6(xpubAddr.isIPv6);\n     xpub.setImmediate(true);\n     xpub.bind(xpubAddr.bindTo);\n+    xpub.setCurvePublicKey(serverKeys[0].getBytes());\n+    xpub.setCurveSecretKey(serverKeys[1].getBytes());\n \n     xsub = context.createSocket(SocketType.XSUB);\n     xsub.setIPv6(xsubAddr.isIPv6);\n     xsub.setImmediate(true);\n     xsub.bind(xsubAddr.bindTo);\n+    xsub.setCurvePublicKey(clientKeys[0].getBytes());\n+    xsub.setCurveSecretKey(clientKeys[1].getBytes());\n+    xsub.setCurveServerKey(serverKeys[0].getBytes());\n \n     executor = Executors.newCachedThreadPool(r -> {\n       Thread thread = new Thread(r, \"Message Bus Proxy\");","source_code":"\/\/ Licensed to the Software Freedom Conservancy (SFC) under one\n\/\/ or more contributor license agreements.  See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership.  The SFC licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License.  You may obtain a copy of the License at\n\/\/\n\/\/   http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied.  
See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\npackage org.openqa.selenium.events.zeromq;\n\nimport org.openqa.selenium.events.Event;\nimport org.openqa.selenium.events.EventBus;\nimport org.openqa.selenium.events.Type;\nimport org.openqa.selenium.net.NetworkUtils;\nimport org.zeromq.SocketType;\nimport org.zeromq.ZContext;\nimport org.zeromq.ZMQ;\n\nimport java.net.Inet6Address;\nimport java.net.InetAddress;\nimport java.net.UnknownHostException;\nimport java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\nimport java.util.function.Consumer;\nimport java.util.logging.Level;\nimport java.util.logging.Logger;\n\nclass BoundZmqEventBus implements EventBus {\n\n  private static final Logger LOG = Logger.getLogger(EventBus.class.getName());\n  private final UnboundZmqEventBus delegate;\n  private final ZMQ.Socket xpub;\n  private final ZMQ.Socket xsub;\n  private final ExecutorService executor;\n\n  BoundZmqEventBus(ZContext context, String publishConnection, String subscribeConnection) {\n    String address = new NetworkUtils().getHostAddress();\n    Addresses xpubAddr = deriveAddresses(address, publishConnection);\n    Addresses xsubAddr = deriveAddresses(address, subscribeConnection);\n\n    LOG.info(String.format(\"XPUB binding to %s, XSUB binding to %s\", xpubAddr, xsubAddr));\n\n    xpub = context.createSocket(SocketType.XPUB);\n    xpub.setIPv6(xpubAddr.isIPv6);\n    xpub.setImmediate(true);\n    xpub.bind(xpubAddr.bindTo);\n\n    xsub = context.createSocket(SocketType.XSUB);\n    xsub.setIPv6(xsubAddr.isIPv6);\n    xsub.setImmediate(true);\n    xsub.bind(xsubAddr.bindTo);\n\n    executor = Executors.newCachedThreadPool(r -> {\n      Thread thread = new Thread(r, \"Message Bus Proxy\");\n      thread.setDaemon(true);\n      return thread;\n    });\n    executor.submit(() -> ZMQ.proxy(xsub, xpub, null));\n\n    delegate = new UnboundZmqEventBus(context, xpubAddr.advertise, xsubAddr.advertise);\n\n    LOG.info(\"Event bus ready\");\n  }\n\n  @Override\n  public boolean isReady() {\n    return !executor.isShutdown();\n  }\n\n  @Override\n  public void addListener(Type type, Consumer onType) {\n    delegate.addListener(type, onType);\n  }\n\n  @Override\n  public void fire(Event event) {\n    delegate.fire(event);\n  }\n\n  @Override\n  public void close() {\n    delegate.close();\n    executor.shutdown();\n    xsub.close();\n    xpub.close();\n  }\n\n  private Addresses deriveAddresses(String host, String connection) {\n    if (connection.startsWith(\"inproc:\")) {\n      return new Addresses(connection, connection, false);\n    }\n\n    if (!connection.startsWith(\"tcp:\/\/\")) {\n      throw new IllegalArgumentException(\"Connection string must begin with inproc:\/\/ or tcp:\/\/\");\n    }\n\n    int length = \"tcp:\/\/\".length();\n    int colon = connection.indexOf(\":\", length);\n    if (colon == -1) {\n      throw new IllegalArgumentException(\"Unable to determine hostname from \" + connection);\n    }\n    String hostName = connection.substring(length, colon);\n\n    int port = Integer.parseInt(connection.substring(colon + 1));\n\n    if (!\"*\".equals(hostName)) {\n      host = hostName;\n    }\n\n    boolean isAddressIPv6 = false;\n    try {\n      if (InetAddress.getByName(host) instanceof Inet6Address ) {\n        isAddressIPv6 = true;\n        if (!host.startsWith(\"[\")) {\n          host = String.format(\"[%s]\", host);\n        }\n      }\n    } catch (UnknownHostException e) {\n    
  LOG.log(Level.WARNING, \"Could not determine if host address is IPv6 or IPv4\", e);\n    }\n\n    return new Addresses(\n        connection,\n        String.format(\"tcp:\/\/%s:%d\", host, port),\n        isAddressIPv6\n    );\n  }\n\n  private static class Addresses {\n    Addresses(String bindTo, String advertise, boolean isIPv6) {\n      this.bindTo = bindTo;\n      this.advertise = advertise;\n      this.isIPv6 = isIPv6;\n    }\n\n    String bindTo;\n    String advertise;\n    boolean isIPv6;\n\n    @Override\n    public String toString() {\n      return String.format(\"[binding to %s, advertising as %s]\", bindTo, advertise);\n    }\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":219}
{"diff_hunk":"@@ -102,4 +102,19 @@ public class Invoker implements InvocationHandler {\n \n     throw ExceptionFactory.convertConsumerException(response.getResult());\n   }\n+\n+  protected CompletableFuture completableFutureInvoke(Invocation invocation,\n+      SwaggerConsumerOperation consumerOperation) {\n+    CompletableFuture future = new CompletableFuture<>();\n+    InvokerUtils.reactiveInvoke(invocation, response -> {\n+      if (response.isSuccessed()) {\n+        Object result = consumerOperation.getResponseMapper().mapResponse(response);\n+        future.complete(result);\n+        return;\n+      }\n+\n+      future.completeExceptionally(response.getResult());\n+    });\n+    return future;\n+  }\n }","source_code":"\/*\n * Licensed to the Apache Software Foundation (ASF) under one or more\n * contributor license agreements.  See the NOTICE file distributed with\n * this work for additional information regarding copyright ownership.\n * The ASF licenses this file to You under the Apache License, Version 2.0\n * (the \"License\"); you may not use this file except in compliance with\n * the License.  You may obtain a copy of the License at\n *\n *     http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage io.servicecomb.provider.pojo;\n\nimport java.lang.reflect.InvocationHandler;\nimport java.lang.reflect.Method;\nimport java.lang.reflect.Proxy;\n\nimport org.springframework.util.StringUtils;\n\nimport io.servicecomb.core.CseContext;\nimport io.servicecomb.core.Invocation;\nimport io.servicecomb.core.definition.MicroserviceMeta;\nimport io.servicecomb.core.definition.SchemaMeta;\nimport io.servicecomb.core.invocation.InvocationFactory;\nimport io.servicecomb.core.provider.consumer.InvokerUtils;\nimport io.servicecomb.core.provider.consumer.ReferenceConfig;\nimport io.servicecomb.core.provider.consumer.ReferenceConfigUtils;\nimport io.servicecomb.swagger.engine.SwaggerConsumer;\nimport io.servicecomb.swagger.engine.SwaggerConsumerOperation;\nimport io.servicecomb.swagger.invocation.Response;\nimport io.servicecomb.swagger.invocation.exception.ExceptionFactory;\n\npublic class Invoker implements InvocationHandler {\n  \/\/ \u539f\u59cb\u6570\u636e\n  private String microserviceName;\n\n  private String schemaId;\n\n  private Class consumerIntf;\n\n  \/\/ \u751f\u6210\u7684\u6570\u636e\n  private SchemaMeta schemaMeta;\n\n  private ReferenceConfig referenceConfig;\n\n  private SwaggerConsumer swaggerConsumer;\n\n  @SuppressWarnings(\"unchecked\")\n  public static  T createProxy(String microserviceName, String schemaId, Class consumerIntf) {\n    Invoker invoker = new Invoker(microserviceName, schemaId, consumerIntf);\n    return (T) Proxy.newProxyInstance(consumerIntf.getClassLoader(), new Class[] {consumerIntf}, invoker);\n  }\n\n  public Invoker(String microserviceName, String schemaId, Class consumerIntf) {\n    this.microserviceName = microserviceName;\n    this.schemaId = schemaId;\n    this.consumerIntf = consumerIntf;\n  }\n\n  protected void prepare() {\n    referenceConfig = ReferenceConfigUtils.getForInvoke(microserviceName);\n    MicroserviceMeta microserviceMeta = referenceConfig.getMicroserviceMeta();\n\n    if 
(StringUtils.isEmpty(schemaId)) {\n      \/\/ \u672a\u6307\u5b9aschemaId\uff0c\u770b\u770bconsumer\u63a5\u53e3\u662f\u5426\u7b49\u4e8e\u5951\u7ea6\u63a5\u53e3\n      schemaMeta = microserviceMeta.findSchemaMeta(consumerIntf);\n      if (schemaMeta == null) {\n        \/\/ \u5c1d\u8bd5\u7528consumer\u63a5\u53e3\u540d\u4f5c\u4e3aschemaId\n        schemaId = consumerIntf.getName();\n        schemaMeta = microserviceMeta.ensureFindSchemaMeta(schemaId);\n      }\n    } else {\n      schemaMeta = microserviceMeta.ensureFindSchemaMeta(schemaId);\n    }\n\n    this.swaggerConsumer = CseContext.getInstance().getSwaggerEnvironment().createConsumer(consumerIntf,\n        schemaMeta.getSwaggerIntf());\n  }\n\n  @Override\n  public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {\n    if (swaggerConsumer == null) {\n      prepare();\n    }\n\n    Invocation invocation =\n        InvocationFactory.forConsumer(referenceConfig, schemaMeta, method.getName(), null);\n\n    SwaggerConsumerOperation consumerOperation = swaggerConsumer.findOperation(method.getName());\n    consumerOperation.getArgumentsMapper().toInvocation(args, invocation);\n\n    Response response = InvokerUtils.innerSyncInvoke(invocation);\n    if (response.isSuccessed()) {\n      return consumerOperation.getResponseMapper().mapResponse(response);\n    }\n\n    throw ExceptionFactory.convertConsumerException(response.getResult());\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":220}
{"diff_hunk":"@@ -7,9 +7,12 @@ package net.sourceforge.pmd.lang.ecmascript.rule;\n import net.sourceforge.pmd.Rule;\n import net.sourceforge.pmd.RuleContext;\n import net.sourceforge.pmd.RuleViolation;\n+import net.sourceforge.pmd.autofix.AutoFixableRuleViolation;\n+import net.sourceforge.pmd.autofix.RuleViolationFix;\n import net.sourceforge.pmd.lang.ast.Node;\n import net.sourceforge.pmd.lang.ecmascript.ast.EcmascriptNode;\n import net.sourceforge.pmd.lang.rule.AbstractRuleViolationFactory;\n+import net.sourceforge.pmd.lang.rule.AutoFixableParametricRuleViolation;\n import net.sourceforge.pmd.lang.rule.ParametricRuleViolation;\n \n public final class EcmascriptRuleViolationFactory extends AbstractRuleViolationFactory {","source_code":"\/**\n * BSD-style license; for more info see http:\/\/pmd.sourceforge.net\/license.html\n *\/\n\npackage net.sourceforge.pmd.lang.ecmascript.rule;\n\nimport net.sourceforge.pmd.Rule;\nimport net.sourceforge.pmd.RuleContext;\nimport net.sourceforge.pmd.RuleViolation;\nimport net.sourceforge.pmd.lang.ast.Node;\nimport net.sourceforge.pmd.lang.ecmascript.ast.EcmascriptNode;\nimport net.sourceforge.pmd.lang.rule.AbstractRuleViolationFactory;\nimport net.sourceforge.pmd.lang.rule.ParametricRuleViolation;\n\npublic final class EcmascriptRuleViolationFactory extends AbstractRuleViolationFactory {\n\n    public static final EcmascriptRuleViolationFactory INSTANCE = new EcmascriptRuleViolationFactory();\n\n    private EcmascriptRuleViolationFactory() {\n    }\n\n    @SuppressWarnings(\"rawtypes\")\n    @Override\n    protected RuleViolation createRuleViolation(Rule rule, RuleContext ruleContext, Node node, String message) {\n        return new ParametricRuleViolation<>(rule, ruleContext, (EcmascriptNode) node, message);\n    }\n\n    protected RuleViolation createRuleViolation(Rule rule, RuleContext ruleContext, Node node, String message,\n            int beginLine, int endLine) {\n        return null; \/\/ FIXME\n    }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":221}
{"diff_hunk":"@@ -1,6 +1,7 @@\n package com.fsck.k9.mail.ssl;\n \n import com.fsck.k9.mail.MessagingException;\n+import com.fsck.k9.mail.ProxySettings;\n \n import java.io.IOException;\n import java.net.Socket;","source_code":"package com.fsck.k9.mail.ssl;\n\nimport com.fsck.k9.mail.MessagingException;\n\nimport java.io.IOException;\nimport java.net.Socket;\nimport java.security.KeyManagementException;\nimport java.security.NoSuchAlgorithmException;\n\npublic interface TrustedSocketFactory {\n    Socket createSocket(Socket socket, String host, int port, String clientCertificateAlias)\n            throws NoSuchAlgorithmException, KeyManagementException, MessagingException, IOException;\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":222}
{"diff_hunk":"@@ -102,25 +102,20 @@ public final class BaselineVersions implements Plugin {\n             if (project.file(\"versions.props\").exists()) {\n                 extension.propertiesFile(ImmutableMap.of(\"file\", project.file(\"versions.props\")));\n             }\n-        } else {\n-            TaskProvider checkBomConflict = project.getTasks().register(\n-                    \"checkBomConflict\", CheckBomConflictTask.class, task -> task.setPropsFile(rootVersionsPropsFile));\n-            TaskProvider checkNoUnusedPin = project.getTasks().register(\n-                    \"checkNoUnusedPin\", CheckNoUnusedPinTask.class, task -> task.setPropsFile(rootVersionsPropsFile));\n-\n-            project.getTasks().register(\"checkVersionsProps\", CheckVersionsPropsTask.class, task -> {\n-                task.dependsOn(checkBomConflict, checkNoUnusedPin);\n-                \/\/ If we just run checkVersionsProps --fix, we want to propagate its option to its dependent tasks\n-                checkBomConflict.get().setShouldFix(task.getShouldFix());\n-                checkNoUnusedPin.get().setShouldFix(task.getShouldFix());\n-            });\n-            \/\/ If we run with --parallel --fix, both checkNoUnusedPin and checkBomConflict will try to overwrite the\n-            \/\/ versions file at the same time. Therefore, make sure checkBomConflict runs first.\n-            checkNoUnusedPin.configure(task -> task.mustRunAfter(checkBomConflict));\n-\n-            project.getPluginManager().apply(BasePlugin.class);\n-            project.getTasks().named(\"check\").configure(task -> task.dependsOn(\"checkVersionsProps\"));\n         }\n+\n+        TaskProvider checkBomConflict = project.getTasks().register(\n+                \"checkBomConflict\", CheckBomConflictTask.class, task -> {\n+                    task.setPropsFile(rootVersionsPropsFile);\n+                    \/\/ If we run with --parallel --fix, both checkNoUnusedPin and checkBomConflict will try to overwrite\n+                    \/\/ the versions file at the same time. Therefore, make sure checkNoUnusedPin runs first.\n+                    task.mustRunAfter(checkNoUnusedPin);\n+                });\n+\n+        checkVersionsProps.configure(task -> {\n+            task.dependsOn(checkBomConflict);\n+            checkBomConflict.get().setShouldFix(task.getShouldFix());\n+        });\n     }\n \n     private static File rootVersionsPropsFile(Project project) {","source_code":"\/*\n * (c) Copyright 2018 Palantir Technologies Inc. 
All rights reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage com.palantir.baseline.plugins.versions;\n\nimport com.google.common.base.CharMatcher;\nimport com.google.common.collect.ImmutableMap;\nimport java.io.File;\nimport java.io.IOException;\nimport java.nio.file.Files;\nimport java.util.Comparator;\nimport java.util.Set;\nimport java.util.stream.Collectors;\nimport netflix.nebula.dependency.recommender.DependencyRecommendationsPlugin;\nimport netflix.nebula.dependency.recommender.RecommendationStrategies;\nimport netflix.nebula.dependency.recommender.provider.FuzzyVersionResolver;\nimport netflix.nebula.dependency.recommender.provider.RecommendationProviderContainer;\nimport org.gradle.api.Plugin;\nimport org.gradle.api.Project;\nimport org.gradle.api.artifacts.Configuration;\nimport org.gradle.api.artifacts.component.ModuleComponentIdentifier;\nimport org.gradle.api.artifacts.result.ResolutionResult;\nimport org.gradle.api.logging.Logger;\nimport org.gradle.api.logging.Logging;\nimport org.gradle.api.plugins.BasePlugin;\nimport org.gradle.api.tasks.TaskProvider;\n\n\/**\n * Transitively applies nebula.dependency recommender to replace the following common gradle snippet.\n *\n * <pre>
\n * buildscript {\n *     dependencies {\n *         classpath 'com.netflix.nebula:nebula-dependency-recommender:5.2.0'\n *     }\n * }\n *\n * allprojects {\n *     apply plugin: 'nebula.dependency-recommender'\n *\n *     dependencyRecommendations {\n *         strategy OverrideTransitives\n *         propertiesFile file: project.rootProject.file('versions.props')\n *         if (file('versions.props').exists()) {\n *             propertiesFile file: project.file('versions.props')\n *         }\n *     }\n * }\n * <\/pre>\n *\/\npublic final class BaselineVersions implements Plugin {\n\n    private static final Logger log = Logging.getLogger(BaselineVersions.class);\n    static final String GROUP = \"com.palantir.baseline-versions\";\n    \/**\n     * System property which, when true, instructs {@code nebula.dependency-recommender} to only support sourcing\n     * constraints from a BOM. In that case, nebula doesn't support sourcing recommendations from\n     * {@code versions.props} anymore.\n     *\/\n    public static final boolean IS_CORE_BOM_ENABLED = Boolean.getBoolean(\"nebula.features.coreBomSupport\");\n    public static final String DISABLE_PROPERTY = \"com.palantir.baseline-versions.disable\";\n\n    @Override\n    public void apply(Project project) {\n        if (project.hasProperty(DISABLE_PROPERTY)) {\n            log.info(\"Not configuring com.palantir.baseline-versions because \" + DISABLE_PROPERTY + \" was set\");\n            return;\n        }\n        \/\/ apply plugin: \"nebula.dependency-recommender\"\n        project.getPluginManager().apply(DependencyRecommendationsPlugin.class);\n\n        if (IS_CORE_BOM_ENABLED) {\n            log.info(\"Not configuring nebula.dependency-recommender because coreBomSupport is enabled\");\n            return;\n        }\n\n        \/\/ get dependencyRecommendations extension\n        RecommendationProviderContainer extension = project\n                .getExtensions()\n                .getByType(RecommendationProviderContainer.class);\n\n        extension.setStrategy(RecommendationStrategies.OverrideTransitives); \/\/ default is 'ConflictResolved'\n\n        File rootVersionsPropsFile = rootVersionsPropsFile(project);\n        extension.propertiesFile(ImmutableMap.of(\"file\", rootVersionsPropsFile));\n\n        if (project != project.getRootProject()) {\n            \/\/ allow nested projects to specify their own nested versions.props file\n            if (project.file(\"versions.props\").exists()) {\n                extension.propertiesFile(ImmutableMap.of(\"file\", project.file(\"versions.props\")));\n            }\n        } else {\n            TaskProvider checkBomConflict = project.getTasks().register(\n                    \"checkBomConflict\", CheckBomConflictTask.class, task -> task.setPropsFile(rootVersionsPropsFile));\n            TaskProvider checkNoUnusedPin = project.getTasks().register(\n                    \"checkNoUnusedPin\", CheckNoUnusedPinTask.class, task -> task.setPropsFile(rootVersionsPropsFile));\n\n            project.getTasks().register(\"checkVersionsProps\", CheckVersionsPropsTask.class, task -> {\n                task.dependsOn(checkBomConflict, checkNoUnusedPin);\n                \/\/ If we just run checkVersionsProps --fix, we want to propagate its option to its dependent tasks\n                checkBomConflict.get().setShouldFix(task.getShouldFix());\n                checkNoUnusedPin.get().setShouldFix(task.getShouldFix());\n            });\n            \/\/ If we run with --parallel --fix, both 
checkNoUnusedPin and checkBomConflict will try to overwrite the\n            \/\/ versions file at the same time. Therefore, make sure checkBomConflict runs first.\n            checkNoUnusedPin.configure(task -> task.mustRunAfter(checkBomConflict));\n\n            project.getPluginManager().apply(BasePlugin.class);\n            project.getTasks().named(\"check\").configure(task -> task.dependsOn(\"checkVersionsProps\"));\n        }\n    }\n\n    private static File rootVersionsPropsFile(Project project) {\n        File file = project.getRootProject().file(\"versions.props\");\n        if (!file.canRead()) {\n            try {\n                log.info(\"Could not find 'versions.props' file, creating...\");\n                Files.createFile(file.toPath());\n            } catch (IOException e) {\n                log.warn(\"Unable to create empty versions.props file, please create this manually\", e);\n            }\n        }\n        return file;\n    }\n\n    static Set getAllProjectsResolvedModuleIdentifiers(Project project) {\n        return project.getRootProject().getAllprojects()\n                .stream()\n                .flatMap(project2 -> getResolvedModuleIdentifiers(project2).stream())\n                .collect(Collectors.toSet());\n    }\n\n    static Set getResolvedModuleIdentifiers(Project project) {\n        return project.getConfigurations().stream()\n                .filter(Configuration::isCanBeResolved)\n                .flatMap(configuration -> {\n                    try {\n                        ResolutionResult resolutionResult = configuration.getIncoming().getResolutionResult();\n                        return resolutionResult\n                                .getAllComponents()\n                                .stream()\n                                .map(result -> result.getId())\n                                .filter(cid -> !cid.equals(resolutionResult.getRoot().getId())) \/\/ remove the project\n                                .filter(cid -> cid instanceof ModuleComponentIdentifier)\n                                .map(mcid -> ((ModuleComponentIdentifier) mcid).getModuleIdentifier())\n                                .map(mid -> mid.getGroup() + \":\" + mid.getName());\n                    } catch (Exception e) {\n                        throw new RuntimeException(String.format(\"Error during resolution of the dependency graph of \"\n                                + \"configuration %s\", configuration), e);\n                    }\n                })\n                .collect(Collectors.toSet());\n    }\n\n    \/**\n     * Compares {@code versions.props} matchers by weight. Higher weight means the matcher is more specific.\n     * For example,\n     * 
<pre>\n     *     com.google.guava:guava\n     * <\/pre>\n     * is more specific than\n     * <pre>\n     *     com.google.guava:*\n     * <\/pre>\n     *\/\n    static final Comparator VERSIONS_PROPS_ENTRY_SPECIFIC_COMPARATOR =\n            Comparator.comparing(BaselineVersions::versionsPropsMatcherWeight);\n\n    \/**\n     * The weight of a matcher in {@code versions.props} according to the disambiguation logic defined in\n     * {@code nebula.dependency-recommender}.\n     *\n     * This matches the logic in {@link FuzzyVersionResolver}.\n     *\/\n    private static int versionsPropsMatcherWeight(String matcher) {\n        return CharMatcher.isNot('*').countIn(matcher);\n    }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":223}
{"diff_hunk":"@@ -14,6 +14,8 @@ import org.junit.jupiter.api.Test;\n import java.io.IOException;\n \n import static com.github.javaparser.ast.Modifier.Keyword.STATIC;\n+import static com.github.javaparser.utils.TestUtils.assertEqualsNoEol;\n+import static com.github.javaparser.utils.Utils.EOL;\n \n \/**\n  * These tests are more \"high level\" than the ones in LexicalPreservingPrinterTest.","source_code":"package com.github.javaparser.printer.lexicalpreservation;\n\nimport com.github.javaparser.ast.body.FieldDeclaration;\nimport com.github.javaparser.ast.body.MethodDeclaration;\nimport com.github.javaparser.ast.body.VariableDeclarator;\nimport com.github.javaparser.ast.expr.Expression;\nimport com.github.javaparser.ast.expr.NameExpr;\nimport com.github.javaparser.ast.expr.NullLiteralExpr;\nimport com.github.javaparser.ast.stmt.ReturnStmt;\nimport com.github.javaparser.ast.type.ArrayType;\nimport com.github.javaparser.ast.type.PrimitiveType;\nimport org.junit.jupiter.api.Test;\n\nimport java.io.IOException;\n\nimport static com.github.javaparser.ast.Modifier.Keyword.STATIC;\n\n\/**\n * These tests are more \"high level\" than the ones in LexicalPreservingPrinterTest.\n * The idea is to perform some transformations on the code, print it back and see if the generated code\n * is the expected one. We do not care about the internal state of LexicalPreservingPrinter, just the visible result.\n *\/\nclass TransformationsTest extends  AbstractLexicalPreservingTest {\n\n    @Test\n    void unchangedSimpleClasses() throws IOException {\n        assertUnchanged(\"Example1\");\n        assertUnchanged(\"Example2\");\n    }\n\n    @Test\n    void unchangedComplexFile() throws IOException {\n        assertUnchanged(\"Example4\");\n    }\n\n    @Test\n    void example1() throws IOException {\n        considerExample(\"Example1_original\");\n        cu.getClassByName(\"A\").get().getFieldByName(\"a\").get().setModifiers(STATIC);\n        assertTransformed(\"Example1\", cu);\n    }\n\n    @Test\n    void example2() throws IOException {\n        considerExample(\"Example2_original\");\n        cu.getClassByName(\"A\").get().getFieldByName(\"a\").get().getVariable(0).setInitializer(\"10\");\n        assertTransformed(\"Example2\", cu);\n    }\n\n    @Test\n    void example3() throws IOException {\n        considerExample(\"Example3_original\");\n        cu.getClassByName(\"A\").get().getFieldByName(\"a\").get().getVariable(0).setInitializer((Expression) null);\n        assertTransformed(\"Example3\", cu);\n    }\n\n    @Test\n    void example5() throws IOException {\n        considerExample(\"Example5_original\");\n        cu.getClassByName(\"A\").get().getFieldByName(\"a\").get().getVariable(0).setInitializer(new NullLiteralExpr());\n        assertTransformed(\"Example5\", cu);\n    }\n\n    @Test\n    void example6() throws IOException {\n        considerExample(\"Example6_original\");\n        cu.getClassByName(\"A\").get().getFieldByName(\"a\").get().getVariable(0).setName(\"someOtherName\");\n        assertTransformed(\"Example6\", cu);\n    }\n\n    @Test\n    void example7() throws IOException {\n        considerExample(\"Example7_original\");\n        cu.getClassByName(\"A\").get().getFieldByName(\"a\").get().getVariable(0).setType(new ArrayType(PrimitiveType.intType()));\n        assertTransformed(\"Example7\", cu);\n    }\n\n    @Test\n    void example8() throws IOException {\n        considerExample(\"Example8_original\");\n        FieldDeclaration fd = 
cu.getClassByName(\"A\").get().getMember(0).asFieldDeclaration();\n        fd.addVariable(new VariableDeclarator(PrimitiveType.intType(), \"b\"));\n        assertTransformed(\"Example8\", cu);\n    }\n\n    @Test\n    void example9() throws IOException {\n        considerExample(\"Example9_original\");\n        FieldDeclaration fd = cu.getClassByName(\"A\").get().getMember(0).asFieldDeclaration();\n        fd.addVariable(new VariableDeclarator(new ArrayType(PrimitiveType.intType()), \"b\"));\n        assertTransformed(\"Example9\", cu);\n    }\n\n    @Test\n    void example10() throws IOException {\n        considerExample(\"Example10_original\");\n        cu.getClassByName(\"A\").get().getMembers().remove(0);\n        assertTransformed(\"Example10\", cu);\n    }\n\n    @Test\n    void exampleParam1() throws IOException {\n        considerExample(\"Example_param1_original\");\n        MethodDeclaration md = cu.getClassByName(\"A\").get().getMember(0).asMethodDeclaration();\n        md.addParameter(\"int\", \"p1\");\n        assertTransformed(\"Example_param1\", cu);\n    }\n\n    @Test\n    void exampleParam2() throws IOException {\n        considerExample(\"Example_param1_original\");\n        MethodDeclaration md = cu.getClassByName(\"A\").get().getMember(0).asMethodDeclaration();\n        md.addParameter(new ArrayType(PrimitiveType.intType()), \"p1\");\n        md.addParameter(\"char\", \"p2\");\n        assertTransformed(\"Example_param2\", cu);\n    }\n\n    @Test\n    void exampleParam3() throws IOException {\n        considerExample(\"Example_param3_original\");\n        MethodDeclaration md = cu.getClassByName(\"A\").get().getMember(0).asMethodDeclaration();\n        md.getParameters().remove(0);\n        assertTransformed(\"Example_param3\", cu);\n    }\n\n    @Test\n    void exampleParam4() throws IOException {\n        considerExample(\"Example_param3_original\");\n        MethodDeclaration md = cu.getClassByName(\"A\").get().getMember(0).asMethodDeclaration();\n        md.getParameters().remove(1);\n        assertTransformed(\"Example_param4\", cu);\n    }\n\n    @Test\n    void exampleParam5() throws IOException {\n        considerExample(\"Example_param3_original\");\n        MethodDeclaration md = cu.getClassByName(\"A\").get().getMember(0).asMethodDeclaration();\n        md.setType(PrimitiveType.intType());\n        assertTransformed(\"Example_param5b\", cu);\n        md.getBody().get().getStatements().add(new ReturnStmt(new NameExpr(\"p1\")));\n        assertTransformed(\"Example_param5\", cu);\n    }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":224}
{"diff_hunk":"@@ -31,17 +31,25 @@ import org.springframework.context.annotation.Bean;\n import org.springframework.context.annotation.Configuration;\n import org.springframework.core.io.support.PathMatchingResourcePatternResolver;\n \n+import java.io.File;\n+import java.io.IOException;\n+import java.net.MalformedURLException;\n+import java.net.URISyntaxException;\n+import java.net.URL;\n+import java.util.ArrayList;\n+import java.util.List;\n+\n @Configuration\n public class RepositoryConfiguration {\n-    @Value(\"${application.repository.configuration}\")\n-    private String repositoryConfiguration;\n-\n     @Value(\"${application.repository.forceIncompatibleOperatingSystems:false}\")\n     private boolean enforceUncompatibleOperatingSystems;\n \n     @Value(\"${application.user.cache}\")\n     private String cacheDirectoryPath;\n \n+    @Value(\"${application.repository.list}\")\n+    private String repositoryListPath;\n+\n     @Autowired\n     private MultithreadingConfiguration multithreadingConfiguration;\n ","source_code":"\/*\n * Copyright (C) 2015-2017 P\u00c2RIS Quentin\n *\n * This program is free software; you can redistribute it and\/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation; either version 2 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License along\n * with this program; if not, write to the Free Software Foundation, Inc.,\n * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n *\/\n\npackage org.phoenicis.repository;\n\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport org.phoenicis.repository.repositoryTypes.BackgroundRepository;\nimport org.phoenicis.repository.repositoryTypes.ClasspathRepository;\nimport org.phoenicis.repository.repositoryTypes.LocalRepository;\nimport org.phoenicis.multithreading.MultithreadingConfiguration;\nimport org.phoenicis.tools.ToolsConfiguration;\nimport org.phoenicis.tools.files.FileUtilities;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.beans.factory.annotation.Value;\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.core.io.support.PathMatchingResourcePatternResolver;\n\n@Configuration\npublic class RepositoryConfiguration {\n    @Value(\"${application.repository.configuration}\")\n    private String repositoryConfiguration;\n\n    @Value(\"${application.repository.forceIncompatibleOperatingSystems:false}\")\n    private boolean enforceUncompatibleOperatingSystems;\n\n    @Value(\"${application.user.cache}\")\n    private String cacheDirectoryPath;\n\n    @Autowired\n    private MultithreadingConfiguration multithreadingConfiguration;\n\n    @Autowired\n    private ToolsConfiguration toolsConfiguration;\n\n    @Autowired\n    private FileUtilities fileUtilities;\n\n    @Bean\n    public RepositoryManager repositoryManager() {\n        RepositoryManager repositoryManager = new DefaultRepositoryManager(\n                multithreadingConfiguration.appsExecutorService(), enforceUncompatibleOperatingSystems,\n                toolsConfiguration, cacheDirectoryPath, fileUtilities, 
localRepositoryFactory(),\n                classPathRepositoryFactory(), backgroundRepositoryFactory());\n\n        \/\/ set initial repositories\n        repositoryManager.addRepositories(this.repositoryConfiguration.split(\";\"));\n\n        return repositoryManager;\n    }\n\n    @Bean\n    ClasspathRepository.Factory classPathRepositoryFactory() {\n        return new ClasspathRepository.Factory(objectMapper(), new PathMatchingResourcePatternResolver());\n    }\n\n    @Bean\n    LocalRepository.Factory localRepositoryFactory() {\n        return new LocalRepository.Factory(objectMapper());\n    }\n\n    @Bean\n    BackgroundRepository.Factory backgroundRepositoryFactory() {\n        return new BackgroundRepository.Factory();\n    }\n\n    @Bean\n    ObjectMapper objectMapper() {\n        return new ObjectMapper();\n    }\n\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":225}
{"diff_hunk":"@@ -24,6 +24,7 @@ import java.io.File;\n import java.io.IOException;\n import java.time.LocalDateTime;\n import java.time.OffsetDateTime;\n+import java.util.Collections;\n import java.util.List;\n import java.util.TimeZone;\n import org.apache.iceberg.Files;","source_code":"\/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *   http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied.  See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\/\n\npackage org.apache.iceberg.data.orc;\n\nimport com.google.common.collect.Lists;\nimport java.io.File;\nimport java.io.IOException;\nimport java.time.LocalDateTime;\nimport java.time.OffsetDateTime;\nimport java.util.List;\nimport java.util.TimeZone;\nimport org.apache.iceberg.Files;\nimport org.apache.iceberg.Schema;\nimport org.apache.iceberg.data.DataTest;\nimport org.apache.iceberg.data.DataTestHelpers;\nimport org.apache.iceberg.data.GenericRecord;\nimport org.apache.iceberg.data.RandomGenericData;\nimport org.apache.iceberg.data.Record;\nimport org.apache.iceberg.io.CloseableIterable;\nimport org.apache.iceberg.io.FileAppender;\nimport org.apache.iceberg.orc.ORC;\nimport org.apache.iceberg.types.Types;\nimport org.junit.Assert;\nimport org.junit.Test;\n\nimport static org.apache.iceberg.types.Types.NestedField.required;\n\npublic class TestGenericData extends DataTest {\n\n  @Override\n  protected void writeAndValidate(Schema schema) throws IOException {\n    List expected = RandomGenericData.generate(schema, 100, 0L);\n\n    File testFile = temp.newFile();\n    Assert.assertTrue(\"Delete should succeed\", testFile.delete());\n\n    try (FileAppender writer = ORC.write(Files.localOutput(testFile))\n        .schema(schema)\n        .createWriterFunc(GenericOrcWriter::buildWriter)\n        .build()) {\n      for (Record rec : expected) {\n        writer.add(rec);\n      }\n    }\n\n    List rows;\n    try (CloseableIterable reader = ORC.read(Files.localInput(testFile))\n        .project(schema)\n        .createReaderFunc(fileSchema -> GenericOrcReader.buildReader(schema, fileSchema))\n        .build()) {\n      rows = Lists.newArrayList(reader);\n    }\n\n    for (int i = 0; i < expected.size(); i += 1) {\n      DataTestHelpers.assertEquals(schema.asStruct(), expected.get(i), rows.get(i));\n    }\n  }\n\n  @Test\n  public void writeAndValidateTimestamps() throws IOException {\n    Schema timestampSchema = new Schema(\n        required(1, \"tsTzCol\", Types.TimestampType.withZone()),\n        required(2, \"tsCol\", Types.TimestampType.withoutZone())\n    );\n\n    \/\/ Write using America\/New_York timezone\n    TimeZone.setDefault(TimeZone.getTimeZone(\"America\/New_York\"));\n    GenericRecord record1 = GenericRecord.create(timestampSchema);\n    record1.setField(\"tsTzCol\", OffsetDateTime.parse(\"2017-01-16T17:10:34-08:00\"));\n    record1.setField(\"tsCol\", 
LocalDateTime.parse(\"1970-01-01T00:01:00\"));\n    GenericRecord record2 = GenericRecord.create(timestampSchema);\n    record2.setField(\"tsTzCol\", OffsetDateTime.parse(\"2017-05-16T17:10:34-08:00\"));\n    record2.setField(\"tsCol\", LocalDateTime.parse(\"1970-05-01T00:01:00\"));\n    GenericRecord record3 = GenericRecord.create(timestampSchema);\n    record3.setField(\"tsTzCol\", OffsetDateTime.parse(\"1935-01-16T17:10:34-08:00\"));\n    record3.setField(\"tsCol\", LocalDateTime.parse(\"1935-01-01T00:01:00\"));\n    GenericRecord record4 = GenericRecord.create(timestampSchema);\n    record4.setField(\"tsTzCol\", OffsetDateTime.parse(\"1935-05-16T17:10:34-08:00\"));\n    record4.setField(\"tsCol\", LocalDateTime.parse(\"1935-05-01T00:01:00\"));\n\n    File testFile = temp.newFile();\n    Assert.assertTrue(\"Delete should succeed\", testFile.delete());\n\n    try (FileAppender writer = ORC.write(Files.localOutput(testFile))\n        .schema(timestampSchema)\n        .createWriterFunc(GenericOrcWriter::buildWriter)\n        .build()) {\n      writer.add(record1);\n      writer.add(record2);\n      writer.add(record3);\n      writer.add(record4);\n    }\n\n    \/\/ Read using Asia\/Kolkata timezone\n    TimeZone.setDefault(TimeZone.getTimeZone(\"Asia\/Kolkata\"));\n    List rows;\n    try (CloseableIterable reader = ORC.read(Files.localInput(testFile))\n        .project(timestampSchema)\n        .createReaderFunc(fileSchema -> GenericOrcReader.buildReader(timestampSchema, fileSchema))\n        .build()) {\n      rows = Lists.newArrayList(reader);\n    }\n\n    Assert.assertEquals(OffsetDateTime.parse(\"2017-01-17T01:10:34Z\"), rows.get(0).getField(\"tsTzCol\"));\n    Assert.assertEquals(LocalDateTime.parse(\"1970-01-01T00:01:00\"), rows.get(0).getField(\"tsCol\"));\n    Assert.assertEquals(OffsetDateTime.parse(\"2017-05-17T01:10:34Z\"), rows.get(1).getField(\"tsTzCol\"));\n    Assert.assertEquals(LocalDateTime.parse(\"1970-05-01T00:01:00\"), rows.get(1).getField(\"tsCol\"));\n    Assert.assertEquals(OffsetDateTime.parse(\"1935-01-17T01:10:34Z\"), rows.get(2).getField(\"tsTzCol\"));\n    Assert.assertEquals(LocalDateTime.parse(\"1935-01-01T00:01:00\"), rows.get(2).getField(\"tsCol\"));\n    Assert.assertEquals(OffsetDateTime.parse(\"1935-05-17T01:10:34Z\"), rows.get(3).getField(\"tsTzCol\"));\n    Assert.assertEquals(LocalDateTime.parse(\"1935-05-01T00:01:00\"), rows.get(3).getField(\"tsCol\"));\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":226}
{"diff_hunk":"@@ -57,6 +57,9 @@ public class AzkabanCommonModule extends AbstractModule {\n     bind(ProjectLoader.class).to(JdbcProjectLoader.class).in(Scopes.SINGLETON);\n     bind(Props.class).toInstance(props);\n     bind(Storage.class).to(resolveStorageClassType()).in(Scopes.SINGLETON);\n+    bind(AzDBOperator.class).to(AzDBOperatorImpl.class).in(Scopes.SINGLETON);\n+    \/\/todo kunkun-tang : Consider both H2 DataSource and MysqlDatasource case.\n+    bind(AzkabanDataSource.class).toInstance(dataSource);\n   }\n \n   public Class resolveStorageClassType() {","source_code":"\/*\n * Copyright 2017 LinkedIn Corp.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n * use this file except in compliance with the License. You may obtain a copy of\n * the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n * License for the specific language governing permissions and limitations under\n * the License.\n *\n *\/\npackage azkaban;\n\nimport azkaban.project.JdbcProjectLoader;\nimport azkaban.project.ProjectLoader;\nimport azkaban.spi.Storage;\nimport azkaban.spi.StorageException;\nimport azkaban.storage.LocalStorage;\nimport azkaban.storage.StorageConfig;\nimport azkaban.storage.StorageImplementationType;\nimport azkaban.utils.Props;\nimport com.google.inject.AbstractModule;\nimport com.google.inject.Inject;\nimport com.google.inject.Provides;\nimport com.google.inject.Scopes;\nimport com.google.inject.Singleton;\nimport java.io.File;\n\nimport static azkaban.storage.StorageImplementationType.*;\n\n\npublic class AzkabanCommonModule extends AbstractModule {\n  private final Props props;\n  \/**\n   * Storage Implementation\n   * This can be any of the {@link StorageImplementationType} values in which case {@link StorageFactory} will create\n   * the appropriate storage instance. Or one can feed in a custom implementation class using the full qualified\n   * path required by a classloader.\n   *\n   * examples: LOCAL, DATABASE, azkaban.storage.MyFavStorage\n   *\n   *\/\n  private final String storageImplementation;\n\n  public AzkabanCommonModule(Props props) {\n    this.props = props;\n    this.storageImplementation = props.getString(Constants.ConfigurationKeys.AZKABAN_STORAGE_TYPE, LOCAL.name());\n  }\n\n  @Override\n  protected void configure() {\n    bind(ProjectLoader.class).to(JdbcProjectLoader.class).in(Scopes.SINGLETON);\n    bind(Props.class).toInstance(props);\n    bind(Storage.class).to(resolveStorageClassType()).in(Scopes.SINGLETON);\n  }\n\n  public Class resolveStorageClassType() {\n    final StorageImplementationType type = StorageImplementationType.from(storageImplementation);\n    if (type != null) {\n      return type.getImplementationClass();\n    } else {\n      return loadCustomStorageClass(storageImplementation);\n    }\n  }\n\n  private Class loadCustomStorageClass(String storageImplementation) {\n    try {\n      return (Class) Class.forName(storageImplementation);\n    } catch (ClassNotFoundException e) {\n      throw new StorageException(e);\n    }\n  }\n\n  @Inject\n  public @Provides\n  LocalStorage createLocalStorage(StorageConfig config) {\n    return new LocalStorage(new File(config.getBaseDirectoryPath()));\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":227}
{"diff_hunk":"@@ -89,11 +89,11 @@ public class PkiKeyStoreConfiguration {\n \n     private String keyStoreType = DEFAULT_KEYSTORE_TYPE;\n     private Path keyStorePath;\n-    private Supplier keyStorePasswordSupplier;\n+    private Path keyStorePasswordPath;\n     private String certificateAlias = DEFAULT_CERTIFICATE_ALIAS;\n     private String trustStoreType = DEFAULT_KEYSTORE_TYPE;\n     private Path trustStorePath;\n-    private Supplier trustStorePasswordSupplier;\n+    private Path trustStorePasswordPath;\n     private Path crlFilePath;\n \n     public Builder() {}","source_code":"\/*\n * Copyright ConsenSys AG.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n * the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n * an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n *\/\npackage org.hyperledger.besu.pki.config;\n\nimport static java.util.Objects.requireNonNull;\n\nimport java.nio.file.Path;\nimport java.util.Optional;\nimport java.util.function.Supplier;\n\npublic class PkiKeyStoreConfiguration {\n\n  public static String DEFAULT_KEYSTORE_TYPE = \"PKCS12\";\n  public static String DEFAULT_CERTIFICATE_ALIAS = \"validator\";\n\n  private final String keyStoreType;\n  private final Path keyStorePath;\n  private final Supplier keyStorePasswordSupplier;\n  private final String certificateAlias;\n  private final String trustStoreType;\n  private final Path trustStorePath;\n  private final Supplier trustStorePasswordSupplier;\n  private final Optional crlFilePath;\n\n  public PkiKeyStoreConfiguration(\n      final String keyStoreType,\n      final Path keyStorePath,\n      final Supplier keyStorePasswordSupplier,\n      final String certificateAlias,\n      final String trustStoreType,\n      final Path trustStorePath,\n      final Supplier trustStorePasswordSupplier,\n      final Optional crlFilePath) {\n    this.keyStoreType = keyStoreType;\n    this.keyStorePath = keyStorePath;\n    this.keyStorePasswordSupplier = keyStorePasswordSupplier;\n    this.certificateAlias = certificateAlias;\n    this.trustStoreType = trustStoreType;\n    this.trustStorePath = trustStorePath;\n    this.trustStorePasswordSupplier = trustStorePasswordSupplier;\n    this.crlFilePath = crlFilePath;\n  }\n\n  public String getKeyStoreType() {\n    return keyStoreType;\n  }\n\n  public Path getKeyStorePath() {\n    return keyStorePath;\n  }\n\n  public String getKeyStorePassword() {\n    return null == keyStorePasswordSupplier ? 
null : keyStorePasswordSupplier.get();\n  }\n\n  public String getCertificateAlias() {\n    return certificateAlias;\n  }\n\n  public String getTrustStoreType() {\n    return trustStoreType;\n  }\n\n  public Path getTrustStorePath() {\n    return trustStorePath;\n  }\n\n  public String getTrustStorePassword() {\n    return trustStorePasswordSupplier.get();\n  }\n\n  public Optional getCrlFilePath() {\n    return crlFilePath;\n  }\n\n  public static final class Builder {\n\n    private String keyStoreType = DEFAULT_KEYSTORE_TYPE;\n    private Path keyStorePath;\n    private Supplier keyStorePasswordSupplier;\n    private String certificateAlias = DEFAULT_CERTIFICATE_ALIAS;\n    private String trustStoreType = DEFAULT_KEYSTORE_TYPE;\n    private Path trustStorePath;\n    private Supplier trustStorePasswordSupplier;\n    private Path crlFilePath;\n\n    public Builder() {}\n\n    public Builder withKeyStoreType(final String keyStoreType) {\n      this.keyStoreType = keyStoreType;\n      return this;\n    }\n\n    public Builder withKeyStorePath(final Path keyStorePath) {\n      this.keyStorePath = keyStorePath;\n      return this;\n    }\n\n    public Builder withKeyStorePasswordSupplier(final Supplier keyStorePasswordSupplier) {\n      this.keyStorePasswordSupplier = keyStorePasswordSupplier;\n      return this;\n    }\n\n    public Builder withCertificateAlias(final String certificateAlias) {\n      this.certificateAlias = certificateAlias;\n      return this;\n    }\n\n    public Builder withTrustStoreType(final String trustStoreType) {\n      this.trustStoreType = trustStoreType;\n      return this;\n    }\n\n    public Builder withTrustStorePath(final Path trustStorePath) {\n      this.trustStorePath = trustStorePath;\n      return this;\n    }\n\n    public Builder withTrustStorePasswordSupplier(\n        final Supplier trustStorePasswordSupplier) {\n      this.trustStorePasswordSupplier = trustStorePasswordSupplier;\n      return this;\n    }\n\n    public Builder withCrlFilePath(final Path filePath) {\n      this.crlFilePath = filePath;\n      return this;\n    }\n\n    public PkiKeyStoreConfiguration build() {\n      requireNonNull(keyStoreType, \"Key Store Type must not be null\");\n      requireNonNull(keyStorePasswordSupplier, \"Key Store password supplier must not be null\");\n      return new PkiKeyStoreConfiguration(\n          keyStoreType,\n          keyStorePath,\n          keyStorePasswordSupplier,\n          certificateAlias,\n          trustStoreType,\n          trustStorePath,\n          trustStorePasswordSupplier,\n          Optional.ofNullable(crlFilePath));\n    }\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":228}
{"diff_hunk":"@@ -136,7 +136,7 @@ public class TestLoadBalancer {\n     List servers = new ArrayList();\n     Server server = Mockito.mock(Server.class);\n     servers.add(server);\n-    Mockito.when(serverList.getInitialListOfServers()).thenReturn(servers);\n+    loadBalancer.setServerList(servers);\n \n     TransactionControlFilter filter = Mockito.mock(TransactionControlFilter.class);\n     Mockito.when(filter.getFilteredListOfServers(servers)).thenReturn(servers);","source_code":"\/*\n * Copyright 2017 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *    http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage io.servicecomb.loadbalance;\n\nimport static org.junit.Assert.assertNotNull;\n\nimport java.util.ArrayList;\nimport java.util.List;\n\nimport org.junit.Assert;\nimport org.junit.Test;\nimport org.mockito.Mockito;\n\nimport com.netflix.loadbalancer.AbstractLoadBalancer.ServerGroup;\nimport com.netflix.loadbalancer.IRule;\nimport com.netflix.loadbalancer.Server;\n\nimport io.servicecomb.loadbalance.filter.SimpleTransactionControlFilter;\nimport io.servicecomb.loadbalance.filter.TransactionControlFilter;\n\npublic class TestLoadBalancer {\n\n  private CseServerList serverList = Mockito.mock(CseServerList.class);\n\n  private IRule rule = Mockito.mock(IRule.class);\n\n  private LoadBalancer loadBalancer = new LoadBalancer(serverList, rule);\n\n  @Test\n  public void testLoadBalancerFullOperationWithoutException() {\n\n    List newServers = new ArrayList();\n    Server server = Mockito.mock(Server.class);\n    newServers.add(server);\n\n    loadBalancer.chooseServer();\n\n    Object key = Mockito.mock(Object.class);\n\n    loadBalancer.chooseServer(key);\n    loadBalancer.getAllServers();\n    loadBalancer.getLoadBalancerStats();\n    loadBalancer.getReachableServers();\n\n    assertNotNull(loadBalancer.getAllServers());\n  }\n\n  @Test\n  public void testAddServerException() {\n    boolean status = true;\n    List newServers = new ArrayList();\n    Server server = Mockito.mock(Server.class);\n\n    newServers.add(server);\n\n    try {\n\n      loadBalancer.addServers(newServers);\n    } catch (Exception e) {\n\n      status = false;\n\n      Assert.assertEquals(\"Not implemented.\", e.getMessage());\n    }\n\n    Assert.assertFalse(status);\n  }\n\n  @Test\n  public void testServerListException() {\n    boolean status = true;\n    List newServers = new ArrayList();\n    Server server = Mockito.mock(Server.class);\n\n    newServers.add(server);\n\n    try {\n\n      loadBalancer.getServerList(ServerGroup.ALL);\n    } catch (Exception e) {\n\n      status = false;\n\n      Assert.assertEquals(\"Not implemented.\", e.getMessage());\n    }\n\n    Assert.assertFalse(status);\n  }\n\n  @Test\n  public void testMarkServerDownException() {\n    boolean status = true;\n    List newServers = new ArrayList();\n    Server server = Mockito.mock(Server.class);\n\n    newServers.add(server);\n\n    try {\n\n      loadBalancer.markServerDown(server);\n    } catch (Exception e) {\n\n      status = 
false;\n\n      Assert.assertEquals(\"Not implemented.\", e.getMessage());\n    }\n\n    Assert.assertFalse(status);\n  }\n\n  @Test\n  public void testFilter() {\n    Assert.assertEquals(0, loadBalancer.getFilterSize());\n\n    TransactionControlFilter filter = new SimpleTransactionControlFilter();\n    loadBalancer.putFilter(\"test\", filter);\n    Assert.assertEquals(1, loadBalancer.getFilterSize());\n  }\n\n  @Test\n  public void testGetAllServers() {\n    List servers = new ArrayList();\n    Server server = Mockito.mock(Server.class);\n    servers.add(server);\n    Mockito.when(serverList.getInitialListOfServers()).thenReturn(servers);\n\n    TransactionControlFilter filter = Mockito.mock(TransactionControlFilter.class);\n    Mockito.when(filter.getFilteredListOfServers(servers)).thenReturn(servers);\n    Assert.assertEquals(servers, loadBalancer.getAllServers());\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":229}
{"diff_hunk":"@@ -1,35 +1,10 @@\n #include \"flatbuffers\/grpc.h\"\n #include \"monster_test_generated.h\"\n+#include \"test_assert.h\"\n+#include \"test_builder.h\"\n \n-static int builder_test_error = 0;\n-\n-#define test_assert(condition) do { \\\n-  if(!(condition)) { \\\n-    fprintf(stderr, \"%s:%d: %s failed.\\n\", __FILE__, __LINE__, #condition);\\\n-    builder_test_error = 1;\\\n-  } \\\n-} while(0)\n-\n-using namespace MyGame::Example;\n-\n-const std::string m1_name = \"Cyberdemon\";\n-const Color m1_color = Color_Red;\n-const std::string m2_name = \"Imp\";\n-const Color m2_color = Color_Green;\n-\n-flatbuffers::Offset populate1(flatbuffers::FlatBufferBuilder &builder) {\n-  auto name_offset = builder.CreateString(m1_name);\n-  return CreateMonster(builder, nullptr, 0, 0, name_offset, 0, m1_color);\n-}\n-\n-flatbuffers::Offset populate2(flatbuffers::FlatBufferBuilder &builder) {\n-  auto name_offset = builder.CreateString(m2_name);\n-  return CreateMonster(builder, nullptr, 0, 0, name_offset, 0, m2_color);\n-}\n-\n-bool release_n_verify(flatbuffers::FlatBufferBuilder &fbb, const std::string &expected_name, Color color) {\n-  flatbuffers::DetachedBuffer buf = fbb.Release();\n-  const Monster *monster = flatbuffers::GetRoot(buf.data());\n+bool verify(flatbuffers::grpc::Message &msg, const std::string &expected_name, Color color) {\n+  const Monster *monster = msg.GetRoot();\n   return (monster->name()->str() == expected_name) && (monster->color() == color);\n }\n ","source_code":"#include \"flatbuffers\/grpc.h\"\n#include \"monster_test_generated.h\"\n\nstatic int builder_test_error = 0;\n\n#define test_assert(condition) do { \\\n  if(!(condition)) { \\\n    fprintf(stderr, \"%s:%d: %s failed.\\n\", __FILE__, __LINE__, #condition);\\\n    builder_test_error = 1;\\\n  } \\\n} while(0)\n\nusing namespace MyGame::Example;\n\nconst std::string m1_name = \"Cyberdemon\";\nconst Color m1_color = Color_Red;\nconst std::string m2_name = \"Imp\";\nconst Color m2_color = Color_Green;\n\nflatbuffers::Offset populate1(flatbuffers::FlatBufferBuilder &builder) {\n  auto name_offset = builder.CreateString(m1_name);\n  return CreateMonster(builder, nullptr, 0, 0, name_offset, 0, m1_color);\n}\n\nflatbuffers::Offset populate2(flatbuffers::FlatBufferBuilder &builder) {\n  auto name_offset = builder.CreateString(m2_name);\n  return CreateMonster(builder, nullptr, 0, 0, name_offset, 0, m2_color);\n}\n\nbool release_n_verify(flatbuffers::FlatBufferBuilder &fbb, const std::string &expected_name, Color color) {\n  flatbuffers::DetachedBuffer buf = fbb.Release();\n  const Monster *monster = flatbuffers::GetRoot(buf.data());\n  return (monster->name()->str() == expected_name) && (monster->color() == color);\n}\n\nbool release_n_verify(flatbuffers::grpc::MessageBuilder &mbb, const std::string &expected_name, Color color) {\n  flatbuffers::grpc::Message msg = mbb.ReleaseMessage();\n  const Monster *monster = msg.GetRoot();\n  return (monster->name()->str() == expected_name) && (monster->color() == color);\n}\n\nstruct OwnedAllocator : public flatbuffers::DefaultAllocator {};\n\nstruct TestHeapMessageBuilder : public flatbuffers::FlatBufferBuilder {\n  TestHeapMessageBuilder()\n    : flatbuffers::FlatBufferBuilder(2048, new OwnedAllocator(), true) {}\n};\n\ntemplate \nstruct BuilderTests {\n  static void empty_builder_movector_test() {\n    Builder b1;\n    size_t b1_size = b1.GetSize();\n    Builder b2(std::move(b1));\n    size_t b2_size = b2.GetSize();\n    test_assert(b1_size == 0);\n    
test_assert(b1_size == b2_size);\n  }\n\n  static void nonempty_builder_movector_test() {\n    Builder b1;\n    populate1(b1);\n    size_t b1_size = b1.GetSize();\n    Builder b2(std::move(b1));\n    test_assert(b1_size == b2.GetSize());\n    test_assert(0 == b1.GetSize());\n  }\n\n  static void builder_movector_before_finish_test() {\n    Builder b1;\n    auto root_offset1 = populate1(b1);\n    Builder b2(std::move(b1));\n    b2.Finish(root_offset1);\n    test_assert(release_n_verify(b2, m1_name, m1_color));\n    test_assert(0 == b1.GetSize());\n  }\n\n  static void builder_movector_after_finish_test() {\n    Builder b1;\n    auto root_offset1 = populate1(b1);\n    b1.Finish(root_offset1);\n    Builder b2(std::move(b1));\n    test_assert(release_n_verify(b2, m1_name, m1_color));\n    test_assert(0 == b1.GetSize());\n  }\n\n  static void builder_move_assign_before_finish_test() {\n    Builder b1;\n    auto root_offset1 = populate1(b1);\n    Builder b2;\n    populate2(b2);\n    b2 = std::move(b1);\n    b2.Finish(root_offset1);\n    test_assert(release_n_verify(b2, m1_name, m1_color));\n    test_assert(0 == b1.GetSize());\n  }\n\n  static void builder_move_assign_after_finish_test() {\n    Builder b1;\n    auto root_offset1 = populate1(b1);\n    b1.Finish(root_offset1);\n    Builder b2;\n    auto root_offset2 = populate2(b2);\n    b2.Finish(root_offset2);\n    b2 = std::move(b1);\n    test_assert(release_n_verify(b2, m1_name, m1_color));\n    test_assert(0 == b1.GetSize());\n  }\n\n  static void builder_swap_before_finish_test() {\n    Builder b1;\n    auto root_offset1 = populate1(b1);\n    auto size1 = b1.GetSize();\n    Builder b2;\n    auto root_offset2 = populate2(b2);\n    auto size2 = b2.GetSize();\n    b1.Swap(b2);\n    b1.Finish(root_offset2);\n    b2.Finish(root_offset1);\n    test_assert(b1.GetSize() > size2);\n    test_assert(b2.GetSize() > size1);\n    test_assert(release_n_verify(b1, m2_name, m2_color));\n    test_assert(release_n_verify(b2, m1_name, m1_color));\n  }\n\n  static void builder_swap_after_finish_test() {\n    Builder b1;\n    auto root_offset1 = populate1(b1);\n    b1.Finish(root_offset1);\n    auto size1 = b1.GetSize();\n    Builder b2;\n    auto root_offset2 = populate2(b2);\n    b2.Finish(root_offset2);\n    auto size2 = b2.GetSize();\n    b1.Swap(b2);\n    test_assert(b1.GetSize() == size2);\n    test_assert(b2.GetSize() == size1);\n    test_assert(release_n_verify(b1, m2_name, m2_color));\n    test_assert(release_n_verify(b2, m1_name, m1_color));\n  }\n\n  static void all_tests() {\n    empty_builder_movector_test();\n    nonempty_builder_movector_test();\n    builder_movector_before_finish_test();\n    builder_movector_after_finish_test();\n    builder_move_assign_before_finish_test();\n    builder_move_assign_after_finish_test();\n    builder_swap_before_finish_test();\n    builder_swap_after_finish_test();\n  }\n};\n\nint builder_tests() {\n  BuilderTests::all_tests();\n  BuilderTests::all_tests();\n  BuilderTests::all_tests();\n  return builder_test_error;\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":230}
{"diff_hunk":"@@ -14,6 +14,8 @@\n  *\/\n package org.hyperledger.besu.ethereum.core.encoding;\n \n+import static org.hyperledger.besu.ethereum.core.Transaction.GO_QUORUM_PRIVATE_TRANSACTION_V_VALUE_MAX;\n+import static org.hyperledger.besu.ethereum.core.Transaction.GO_QUORUM_PRIVATE_TRANSACTION_V_VALUE_MIN;\n import static org.hyperledger.besu.ethereum.core.Transaction.REPLAY_PROTECTED_V_BASE;\n import static org.hyperledger.besu.ethereum.core.Transaction.REPLAY_PROTECTED_V_MIN;\n import static org.hyperledger.besu.ethereum.core.Transaction.REPLAY_UNPROTECTED_V_BASE;","source_code":"\/*\n * Copyright ConsenSys AG.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n * the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n * an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n *\/\npackage org.hyperledger.besu.ethereum.core.encoding;\n\nimport static org.hyperledger.besu.ethereum.core.Transaction.REPLAY_PROTECTED_V_BASE;\nimport static org.hyperledger.besu.ethereum.core.Transaction.REPLAY_PROTECTED_V_MIN;\nimport static org.hyperledger.besu.ethereum.core.Transaction.REPLAY_UNPROTECTED_V_BASE;\nimport static org.hyperledger.besu.ethereum.core.Transaction.REPLAY_UNPROTECTED_V_BASE_PLUS_1;\nimport static org.hyperledger.besu.ethereum.core.Transaction.TWO;\n\nimport org.hyperledger.besu.config.experimental.ExperimentalEIPs;\nimport org.hyperledger.besu.crypto.SECP256K1;\nimport org.hyperledger.besu.ethereum.core.Address;\nimport org.hyperledger.besu.ethereum.core.Transaction;\nimport org.hyperledger.besu.ethereum.core.Wei;\nimport org.hyperledger.besu.ethereum.rlp.RLPInput;\n\nimport java.math.BigInteger;\nimport java.util.Optional;\n\nimport org.apache.tuweni.bytes.Bytes;\n\n@FunctionalInterface\npublic interface TransactionRLPDecoder {\n\n  TransactionRLPDecoder FRONTIER = frontierDecoder();\n  TransactionRLPDecoder EIP1559 = eip1559Decoder();\n\n  static Transaction decodeTransaction(final RLPInput input) {\n    return (ExperimentalEIPs.eip1559Enabled ? EIP1559 : FRONTIER).decode(input);\n  }\n\n  Transaction decode(RLPInput input);\n\n  static TransactionRLPDecoder frontierDecoder() {\n    return input -> {\n      input.enterList();\n      final Transaction.Builder builder =\n          Transaction.builder()\n              .nonce(input.readLongScalar())\n              .gasPrice(Wei.of(input.readUInt256Scalar()))\n              .gasLimit(input.readLongScalar())\n              .to(input.readBytes(v -> v.size() == 0 ? 
null : Address.wrap(v)))\n              .value(Wei.of(input.readUInt256Scalar()))\n              .payload(input.readBytes());\n\n      final BigInteger v = input.readBigIntegerScalar();\n      final byte recId;\n      Optional chainId = Optional.empty();\n      if (v.equals(REPLAY_UNPROTECTED_V_BASE) || v.equals(REPLAY_UNPROTECTED_V_BASE_PLUS_1)) {\n        recId = v.subtract(REPLAY_UNPROTECTED_V_BASE).byteValueExact();\n      } else if (v.compareTo(REPLAY_PROTECTED_V_MIN) > 0) {\n        chainId = Optional.of(v.subtract(REPLAY_PROTECTED_V_BASE).divide(TWO));\n        recId =\n            v.subtract(TWO.multiply(chainId.get()).add(REPLAY_PROTECTED_V_BASE)).byteValueExact();\n      } else {\n        throw new RuntimeException(\n            String.format(\"An unsupported encoded `v` value of %s was found\", v));\n      }\n      final BigInteger r = input.readUInt256Scalar().toBytes().toUnsignedBigInteger();\n      final BigInteger s = input.readUInt256Scalar().toBytes().toUnsignedBigInteger();\n      final SECP256K1.Signature signature = SECP256K1.Signature.create(r, s, recId);\n\n      input.leaveList();\n\n      chainId.ifPresent(builder::chainId);\n      return builder.signature(signature).build();\n    };\n  }\n\n  static TransactionRLPDecoder eip1559Decoder() {\n    return input -> {\n      input.enterList();\n\n      final Transaction.Builder builder =\n          Transaction.builder()\n              .nonce(input.readLongScalar())\n              .gasPrice(Wei.of(input.readUInt256Scalar()))\n              .gasLimit(input.readLongScalar())\n              .to(input.readBytes(v -> v.size() == 0 ? null : Address.wrap(v)))\n              .value(Wei.of(input.readUInt256Scalar()))\n              .payload(input.readBytes());\n\n      final Bytes maybeGasPremiumOrV = input.readBytes();\n      final Bytes maybeFeeCapOrR = input.readBytes();\n      final Bytes maybeVOrS = input.readBytes();\n      final BigInteger v, r, s;\n      \/\/ if this is the end of the list we are processing a legacy transaction\n      if (input.isEndOfCurrentList()) {\n        v = maybeGasPremiumOrV.toUnsignedBigInteger();\n        r = maybeFeeCapOrR.toUnsignedBigInteger();\n        s = maybeVOrS.toUnsignedBigInteger();\n      } else {\n        \/\/ otherwise this is an EIP-1559 transaction\n        builder\n            .gasPremium(Wei.of(maybeGasPremiumOrV.toBigInteger()))\n            .feeCap(Wei.of(maybeFeeCapOrR.toBigInteger()));\n        v = maybeVOrS.toBigInteger();\n        r = input.readUInt256Scalar().toBytes().toUnsignedBigInteger();\n        s = input.readUInt256Scalar().toBytes().toUnsignedBigInteger();\n      }\n      final byte recId;\n      Optional chainId = Optional.empty();\n      if (v.equals(REPLAY_UNPROTECTED_V_BASE) || v.equals(REPLAY_UNPROTECTED_V_BASE_PLUS_1)) {\n        recId = v.subtract(REPLAY_UNPROTECTED_V_BASE).byteValueExact();\n      } else if (v.compareTo(REPLAY_PROTECTED_V_MIN) > 0) {\n        chainId = Optional.of(v.subtract(REPLAY_PROTECTED_V_BASE).divide(TWO));\n        recId =\n            v.subtract(TWO.multiply(chainId.get()).add(REPLAY_PROTECTED_V_BASE)).byteValueExact();\n      } else {\n        throw new RuntimeException(\n            String.format(\"An unsupported encoded `v` value of %s was found\", v));\n      }\n      final SECP256K1.Signature signature = SECP256K1.Signature.create(r, s, recId);\n      input.leaveList();\n      chainId.ifPresent(builder::chainId);\n      return builder.signature(signature).build();\n    };\n  
}\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":231}
{"diff_hunk":"@@ -133,8 +133,20 @@ public class StorageCallbacksImpl implements StorageCallbacks {\n                     + \" FROM \" + PodDBAdapter.TABLE_NAME_FEEDS\n                     + \" WHERE \" + PodDBAdapter.TABLE_NAME_FEEDS + \".\" + PodDBAdapter.KEY_ID\n                     + \" = \" + PodDBAdapter.TABLE_NAME_FEED_ITEMS + \".\" + PodDBAdapter.KEY_FEED + \")\");\n+\n             db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEEDS\n                     + \" ADD COLUMN \" + PodDBAdapter.KEY_HIDE + \" TEXT\");\n+\n+            \n+            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEEDS\n+                    + \" ADD COLUMN \" + PodDBAdapter.KEY_LAST_UPDATE_FAILED + \" INTEGER DEFAULT 0\");\n+\n+            \/\/ create indexes\n+            db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDITEMS_FEED);\n+            db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDITEMS_IMAGE);\n+            db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDMEDIA_FEEDITEM);\n+            db.execSQL(PodDBAdapter.CREATE_INDEX_QUEUE_FEEDITEM);\n+            db.execSQL(PodDBAdapter.CREATE_INDEX_SIMPLECHAPTERS_FEEDITEM);\n         }\n     }\n }","source_code":"package de.danoeh.antennapod.config;\n\n\nimport android.content.ContentValues;\nimport android.database.Cursor;\nimport android.database.sqlite.SQLiteDatabase;\nimport android.util.Log;\n\nimport de.danoeh.antennapod.core.StorageCallbacks;\nimport de.danoeh.antennapod.core.storage.PodDBAdapter;\n\npublic class StorageCallbacksImpl implements StorageCallbacks {\n\n    @Override\n    public int getDatabaseVersion() {\n        return 15;\n    }\n\n    @Override\n    public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {\n        Log.w(\"DBAdapter\", \"Upgrading from version \" + oldVersion + \" to \"\n                + newVersion + \".\");\n        if (oldVersion <= 1) {\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEEDS + \" ADD COLUMN \"\n                    + PodDBAdapter.KEY_TYPE + \" TEXT\");\n        }\n        if (oldVersion <= 2) {\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_LINK + \" TEXT\");\n        }\n        if (oldVersion <= 3) {\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEED_ITEMS\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_ITEM_IDENTIFIER + \" TEXT\");\n        }\n        if (oldVersion <= 4) {\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEEDS + \" ADD COLUMN \"\n                    + PodDBAdapter.KEY_FEED_IDENTIFIER + \" TEXT\");\n        }\n        if (oldVersion <= 5) {\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_DOWNLOAD_LOG\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_REASON_DETAILED + \" TEXT\");\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_DOWNLOAD_LOG\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_DOWNLOADSTATUS_TITLE + \" TEXT\");\n        }\n        if (oldVersion <= 6) {\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_CHAPTER_TYPE + \" INTEGER\");\n        }\n        if (oldVersion <= 7) {\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEED_MEDIA\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_PLAYBACK_COMPLETION_DATE\n                    + \" INTEGER\");\n        }\n        if (oldVersion <= 8) {\n            final int KEY_ID_POSITION = 
0;\n            final int KEY_MEDIA_POSITION = 1;\n\n            \/\/ Add feeditem column to feedmedia table\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEED_MEDIA\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_FEEDITEM\n                    + \" INTEGER\");\n            Cursor feeditemCursor = db.query(PodDBAdapter.TABLE_NAME_FEED_ITEMS,\n                    new String[]{PodDBAdapter.KEY_ID, PodDBAdapter.KEY_MEDIA}, \"? > 0\",\n                    new String[]{PodDBAdapter.KEY_MEDIA}, null, null, null);\n            if (feeditemCursor.moveToFirst()) {\n                db.beginTransaction();\n                ContentValues contentValues = new ContentValues();\n                do {\n                    long mediaId = feeditemCursor.getLong(KEY_MEDIA_POSITION);\n                    contentValues.put(PodDBAdapter.KEY_FEEDITEM, feeditemCursor.getLong(KEY_ID_POSITION));\n                    db.update(PodDBAdapter.TABLE_NAME_FEED_MEDIA, contentValues, PodDBAdapter.KEY_ID + \"=?\", new String[]{String.valueOf(mediaId)});\n                    contentValues.clear();\n                } while (feeditemCursor.moveToNext());\n                db.setTransactionSuccessful();\n                db.endTransaction();\n            }\n            feeditemCursor.close();\n        }\n        if (oldVersion <= 9) {\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEEDS\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_AUTO_DOWNLOAD\n                    + \" INTEGER DEFAULT 1\");\n        }\n        if (oldVersion <= 10) {\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEEDS\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_FLATTR_STATUS\n                    + \" INTEGER\");\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEED_ITEMS\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_FLATTR_STATUS\n                    + \" INTEGER\");\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEED_MEDIA\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_PLAYED_DURATION\n                    + \" INTEGER\");\n        }\n        if (oldVersion <= 11) {\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEEDS\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_USERNAME\n                    + \" TEXT\");\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEEDS\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_PASSWORD\n                    + \" TEXT\");\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEED_ITEMS\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_IMAGE\n                    + \" INTEGER\");\n        }\n        if (oldVersion <= 12) {\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEEDS\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_IS_PAGED + \" INTEGER DEFAULT 0\");\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEEDS\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_NEXT_PAGE_LINK + \" TEXT\");\n        }\n        if (oldVersion <= 13) {\n            \/\/ remove duplicate rows in \"Chapters\" table that were created because of a bug.\n            db.execSQL(String.format(\"DELETE FROM %s WHERE %s NOT IN \" +\n                            \"(SELECT MIN(%s) as %s FROM %s GROUP BY %s,%s,%s,%s,%s)\",\n                    PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS,\n                    PodDBAdapter.KEY_ID,\n                    PodDBAdapter.KEY_ID,\n      
              PodDBAdapter.KEY_ID,\n                    PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS,\n                    PodDBAdapter.KEY_TITLE,\n                    PodDBAdapter.KEY_START,\n                    PodDBAdapter.KEY_FEEDITEM,\n                    PodDBAdapter.KEY_LINK,\n                    PodDBAdapter.KEY_CHAPTER_TYPE));\n        }\n        if(oldVersion <= 14) {\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEED_ITEMS\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_AUTO_DOWNLOAD + \" INTEGER\");\n            db.execSQL(\"UPDATE \" + PodDBAdapter.TABLE_NAME_FEED_ITEMS\n                    + \" SET \" + PodDBAdapter.KEY_AUTO_DOWNLOAD + \" = \"\n                    + \"(SELECT \" + PodDBAdapter.KEY_AUTO_DOWNLOAD\n                    + \" FROM \" + PodDBAdapter.TABLE_NAME_FEEDS\n                    + \" WHERE \" + PodDBAdapter.TABLE_NAME_FEEDS + \".\" + PodDBAdapter.KEY_ID\n                    + \" = \" + PodDBAdapter.TABLE_NAME_FEED_ITEMS + \".\" + PodDBAdapter.KEY_FEED + \")\");\n            db.execSQL(\"ALTER TABLE \" + PodDBAdapter.TABLE_NAME_FEEDS\n                    + \" ADD COLUMN \" + PodDBAdapter.KEY_HIDE + \" TEXT\");\n        }\n    }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":232}
{"diff_hunk":"@@ -65,7 +65,7 @@ public class FileAnalyser {\n \n         try {\n             byte[] data = Files.readAllBytes(path);\n-            return Magic.getMagicMatch(data);\n+            return Magic.getMagicMatch(data, true);\n         } catch (MagicException | MagicParseException | IOException e) {\n             throw new IllegalStateException(\"Unable to detect mimetype of the file\", e);\n         }","source_code":"\/*\n * Copyright (C) 2015-2017 P\u00c2RIS Quentin\n *\n * This program is free software; you can redistribute it and\/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation; either version 2 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License along\n * with this program; if not, write to the Free Software Foundation, Inc.,\n * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n *\/\n\npackage org.phoenicis.tools.files;\n\nimport net.sf.jmimemagic.*;\nimport org.apache.commons.io.FileUtils;\nimport org.phoenicis.configuration.security.Safe;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport javax.activation.MimetypesFileTypeMap;\nimport java.io.File;\nimport java.io.IOException;\nimport java.nio.file.Files;\nimport java.nio.file.Path;\nimport java.nio.file.Paths;\n\n@Safe\npublic class FileAnalyser {\n    private static final Logger LOGGER = LoggerFactory.getLogger(FileAnalyser.class);\n\n    \/**\n     * Identify which line delimiter is used in a file\n     * \n     * @param fileContent\n     *            string to analyse\n     * @return the line separator as a string. Null if the file has no line\n     *         separator\n     *\/\n    public static String identifyLineDelimiter(String fileContent) {\n        if (fileContent.matches(\"(?s).*(\\\\r\\\\n).*\")) { \/\/ Windows \/\/$NON-NLS-1$\n            return \"\\r\\n\"; \/\/$NON-NLS-1$\n        } else if (fileContent.matches(\"(?s).*(\\\\n).*\")) { \/\/ Unix\/Linux \/\/$NON-NLS-1$\n            return \"\\n\"; \/\/$NON-NLS-1$\n        } else if (fileContent.matches(\"(?s).*(\\\\r).*\")) { \/\/ Legacy mac os 9. Newer OS X use \\n \/\/$NON-NLS-1$\n            return \"\\r\"; \/\/$NON-NLS-1$\n        } else {\n            return \"\\n\"; \/\/ fallback onto '\\n' if nothing matches. 
\/\/$NON-NLS-1$\n        }\n    }\n\n    public static String identifyLineDelimiter(File fileToAnalyse) throws IOException {\n        final String fileContent = FileUtils.readFileToString(fileToAnalyse);\n        return identifyLineDelimiter(fileContent);\n    }\n\n    private MagicMatch getMatch(File inputFile) throws MagicMatchNotFoundException {\n        final Path path = Paths.get(inputFile.getAbsolutePath());\n\n        try {\n            byte[] data = Files.readAllBytes(path);\n            return Magic.getMagicMatch(data);\n        } catch (MagicException | MagicParseException | IOException e) {\n            throw new IllegalStateException(\"Unable to detect mimetype of the file\", e);\n        }\n    }\n\n    public String getDescription(File inputFile) {\n        try {\n            return getMatch(inputFile).getDescription();\n        } catch (MagicMatchNotFoundException e) {\n            throw new IllegalStateException(\"Unable to detect mimetype of the file\", e);\n        }\n    }\n\n    public String getMimetype(File inputFile) {\n        try {\n            return getMatch(inputFile).getMimeType();\n        } catch (MagicMatchNotFoundException e) {\n            LOGGER.debug(\"Failed to get Mime Type\", e);\n            final MimetypesFileTypeMap mimeTypesMap = new MimetypesFileTypeMap();\n            return mimeTypesMap.getContentType(inputFile);\n        }\n    }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":233}
{"diff_hunk":"@@ -119,6 +119,8 @@ public class V2Request extends SolrRequest implements MapWriter {\n     private boolean useBinary = false;\n \n     private boolean forceV2EndPoint = false;\n+    private ResponseParser parser;\n+    private String mimeType;\n \n     \/**\n      * Create a Builder object based on the provided resource.","source_code":"\/*\n * Licensed to the Apache Software Foundation (ASF) under one or more\n * contributor license agreements.  See the NOTICE file distributed with\n * this work for additional information regarding copyright ownership.\n * The ASF licenses this file to You under the Apache License, Version 2.0\n * (the \"License\"); you may not use this file except in compliance with\n * the License.  You may obtain a copy of the License at\n *\n *     http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage org.apache.solr.client.solrj.request;\n\nimport java.io.IOException;\nimport java.io.OutputStream;\nimport java.util.concurrent.atomic.AtomicLong;\nimport java.util.regex.Matcher;\nimport java.util.regex.Pattern;\n\nimport org.apache.solr.client.solrj.SolrClient;\nimport org.apache.solr.client.solrj.SolrRequest;\nimport org.apache.solr.client.solrj.response.V2Response;\nimport org.apache.solr.common.MapWriter;\nimport org.apache.solr.common.params.SolrParams;\nimport org.apache.solr.common.util.JavaBinCodec;\nimport org.apache.solr.common.util.Utils;\n\nimport static org.apache.solr.common.params.CommonParams.JAVABIN_MIME;\nimport static org.apache.solr.common.params.CommonParams.JSON_MIME;\n\npublic class V2Request extends SolrRequest implements MapWriter {\n  \/\/only for debugging purposes\n  public static final ThreadLocal v2Calls = new ThreadLocal<>();\n  static final Pattern COLL_REQ_PATTERN = Pattern.compile(\"\/(c|collections)\/([^\/])+\/(?!shards)\");\n  private Object payload;\n  private SolrParams solrParams;\n  public final boolean useBinary;\n  private String collection;\n  private boolean forceV2 = false;\n  private boolean isPerCollectionRequest = false;\n\n  private V2Request(METHOD m, String resource, boolean useBinary) {\n    super(m, resource);\n    Matcher matcher = COLL_REQ_PATTERN.matcher(getPath());\n    if (matcher.find()) {\n      this.collection = matcher.group(2);\n      isPerCollectionRequest = true;\n    }\n    this.useBinary = useBinary;\n\n  }\n\n  public boolean isForceV2(){\n    return forceV2;\n  }\n\n  @Override\n  public SolrParams getParams() {\n    return solrParams;\n  }\n\n  @Override\n  public RequestWriter.ContentWriter getContentWriter(String s) {\n    if (v2Calls.get() != null) v2Calls.get().incrementAndGet();\n    if (payload == null) return null;\n    if (payload instanceof String) {\n      return new RequestWriter.StringPayloadContentWriter((String) payload, JSON_MIME);\n    }\n    return new RequestWriter.ContentWriter() {\n      @Override\n      public void write(OutputStream os) throws IOException {\n        if (useBinary) {\n          new JavaBinCodec().marshal(payload, os);\n        } else {\n          Utils.writeJson(payload, os, false);\n        }\n      }\n\n      @Override\n      public String getContentType() {\n        return useBinary ? 
JAVABIN_MIME : JSON_MIME;\n      }\n    };\n  }\n\n  public boolean isPerCollectionRequest() {\n    return isPerCollectionRequest;\n  }\n\n  @Override\n  public String getCollection() {\n    return collection;\n  }\n\n  @Override\n  protected V2Response createResponse(SolrClient client) {\n    return new V2Response();\n  }\n\n  @Override\n  public void writeMap(EntryWriter ew) throws IOException {\n    ew.put(\"method\", getMethod().toString());\n    ew.put(\"path\", getPath());\n    ew.putIfNotNull(\"params\", solrParams);\n    ew.putIfNotNull(\"command\", payload);\n  }\n\n  public static class Builder {\n    private String resource;\n    private METHOD method = METHOD.GET;\n    private Object payload;\n    private SolrParams params;\n    private boolean useBinary = false;\n\n    private boolean forceV2EndPoint = false;\n\n    \/**\n     * Create a Builder object based on the provided resource.\n     * The default method is GET.\n     *\n     * @param resource resource of the request for example \"\/collections\" or \"\/cores\/core-name\"\n     *\/\n    public Builder(String resource) {\n      if (!resource.startsWith(\"\/\")) resource = \"\/\" + resource;\n      this.resource = resource;\n    }\n\n    public Builder withMethod(METHOD m) {\n      this.method = m;\n      return this;\n    }\n\n    \/**\n     * Only for testing. It's always true otherwise\n     *\/\n    public Builder forceV2(boolean flag) {\n      forceV2EndPoint = flag;\n      return this;\n    }\n\n    \/**\n     * Set payload for request.\n     *\n     * @param payload as UTF-8 String\n     * @return builder object\n     *\/\n    public Builder withPayload(String payload) {\n      if (payload != null) {\n        this.payload = payload;\n      }\n      return this;\n    }\n\n    public Builder withPayload(Object payload) {\n      this.payload = payload;\n      return this;\n    }\n\n\n    public Builder withParams(SolrParams params) {\n      this.params = params;\n      return this;\n    }\n\n    public Builder useBinary(boolean flag) {\n      this.useBinary = flag;\n      return this;\n    }\n\n    public V2Request build() {\n      V2Request v2Request = new V2Request(method, resource, useBinary);\n      v2Request.solrParams = params;\n      v2Request.payload = payload;\n      v2Request.forceV2 = forceV2EndPoint;\n      return v2Request;\n    }\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":234}
{"diff_hunk":"@@ -27,7 +27,10 @@\n package com.salesforce.androidsdk.rest;\n \n import java.io.IOException;\n+import java.util.HashMap;\n+import java.util.Map;\n \n+import org.apache.http.Header;\n import org.apache.http.HttpEntity;\n import org.apache.http.HttpResponse;\n import org.apache.http.HttpStatus;","source_code":"\/*\n * Copyright (c) 2012, salesforce.com, inc.\n * All rights reserved.\n * Redistribution and use of this software in source and binary forms, with or\n * without modification, are permitted provided that the following conditions\n * are met:\n * - Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following disclaimer.\n * - Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and\/or other materials provided with the distribution.\n * - Neither the name of salesforce.com, inc. nor the names of its contributors\n * may be used to endorse or promote products derived from this software without\n * specific prior written permission of salesforce.com, inc.\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\/\npackage com.salesforce.androidsdk.rest;\n\nimport java.io.IOException;\n\nimport org.apache.http.HttpEntity;\nimport org.apache.http.HttpResponse;\nimport org.apache.http.HttpStatus;\nimport org.apache.http.ParseException;\nimport org.apache.http.protocol.HTTP;\nimport org.apache.http.util.EntityUtils;\nimport org.json.JSONArray;\nimport org.json.JSONException;\nimport org.json.JSONObject;\n\nimport com.android.volley.NetworkResponse;\n\n\n\/**\n * RestResponse: Class to represent any REST response.\n * \n *\/\npublic class RestResponse {\n\t\n\tprivate final int statusCode;\n\tprivate final HttpResponse response;\n\n\t\/\/ Populated when \"consume\" is called\n\tprivate byte[] responseAsBytes;\n\tprivate String responseCharSet;\n\t\n\t\/\/ Lazily computed\n\tprivate String responseAsString;\n\tprivate JSONObject responseAsJSONObject;\n\tprivate JSONArray responseAsJSONArray;\n\n\t\/**\n\t * Constructor\n\t * @param response\n\t *\/\n\tpublic RestResponse(HttpResponse response) {\n\t\tthis.response = response;\n\t\tthis.statusCode = response.getStatusLine().getStatusCode();\n\t}\n\t\n\t\/**\n\t * Constructor\n\t * @param response\n\t *\/\n\tpublic RestResponse(NetworkResponse response) {\n\t\tthis.response = null;\n\t\tthis.statusCode = response.statusCode;\n\t\tthis.responseAsBytes = response.data;\n\t\t\t\t\n\t}\n\t\n\t\/**\n\t * @return HTTP status code of the response\n\t *\/\n\tpublic int getStatusCode() {\n\t\treturn statusCode; \n\t}\n\t\n\t\/**\n\t * @return the base HTTP response of the response. 
The can be useful for response that are not JSON, such as binary payloads.\n\t *\/\n\t\/**\n\t * @return true for response with 2xx status codes\n\t *\/\n\tpublic boolean isSuccess() {\n\t\t\/\/ 2xx success\n\t\treturn (statusCode >= HttpStatus.SC_OK && statusCode < HttpStatus.SC_MULTIPLE_CHOICES);\t\n\t}\n\t\n\t\/**\n\t * Fully consume response entity content and closes content stream\n\t * Must be called before returning control to the UI thread\n\t * @throws IOException \n\t *\/\n\tpublic void consume() throws IOException {\n\t\tif (responseAsBytes != null) {\n\t\t\t\/\/ already consumed\n\t\t\treturn;\t\t\t\n\t\t}\n\t\t\n\t\tHttpEntity entity = response.getEntity();\n\t\tif (entity != null) {\n\t\t\tresponseCharSet = EntityUtils.getContentCharSet(entity);\t\t\n\t\t\tresponseAsBytes = EntityUtils.toByteArray(entity);\n\t\t}\n\t\telse {\n\t\t\tresponseAsBytes = new byte[0];\n\t\t}\n\t}\n\n\t\/**\n\t * @return byte[] for entire response\n\t * @throws IOException\n\t *\/\n\tpublic byte[] asBytes() throws IOException {\n\t\tif (responseAsBytes == null) {\n\t\t\tconsume();\n\t\t}\n\t\treturn responseAsBytes;\n\t}\t\n\t\n\t\/**\n\t * String is built the first time the method is called.\n\t * \n\t * @return string for entire response\n\t * @throws ParseException\n\t * @throws IOException\n\t *\/\n\tpublic String asString() throws ParseException, IOException {\n\t\tif (responseAsString == null) {\n\t\t\tresponseAsString = new String(asBytes(), (responseCharSet == null ? HTTP.UTF_8 : responseCharSet));\n\t\t}\n\t\treturn responseAsString;\n\t}\n\t\n\t\/**\n\t * JSONObject is built the first time the method is called.\n\t * \n\t * @return JSONObject for response\n\t * @throws ParseException\n\t * @throws JSONException\n\t * @throws IOException\n\t *\/\n\tpublic JSONObject asJSONObject() throws ParseException, JSONException, IOException {\n\t\tif (responseAsJSONObject == null) {\n\t\t\tresponseAsJSONObject = new JSONObject(asString());\n\t\t}\n\t\treturn responseAsJSONObject;\n\t}\n\n\t\/**\n\t * JSONArray is built the first time the method is called.\n\t * \n\t * @return JSONObject for response\n\t * @throws ParseException\n\t * @throws JSONException\n\t * @throws IOException\n\t *\/\n\tpublic JSONArray asJSONArray() throws ParseException, JSONException, IOException {\n\t\tif (responseAsJSONArray == null) {\n\t\t\tresponseAsJSONArray = new JSONArray(asString());\n\t\t}\n\t\treturn responseAsJSONArray;\n\t}\n\t\n\t\n\t@Override\n\tpublic String toString() {\n\t\ttry {\n\t\t\treturn asString();\n\t\t} catch (Exception e) {\n\t\t\treturn response.toString();\n\t\t}\n\t}\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":235}
{"diff_hunk":"@@ -83,7 +83,11 @@ public class MetricsRestServiceImpl extends AbstractRestProcessEngineAware imple\n       metrics = query.interval();\n     }\n \n-    return convertToDtos(metrics);\n+    final List dtoList = convertToDtos(metrics);\n+    if (name != null) {\n+      dtoList.forEach(dto -> dto.setName(name));\n+    }\n+    return dtoList;\n   }\n \n   @Override","source_code":"\/*\n * Copyright Camunda Services GmbH and\/or licensed to Camunda Services GmbH\n * under one or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information regarding copyright\n * ownership. Camunda licenses this file to you under the Apache License,\n * Version 2.0; you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\npackage org.camunda.bpm.engine.rest.impl;\n\nimport org.camunda.bpm.engine.rest.MetricsRestService;\nimport org.camunda.bpm.engine.rest.sub.metrics.MetricsResource;\nimport org.camunda.bpm.engine.rest.sub.metrics.MetricsResourceImpl;\n\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport java.util.ArrayList;\nimport java.util.Date;\nimport java.util.List;\nimport javax.ws.rs.core.MultivaluedMap;\nimport javax.ws.rs.core.Response;\nimport javax.ws.rs.core.UriInfo;\nimport org.camunda.bpm.engine.management.MetricsQuery;\nimport org.camunda.bpm.engine.rest.dto.metrics.MetricsIntervalResultDto;\nimport org.camunda.bpm.engine.management.MetricIntervalValue;\nimport org.camunda.bpm.engine.rest.dto.converter.DateConverter;\nimport org.camunda.bpm.engine.rest.dto.converter.IntegerConverter;\nimport org.camunda.bpm.engine.rest.dto.converter.LongConverter;\n\n\/**\n * @author Daniel Meyer\n *\n *\/\npublic class MetricsRestServiceImpl extends AbstractRestProcessEngineAware implements MetricsRestService {\n\n  public static final String QUERY_PARAM_NAME = \"name\";\n  public static final String QUERY_PARAM_REPORTER = \"reporter\";\n  public static final String QUERY_PARAM_START_DATE = \"startDate\";\n  public static final String QUERY_PARAM_END_DATE = \"endDate\";\n  public static final String QUERY_PARAM_FIRST_RESULT = \"firstResult\";\n  public static final String QUERY_PARAM_MAX_RESULTS = \"maxResults\";\n  public static final String QUERY_PARAM_INTERVAL = \"interval\";\n  public static final String QUERY_PARAM_DATE = \"date\";\n  public static final String QUERY_PARAM_AGG_BY_REPORTER = \"aggregateByReporter\";\n\n  protected final DateConverter dateConverter;\n\n  public MetricsRestServiceImpl(String engineName, ObjectMapper objectMapper) {\n    super(engineName, objectMapper);\n    dateConverter = new DateConverter();\n    dateConverter.setObjectMapper(objectMapper);\n  }\n\n  @Override\n  public MetricsResource getMetrics(String name) {\n    return new MetricsResourceImpl(name, processEngine, objectMapper);\n  }\n\n  @Override\n  public List interval(UriInfo uriInfo) {\n    MultivaluedMap queryParameters = uriInfo.getQueryParameters();\n    MetricsQuery query = processEngine.getManagementService()\n      .createMetricsQuery()\n      
.name(queryParameters.getFirst(QUERY_PARAM_NAME))\n      .reporter(queryParameters.getFirst(QUERY_PARAM_REPORTER));\n\n    applyQueryParams(query, queryParameters);\n\n    List metrics;\n    LongConverter longConverter = new LongConverter();\n    longConverter.setObjectMapper(objectMapper);\n    if (queryParameters.getFirst(QUERY_PARAM_INTERVAL) != null) {\n      long interval = longConverter.convertQueryParameterToType(queryParameters.getFirst(QUERY_PARAM_INTERVAL));\n      metrics = query.interval(interval);\n    } else {\n      metrics = query.interval();\n    }\n\n    return convertToDtos(metrics);\n  }\n\n  @Override\n  public Response deleteTaskMetrics(String dateString) {\n    Date date = dateConverter.convertQueryParameterToType(dateString);\n    processEngine.getManagementService().deleteTaskMetrics(date);\n\n    \/\/ return no content (204) since resource is deleted\n    return Response.noContent().build();\n  }\n\n  protected void applyQueryParams(MetricsQuery query, MultivaluedMap queryParameters) {\n    if(queryParameters.getFirst(QUERY_PARAM_START_DATE) != null) {\n      Date startDate = dateConverter.convertQueryParameterToType(queryParameters.getFirst(QUERY_PARAM_START_DATE));\n      query.startDate(startDate);\n    }\n\n    if(queryParameters.getFirst(QUERY_PARAM_END_DATE) != null) {\n      Date endDate = dateConverter.convertQueryParameterToType(queryParameters.getFirst(QUERY_PARAM_END_DATE));\n      query.endDate(endDate);\n    }\n\n    IntegerConverter intConverter = new IntegerConverter();\n    intConverter.setObjectMapper(objectMapper);\n\n    if (queryParameters.getFirst(QUERY_PARAM_FIRST_RESULT) != null) {\n      int firstResult = intConverter.convertQueryParameterToType(queryParameters.getFirst(QUERY_PARAM_FIRST_RESULT));\n      query.offset(firstResult);\n    }\n\n    if (queryParameters.getFirst(QUERY_PARAM_MAX_RESULTS) != null) {\n      int maxResults = intConverter.convertQueryParameterToType(queryParameters.getFirst(QUERY_PARAM_MAX_RESULTS));\n      query.limit(maxResults);\n    }\n\n    if(queryParameters.getFirst(QUERY_PARAM_AGG_BY_REPORTER) != null) {\n      query.aggregateByReporter();\n    }\n  }\n\n  protected List convertToDtos(List metrics) {\n    List intervalMetrics = new ArrayList<>();\n    for (MetricIntervalValue m : metrics) {\n      intervalMetrics.add(new MetricsIntervalResultDto(m));\n    }\n    return intervalMetrics;\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":236}
{"diff_hunk":"@@ -52,6 +52,9 @@ public class JobTypePluginSet {\n     this.pluginJobPropsProcessor = new HashMap<>();\n     this.jobToClassName = new HashMap<>();\n     this.jobToClassLoaderURLs = new HashMap<>();\n+    this.jobToDefaultProxyUser = new HashMap<>();\n+    this.defaultProxyUsersJobTypeClasses = new HashSet<>();\n+    this.defaultProxyUsersFilter = new HashSet<>();\n   }\n \n   \/**","source_code":"\/*\n * Copyright 2014 LinkedIn Corp.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n * use this file except in compliance with the License. You may obtain a copy of\n * the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n * License for the specific language governing permissions and limitations under\n * the License.\n *\/\npackage azkaban.jobtype;\n\nimport azkaban.utils.Props;\n\nimport java.net.URL;\nimport java.util.HashMap;\nimport java.util.Map;\n\n\/**\n * Container for job type plugins\n *\n * This contains the jobClass objects, the properties for loading plugins, and the properties given\n * by default to the plugin.\n *\n * This class is not thread safe, so adding to this class should only be populated and controlled by\n * the JobTypeManager\n *\/\npublic class JobTypePluginSet {\n  private static final URL[] EMPTY_URLS = new URL[0];\n  private final Map pluginJobPropsMap;\n  private final Map pluginLoadPropsMap;\n  private final Map pluginPrivatePropsMap;\n  private final Map pluginJobPropsProcessor;\n  private final Map jobToClassName;\n  private final Map jobToClassLoaderURLs;\n\n  private Props commonJobProps;\n  private Props commonLoadProps;\n\n  \/**\n   * Base constructor\n   *\/\n  public JobTypePluginSet() {\n    this.pluginJobPropsMap = new HashMap<>();\n    this.pluginLoadPropsMap = new HashMap<>();\n    this.pluginPrivatePropsMap = new HashMap<>();\n    this.pluginJobPropsProcessor = new HashMap<>();\n    this.jobToClassName = new HashMap<>();\n    this.jobToClassLoaderURLs = new HashMap<>();\n  }\n\n  \/**\n   * Copy constructor\n   *\/\n  public JobTypePluginSet(final JobTypePluginSet clone) {\n    this.pluginJobPropsMap = new HashMap<>(clone.pluginJobPropsMap);\n    this.pluginLoadPropsMap = new HashMap<>(clone.pluginLoadPropsMap);\n    this.pluginPrivatePropsMap = new HashMap<>(clone.pluginPrivatePropsMap);\n    this.commonJobProps = clone.commonJobProps;\n    this.commonLoadProps = clone.commonLoadProps;\n    this.pluginJobPropsProcessor = clone.pluginJobPropsProcessor;\n    this.jobToClassName = clone.jobToClassName;\n    this.jobToClassLoaderURLs = clone.jobToClassLoaderURLs;\n  }\n\n  \/**\n   * Gets common properties for every jobtype\n   *\/\n  public Props getCommonPluginJobProps() {\n    return this.commonJobProps;\n  }\n\n  \/**\n   * Sets the common properties shared in every jobtype\n   *\/\n  public void setCommonPluginJobProps(final Props commonJobProps) {\n    this.commonJobProps = commonJobProps;\n  }\n\n  \/**\n   * Gets the common properties used to load a plugin\n   *\/\n  public Props getCommonPluginLoadProps() {\n    return this.commonLoadProps;\n  }\n\n  \/**\n   * Sets the common properties used to load every plugin\n   *\/\n  public void setCommonPluginLoadProps(final Props commonLoadProps) {\n    this.commonLoadProps = commonLoadProps;\n  
}\n\n  \/**\n   * Get the properties for a jobtype used to setup and load a plugin\n   *\/\n  public Props getPluginLoaderProps(final String jobTypeName) {\n    return this.pluginLoadPropsMap.get(jobTypeName);\n  }\n\n  \/**\n   * Get the plugin private properties for the jobtype\n   *\/\n  public Props getPluginPrivateProps(final String jobTypeName) {\n    return this.pluginPrivatePropsMap.get(jobTypeName);\n  }\n  \/**\n   * Get the properties that will be given to the plugin as default job properties.\n   *\/\n  public Props getPluginJobProps(final String jobTypeName) {\n    return this.pluginJobPropsMap.get(jobTypeName);\n  }\n\n  public void addPluginClassName(final String jobTypeName, final String jobTypeClassName) {\n    this.jobToClassName.put(jobTypeName, jobTypeClassName);\n  }\n\n  \/**\n   * Gets the plugin job class name\n   *\/\n  public String getPluginClassName(final String jobTypeName) {\n    return this.jobToClassName.get(jobTypeName);\n  }\n\n  \/**\n   * Get the resource URLs that should be added to its associated job ClassLoader.\n   *\/\n  public URL[] getPluginClassLoaderURLs(final String jobTypeName) {\n    return this.jobToClassLoaderURLs.getOrDefault(jobTypeName, EMPTY_URLS);\n  }\n\n  \/**\n   * Adds plugin job properties used as default runtime properties\n   *\/\n  public void addPluginJobProps(final String jobTypeName, final Props props) {\n    this.pluginJobPropsMap.put(jobTypeName, props);\n  }\n\n  \/**\n   * Add resource URLs that should be made available to ClassLoader of all jobs of the given jobtype.\n   *\/\n  public void addPluginClassLoaderURLs(final String jobTypeName, final URL[] urls) {\n    this.jobToClassLoaderURLs.put(jobTypeName, urls);\n  }\n\n  \/**\n   * Adds plugin load properties used to load the plugin\n   *\/\n  public void addPluginLoadProps(final String jobTypeName, final Props props) {\n    this.pluginLoadPropsMap.put(jobTypeName, props);\n  }\n\n  \/**\n   * Adds plugins private properties used by the plugin\n   *\/\n  public void addPluginPrivateProps(final String jobTypeName, final Props props) {\n    this.pluginPrivatePropsMap.put(jobTypeName, props);\n  }\n\n  public JobPropsProcessor getPluginJobPropsProcessor(\n      final String jobTypeName) {\n    return this.pluginJobPropsProcessor.get(jobTypeName);\n  }\n\n  public void addPluginJobPropsProcessor(final String jobTypeName,\n      JobPropsProcessor jobPropsProcessor) {\n    this.pluginJobPropsProcessor.put(jobTypeName, jobPropsProcessor);\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":237}
{"diff_hunk":"@@ -56,28 +56,6 @@ public class DefaultString {\n     return comment;\n   }\n \n-  private static final ImmutableMap SAMPLE_STRINGS =\n-      ImmutableMap.builder()\n-          .put(\n-              SampleKey.create(\"compute\", \"zone\", \"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?\"),\n-              \"us-central1-f\")\n-          .put(\n-              SampleKey.create(\"autoscaler\", \"zone\", \"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?\"),\n-              \"us-central1-f\")\n-          .put(\n-              SampleKey.create(\"clouduseraccounts\", \"zone\", \"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?\"),\n-              \"us-central1-f\")\n-          .build();\n-\n-  public static String getSample(String apiName, String fieldName, String pattern) {\n-    String sample = null;\n-    if (pattern != null) {\n-      \/\/ If the pattern has a specially-recognized sample, use the sample.\n-      sample = SAMPLE_STRINGS.get(SampleKey.create(apiName, fieldName, pattern));\n-    }\n-    return sample == null ? \"\" : sample;\n-  }\n-\n   \/**\n    * Returns a non-trivial placeholder for pattern with a no-brace and lower-case format style. An\n    * empty string is returned for unrecognized patterns.","source_code":"\/* Copyright 2016 Google Inc\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\npackage com.google.api.codegen.discovery;\n\nimport com.google.api.codegen.Inflector;\nimport com.google.api.codegen.LanguageUtil;\nimport com.google.auto.value.AutoValue;\nimport com.google.common.annotations.VisibleForTesting;\nimport com.google.common.collect.ImmutableList;\nimport com.google.common.collect.ImmutableMap;\nimport java.util.ArrayList;\nimport java.util.List;\nimport javax.annotation.Nullable;\n\n\/** Creates default string from path patterns. 
*\/\npublic class DefaultString {\n\n  @AutoValue\n  abstract static class SampleKey {\n    abstract String getApiName();\n\n    abstract String getFieldName();\n\n    abstract String getRegexp();\n\n    static SampleKey create(String apiName, String fieldName, String regexp) {\n      return new AutoValue_DefaultString_SampleKey(apiName, fieldName, regexp);\n    }\n  }\n\n  private final String define;\n  private final String comment;\n\n  public DefaultString(String define, String comment) {\n    this.define = define;\n    this.comment = comment;\n  }\n\n  public String getDefine() {\n    return define;\n  }\n\n  public String getComment() {\n    return comment;\n  }\n\n  private static final ImmutableMap SAMPLE_STRINGS =\n      ImmutableMap.builder()\n          .put(\n              SampleKey.create(\"compute\", \"zone\", \"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?\"),\n              \"us-central1-f\")\n          .put(\n              SampleKey.create(\"autoscaler\", \"zone\", \"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?\"),\n              \"us-central1-f\")\n          .put(\n              SampleKey.create(\"clouduseraccounts\", \"zone\", \"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?\"),\n              \"us-central1-f\")\n          .build();\n\n  public static String getSample(String apiName, String fieldName, String pattern) {\n    String sample = null;\n    if (pattern != null) {\n      \/\/ If the pattern has a specially-recognized sample, use the sample.\n      sample = SAMPLE_STRINGS.get(SampleKey.create(apiName, fieldName, pattern));\n    }\n    return sample == null ? \"\" : sample;\n  }\n\n  \/**\n   * Returns a non-trivial placeholder for pattern with a no-brace and lower-case format style. An\n   * empty string is returned for unrecognized patterns.\n   *\n   * 

Variables are formatted according to format (ex: \"my-%s\").\n *\n *

For example: \"projects\/my-project\/logs\/my-log\" or \"my-project\"\n *\/\n public static String getNonTrivialPlaceholder(String pattern, String format) {\n if (pattern != null) {\n String def = forPattern(pattern, format);\n if (def != null) {\n return def;\n }\n }\n return \"\";\n }\n\n private static final String WILDCARD_PATTERN = \"[^\/]*\";\n\n \/** Returns a default string from `pattern`, or null if the pattern is not supported. *\/\n @VisibleForTesting\n static String forPattern(String pattern, String placeholderFormat) {\n \/\/ We only care about patterns that have alternating literal and wildcard like\n \/\/ ^foo\/[^\/]*\/bar\/[^\/]*$\n \/\/ Ignore if what we get looks nothing like this.\n if (pattern == null || !pattern.startsWith(\"^\") || !pattern.endsWith(\"$\")) {\n return null;\n }\n pattern = pattern.substring(1, pattern.length() - 1);\n ImmutableList elems = parse(pattern);\n if (!validElems(elems)) {\n return null;\n }\n\n StringBuilder ret = new StringBuilder();\n for (int i = 0; i < elems.size(); i += 2) {\n String literal = elems.get(i).getLiteral();\n String placeholder = Inflector.singularize(literal);\n placeholder = LanguageUtil.lowerCamelToLowerUnderscore(placeholder).replace('_', '-');\n ret.append('\/')\n .append(literal)\n .append(\"\/\")\n .append(String.format(placeholderFormat, placeholder));\n }\n return ret.substring(1);\n }\n\n \/**\n * Parses pattern, with the leading '^' and trailing '$' removed, into a list representing the\n * pattern.\n *\/\n private static ImmutableList parse(String pattern) {\n List elems = new ArrayList<>();\n while (pattern.length() > 0) {\n int slash;\n if (pattern.startsWith(WILDCARD_PATTERN)) {\n elems.add(Elem.WILDCARD);\n pattern = pattern.substring(WILDCARD_PATTERN.length());\n } else if ((slash = pattern.indexOf(\"\/\")) >= 0) {\n elems.add(Elem.createLiteral(pattern.substring(0, slash)));\n pattern = pattern.substring(slash);\n } else {\n elems.add(Elem.createLiteral(pattern));\n pattern = \"\";\n }\n\n if (pattern.startsWith(\"\/\")) {\n pattern = pattern.substring(1);\n }\n }\n return ImmutableList.copyOf(elems);\n }\n\n \/**\n * Returns whether the pattern represented by the list is in a form we expect.\n *\n *

A valid pattern must have the same number of literals and wildcards, alternating, and starts\n * with a literal. Literals must consists of only letters.\n *\/\n private static boolean validElems(ImmutableList elems) {\n if (elems.size() % 2 != 0) {\n return false;\n }\n ImmutableList expect =\n ImmutableList.of(ElemType.LITERAL, ElemType.WILDCARD);\n for (int i = 0; i < elems.size(); i++) {\n if (elems.get(i).getType() != expect.get(i % expect.size())) {\n return false;\n }\n }\n for (int i = 0; i < elems.size(); i += 2) {\n for (char c : elems.get(i).getLiteral().toCharArray()) {\n if (!Character.isLetter(c)) {\n return false;\n }\n }\n }\n return true;\n }\n\n enum ElemType {\n LITERAL,\n WILDCARD\n }\n\n @AutoValue\n abstract static class Elem {\n abstract ElemType getType();\n\n @Nullable\n abstract String getLiteral();\n\n private static final Elem WILDCARD = new AutoValue_DefaultString_Elem(ElemType.WILDCARD, null);\n\n private static Elem createLiteral(String lit) {\n return new AutoValue_DefaultString_Elem(ElemType.LITERAL, lit);\n }\n }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":238} {"diff_hunk":"@@ -109,11 +109,7 @@ public class BesuEventsImpl implements BesuEvents {\n .collect(toUnmodifiableList());\n final List> besuTopics =\n topics.stream()\n- .map(\n- subList ->\n- subList.stream()\n- .map(bytes -> LogTopic.wrap(bytes))\n- .collect(toUnmodifiableList()))\n+ .map(subList -> subList.stream().map(LogTopic::wrap).collect(toUnmodifiableList()))\n .collect(toUnmodifiableList());\n \n final LogsQuery logsQuery = new LogsQuery(besuAddresses, besuTopics);","source_code":"\/*\n * Copyright ConsenSys AG.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n * the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n * an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n * specific language governing permissions and limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n *\/\npackage org.hyperledger.besu.services;\n\nimport static java.util.stream.Collectors.toUnmodifiableList;\n\nimport org.hyperledger.besu.ethereum.api.query.LogsQuery;\nimport org.hyperledger.besu.ethereum.chain.Blockchain;\nimport org.hyperledger.besu.ethereum.core.BlockBody;\nimport org.hyperledger.besu.ethereum.core.Difficulty;\nimport org.hyperledger.besu.ethereum.core.LogTopic;\nimport org.hyperledger.besu.ethereum.core.LogWithMetadata;\nimport org.hyperledger.besu.ethereum.eth.sync.BlockBroadcaster;\nimport org.hyperledger.besu.ethereum.eth.sync.state.SyncState;\nimport org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;\nimport org.hyperledger.besu.plugin.data.Address;\nimport org.hyperledger.besu.plugin.data.BlockHeader;\nimport org.hyperledger.besu.plugin.data.PropagatedBlockContext;\nimport org.hyperledger.besu.plugin.services.BesuEvents;\n\nimport java.util.List;\nimport java.util.function.Supplier;\n\nimport org.apache.tuweni.bytes.Bytes32;\nimport org.apache.tuweni.units.bigints.UInt256;\n\npublic class BesuEventsImpl implements BesuEvents {\n private final Blockchain blockchain;\n private final BlockBroadcaster blockBroadcaster;\n private final TransactionPool transactionPool;\n private final SyncState syncState;\n\n public BesuEventsImpl(\n final Blockchain blockchain,\n final BlockBroadcaster blockBroadcaster,\n final TransactionPool transactionPool,\n final SyncState syncState) {\n this.blockchain = blockchain;\n this.blockBroadcaster = blockBroadcaster;\n this.transactionPool = transactionPool;\n this.syncState = syncState;\n }\n\n @Override\n public long addBlockPropagatedListener(final BlockPropagatedListener listener) {\n return blockBroadcaster.subscribePropagateNewBlocks(\n (block, totalDifficulty) ->\n listener.onBlockPropagated(\n blockPropagatedContext(block::getHeader, block::getBody, () -> totalDifficulty)));\n }\n\n @Override\n public void removeBlockPropagatedListener(final long listenerIdentifier) {\n blockBroadcaster.unsubscribePropagateNewBlocks(listenerIdentifier);\n }\n\n @Override\n public long addTransactionAddedListener(final TransactionAddedListener listener) {\n return transactionPool.subscribePendingTransactions(listener::onTransactionAdded);\n }\n\n @Override\n public void removeTransactionAddedListener(final long listenerIdentifier) {\n transactionPool.unsubscribePendingTransactions(listenerIdentifier);\n }\n\n @Override\n public long addTransactionDroppedListener(\n final TransactionDroppedListener transactionDroppedListener) {\n return transactionPool.subscribeDroppedTransactions(\n transactionDroppedListener::onTransactionDropped);\n }\n\n @Override\n public void removeTransactionDroppedListener(final long listenerIdentifier) {\n transactionPool.unsubscribeDroppedTransactions(listenerIdentifier);\n }\n\n @Override\n public long addSyncStatusListener(final SyncStatusListener syncStatusListener) {\n return syncState.subscribeSyncStatus(syncStatusListener);\n }\n\n @Override\n public void removeSyncStatusListener(final long listenerIdentifier) {\n syncState.unsubscribeSyncStatus(listenerIdentifier);\n }\n\n @Override\n public long addLogListener(\n final List

addresses,\n final List> topics,\n final LogListener logListener) {\n final List besuAddresses =\n addresses.stream()\n .map(org.hyperledger.besu.ethereum.core.Address::fromPlugin)\n .collect(toUnmodifiableList());\n final List> besuTopics =\n topics.stream()\n .map(\n subList ->\n subList.stream()\n .map(bytes -> LogTopic.wrap(bytes))\n .collect(toUnmodifiableList()))\n .collect(toUnmodifiableList());\n\n final LogsQuery logsQuery = new LogsQuery(besuAddresses, besuTopics);\n\n return blockchain.observeLogs(\n logWithMetadata -> {\n if (logsQuery.matches(LogWithMetadata.fromPlugin(logWithMetadata))) {\n logListener.onLogEmitted(logWithMetadata);\n }\n });\n }\n\n @Override\n public void removeLogListener(final long listenerIdentifier) {\n blockchain.removeObserver(listenerIdentifier);\n }\n\n private static PropagatedBlockContext blockPropagatedContext(\n final Supplier blockHeaderSupplier,\n final Supplier blockBodySupplier,\n final Supplier totalDifficultySupplier) {\n return new PropagatedBlockContext() {\n @Override\n public BlockHeader getBlockHeader() {\n return blockHeaderSupplier.get();\n }\n\n @Override\n public BlockBody getBlockBody() {\n return blockBodySupplier.get();\n }\n\n @Override\n public UInt256 getTotalDifficulty() {\n return totalDifficultySupplier.get().toUInt256();\n }\n };\n }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":239} {"diff_hunk":"@@ -41,11 +41,13 @@ class ParquetReadSupport extends ReadSupport {\n private final Schema expectedSchema;\n private final ReadSupport wrapped;\n private final boolean callInit;\n+ private final NameMapping nameMapping;\n \n- ParquetReadSupport(Schema expectedSchema, ReadSupport readSupport, boolean callInit) {\n+ ParquetReadSupport(Schema expectedSchema, ReadSupport readSupport, boolean callInit, NameMapping nameMapping) {\n this.expectedSchema = expectedSchema;\n this.wrapped = readSupport;\n this.callInit = callInit;\n+ this.nameMapping = nameMapping;\n }\n \n @Override","source_code":"\/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. 
See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\/\n\npackage org.apache.iceberg.parquet;\n\nimport com.google.common.collect.ImmutableMap;\nimport com.google.common.collect.Sets;\nimport java.util.Map;\nimport java.util.Set;\nimport org.apache.hadoop.conf.Configuration;\nimport org.apache.iceberg.Schema;\nimport org.apache.iceberg.avro.AvroSchemaUtil;\nimport org.apache.parquet.avro.AvroReadSupport;\nimport org.apache.parquet.hadoop.api.InitContext;\nimport org.apache.parquet.hadoop.api.ReadSupport;\nimport org.apache.parquet.io.api.RecordMaterializer;\nimport org.apache.parquet.schema.MessageType;\n\n\/**\n * Parquet {@link ReadSupport} that handles column projection based on {@link Schema} column IDs.\n *\n * @param Java type produced by this read support instance\n *\/\nclass ParquetReadSupport extends ReadSupport {\n private final Schema expectedSchema;\n private final ReadSupport wrapped;\n private final boolean callInit;\n\n ParquetReadSupport(Schema expectedSchema, ReadSupport readSupport, boolean callInit) {\n this.expectedSchema = expectedSchema;\n this.wrapped = readSupport;\n this.callInit = callInit;\n }\n\n @Override\n @SuppressWarnings(\"deprecation\")\n public ReadContext init(Configuration configuration, Map keyValueMetaData, MessageType fileSchema) {\n \/\/ Columns are selected from the Parquet file by taking the read context's message type and\n \/\/ matching to the file's columns by full path, so this must select columns by using the path\n \/\/ in the file's schema.\n\n MessageType projection = ParquetSchemaUtil.hasIds(fileSchema) ?\n ParquetSchemaUtil.pruneColumns(fileSchema, expectedSchema) :\n ParquetSchemaUtil.pruneColumnsFallback(fileSchema, expectedSchema);\n\n \/\/ override some known backward-compatibility options\n configuration.set(\"parquet.strict.typing\", \"false\");\n configuration.set(\"parquet.avro.add-list-element-records\", \"false\");\n configuration.set(\"parquet.avro.write-old-list-structure\", \"false\");\n\n \/\/ set Avro schemas in case the reader is Avro\n AvroReadSupport.setRequestedProjection(configuration,\n AvroSchemaUtil.convert(expectedSchema, projection.getName()));\n org.apache.avro.Schema avroReadSchema = AvroSchemaUtil.buildAvroProjection(\n AvroSchemaUtil.convert(ParquetSchemaUtil.convert(projection), projection.getName()),\n expectedSchema, ImmutableMap.of());\n AvroReadSupport.setAvroReadSchema(configuration, ParquetAvro.parquetAvroSchema(avroReadSchema));\n\n \/\/ let the context set up read support metadata, but always use the correct projection\n ReadContext context = null;\n if (callInit) {\n try {\n context = wrapped.init(configuration, keyValueMetaData, projection);\n } catch (UnsupportedOperationException e) {\n \/\/ try the InitContext version\n context = wrapped.init(new InitContext(\n configuration, makeMultimap(keyValueMetaData), projection));\n }\n }\n\n return new ReadContext(projection,\n context != null ? context.getReadSupportMetadata() : ImmutableMap.of());\n }\n\n @Override\n public RecordMaterializer prepareForRead(Configuration configuration,\n Map fileMetadata,\n MessageType fileMessageType,\n ReadContext readContext) {\n \/\/ This is the type created in init that was based on the file's schema. The schema that this\n \/\/ will pass to the wrapped ReadSupport needs to match the expected schema's names. Rather than\n \/\/ renaming the file's schema, convert the expected schema to Parquet. 
This relies on writing\n \/\/ files with the correct schema.\n \/\/ TODO: this breaks when columns are reordered.\n MessageType readSchema = ParquetSchemaUtil.convert(expectedSchema, fileMessageType.getName());\n return wrapped.prepareForRead(configuration, fileMetadata, readSchema, readContext);\n }\n\n private Map> makeMultimap(Map map) {\n ImmutableMap.Builder> builder = ImmutableMap.builder();\n for (Map.Entry entry : map.entrySet()) {\n builder.put(entry.getKey(), Sets.newHashSet(entry.getValue()));\n }\n return builder.build();\n }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":240} {"diff_hunk":"@@ -19,6 +19,7 @@ import static org.hyperledger.besu.util.FutureUtils.exceptionallyCompose;\n import org.hyperledger.besu.ethereum.ProtocolContext;\n import org.hyperledger.besu.ethereum.core.BlockHeader;\n import org.hyperledger.besu.ethereum.eth.manager.EthContext;\n+import org.hyperledger.besu.ethereum.eth.manager.EthPeer;\n import org.hyperledger.besu.ethereum.eth.manager.task.WaitForPeersTask;\n import org.hyperledger.besu.ethereum.eth.sync.ChainDownloader;\n import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;","source_code":"\/*\n * Copyright 2019 ConsenSys AG.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n * the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n * an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations under the License.\n *\/\npackage org.hyperledger.besu.ethereum.eth.sync.fastsync;\n\nimport static java.util.concurrent.CompletableFuture.completedFuture;\nimport static org.hyperledger.besu.util.FutureUtils.completedExceptionally;\nimport static org.hyperledger.besu.util.FutureUtils.exceptionallyCompose;\n\nimport org.hyperledger.besu.ethereum.ProtocolContext;\nimport org.hyperledger.besu.ethereum.core.BlockHeader;\nimport org.hyperledger.besu.ethereum.eth.manager.EthContext;\nimport org.hyperledger.besu.ethereum.eth.manager.task.WaitForPeersTask;\nimport org.hyperledger.besu.ethereum.eth.sync.ChainDownloader;\nimport org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;\nimport org.hyperledger.besu.ethereum.eth.sync.state.SyncState;\nimport org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;\nimport org.hyperledger.besu.metrics.BesuMetricCategory;\nimport org.hyperledger.besu.plugin.services.MetricsSystem;\nimport org.hyperledger.besu.plugin.services.metrics.Counter;\nimport org.hyperledger.besu.util.ExceptionUtils;\n\nimport java.time.Duration;\nimport java.util.concurrent.CompletableFuture;\nimport java.util.concurrent.TimeoutException;\nimport java.util.concurrent.atomic.AtomicLong;\n\nimport org.apache.logging.log4j.LogManager;\nimport org.apache.logging.log4j.Logger;\n\npublic class FastSyncActions {\n\n private static final Logger LOG = LogManager.getLogger();\n private final SynchronizerConfiguration syncConfig;\n private final ProtocolSchedule protocolSchedule;\n private final ProtocolContext protocolContext;\n private final EthContext ethContext;\n private final SyncState syncState;\n private final MetricsSystem metricsSystem;\n private final Counter pivotBlockSelectionCounter;\n private final AtomicLong pivotBlockGauge = new 
AtomicLong(0);\n\n public FastSyncActions(\n final SynchronizerConfiguration syncConfig,\n final ProtocolSchedule protocolSchedule,\n final ProtocolContext protocolContext,\n final EthContext ethContext,\n final SyncState syncState,\n final MetricsSystem metricsSystem) {\n this.syncConfig = syncConfig;\n this.protocolSchedule = protocolSchedule;\n this.protocolContext = protocolContext;\n this.ethContext = ethContext;\n this.syncState = syncState;\n this.metricsSystem = metricsSystem;\n\n pivotBlockSelectionCounter =\n metricsSystem.createCounter(\n BesuMetricCategory.SYNCHRONIZER,\n \"fast_sync_pivot_block_selected_count\",\n \"Number of times a fast sync pivot block has been selected\");\n metricsSystem.createLongGauge(\n BesuMetricCategory.SYNCHRONIZER,\n \"fast_sync_pivot_block_current\",\n \"The current fast sync pivot block\",\n pivotBlockGauge::get);\n }\n\n public CompletableFuture waitForSuitablePeers(final FastSyncState fastSyncState) {\n if (fastSyncState.hasPivotBlockHeader()) {\n return waitForAnyPeer().thenApply(ignore -> fastSyncState);\n }\n\n LOG.debug(\"Waiting for at least {} peers.\", syncConfig.getFastSyncMinimumPeerCount());\n return waitForPeers(syncConfig.getFastSyncMinimumPeerCount())\n .thenApply(successfulWaitResult -> fastSyncState);\n }\n\n private CompletableFuture waitForAnyPeer() {\n final CompletableFuture waitForPeerResult =\n ethContext.getScheduler().timeout(WaitForPeersTask.create(ethContext, 1, metricsSystem));\n return exceptionallyCompose(\n waitForPeerResult,\n throwable -> {\n if (ExceptionUtils.rootCause(throwable) instanceof TimeoutException) {\n return waitForAnyPeer();\n }\n return completedExceptionally(throwable);\n });\n }\n\n private CompletableFuture waitForPeers(final int count) {\n final WaitForPeersTask waitForPeersTask =\n WaitForPeersTask.create(ethContext, count, metricsSystem);\n return waitForPeersTask.run();\n }\n\n public CompletableFuture selectPivotBlock(final FastSyncState fastSyncState) {\n return fastSyncState.hasPivotBlockHeader()\n ? completedFuture(fastSyncState)\n : selectPivotBlockFromPeers();\n }\n\n private CompletableFuture selectPivotBlockFromPeers() {\n return ethContext\n .getEthPeers()\n .bestPeerWithHeightEstimate()\n \/\/ Only select a pivot block number when we have a minimum number of height estimates\n .filter(\n peer -> {\n final long peerCount = countPeersWithEstimatedHeight();\n final int minPeerCount = syncConfig.getFastSyncMinimumPeerCount();\n if (peerCount < minPeerCount) {\n LOG.info(\n \"Waiting for peers with chain height information. 
{} \/ {} required peers currently available.\",\n peerCount,\n minPeerCount);\n return false;\n }\n return true;\n })\n .map(\n peer -> {\n final long pivotBlockNumber =\n peer.chainState().getEstimatedHeight() - syncConfig.getFastSyncPivotDistance();\n if (pivotBlockNumber <= BlockHeader.GENESIS_BLOCK_NUMBER) {\n \/\/ Peer's chain isn't long enough, return an empty value so we can try again.\n LOG.info(\"Waiting for peer with sufficient chain height\");\n return null;\n }\n LOG.info(\"Selecting block number {} as fast sync pivot block.\", pivotBlockNumber);\n pivotBlockSelectionCounter.inc();\n pivotBlockGauge.set(pivotBlockNumber);\n return completedFuture(new FastSyncState(pivotBlockNumber));\n })\n .orElseGet(this::retrySelectPivotBlockAfterDelay);\n }\n\n private long countPeersWithEstimatedHeight() {\n return ethContext\n .getEthPeers()\n .streamAvailablePeers()\n .filter(peer -> peer.chainState().hasEstimatedHeight())\n .count();\n }\n\n private CompletableFuture retrySelectPivotBlockAfterDelay() {\n return ethContext\n .getScheduler()\n .scheduleFutureTask(\n () ->\n waitForPeers(syncConfig.getFastSyncMinimumPeerCount())\n .thenCompose(ignore -> selectPivotBlockFromPeers()),\n Duration.ofSeconds(1));\n }\n\n public CompletableFuture downloadPivotBlockHeader(\n final FastSyncState currentState) {\n if (currentState.getPivotBlockHeader().isPresent()) {\n return completedFuture(currentState);\n }\n return new PivotBlockRetriever<>(\n protocolSchedule,\n ethContext,\n metricsSystem,\n currentState.getPivotBlockNumber().getAsLong())\n .downloadPivotBlockHeader();\n }\n\n public ChainDownloader createChainDownloader(final FastSyncState currentState) {\n return FastSyncChainDownloader.create(\n syncConfig,\n protocolSchedule,\n protocolContext,\n ethContext,\n syncState,\n metricsSystem,\n currentState.getPivotBlockHeader().get());\n }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":241} {"diff_hunk":"@@ -43,10 +43,14 @@ import org.apache.spark.sql.connector.write.WriteBuilder;\n import org.apache.spark.sql.sources.Filter;\n import org.apache.spark.sql.types.StructType;\n import org.apache.spark.sql.util.CaseInsensitiveStringMap;\n+import org.slf4j.Logger;\n+import org.slf4j.LoggerFactory;\n \n public class SparkTable implements org.apache.spark.sql.connector.catalog.Table,\n SupportsRead, SupportsWrite, SupportsDelete {\n \n+ private static final Logger LOG = LoggerFactory.getLogger(SparkTable.class);\n+\n private static final Set RESERVED_PROPERTIES = Sets.newHashSet(\"provider\", \"format\", \"current-snapshot-id\");\n private static final Set CAPABILITIES = ImmutableSet.of(\n TableCapability.BATCH_READ,","source_code":"\/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. 
See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\/\n\npackage org.apache.iceberg.spark.source;\n\nimport java.util.Map;\nimport java.util.Set;\nimport org.apache.iceberg.Table;\nimport org.apache.iceberg.TableProperties;\nimport org.apache.iceberg.exceptions.ValidationException;\nimport org.apache.iceberg.expressions.Expression;\nimport org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;\nimport org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;\nimport org.apache.iceberg.relocated.com.google.common.collect.Sets;\nimport org.apache.iceberg.spark.Spark3Util;\nimport org.apache.iceberg.spark.SparkFilters;\nimport org.apache.iceberg.spark.SparkSchemaUtil;\nimport org.apache.spark.sql.SparkSession;\nimport org.apache.spark.sql.connector.catalog.SupportsDelete;\nimport org.apache.spark.sql.connector.catalog.SupportsRead;\nimport org.apache.spark.sql.connector.catalog.SupportsWrite;\nimport org.apache.spark.sql.connector.catalog.TableCapability;\nimport org.apache.spark.sql.connector.expressions.Transform;\nimport org.apache.spark.sql.connector.read.ScanBuilder;\nimport org.apache.spark.sql.connector.write.LogicalWriteInfo;\nimport org.apache.spark.sql.connector.write.WriteBuilder;\nimport org.apache.spark.sql.sources.Filter;\nimport org.apache.spark.sql.types.StructType;\nimport org.apache.spark.sql.util.CaseInsensitiveStringMap;\n\npublic class SparkTable implements org.apache.spark.sql.connector.catalog.Table,\n SupportsRead, SupportsWrite, SupportsDelete {\n\n private static final Set RESERVED_PROPERTIES = Sets.newHashSet(\"provider\", \"format\", \"current-snapshot-id\");\n private static final Set CAPABILITIES = ImmutableSet.of(\n TableCapability.BATCH_READ,\n TableCapability.BATCH_WRITE,\n TableCapability.STREAMING_WRITE,\n TableCapability.OVERWRITE_BY_FILTER,\n TableCapability.OVERWRITE_DYNAMIC);\n\n private final Table icebergTable;\n private final StructType requestedSchema;\n private final boolean refreshEagerly;\n private StructType lazyTableSchema = null;\n private SparkSession lazySpark = null;\n\n public SparkTable(Table icebergTable, boolean refreshEagerly) {\n this(icebergTable, null, refreshEagerly);\n }\n\n public SparkTable(Table icebergTable, StructType requestedSchema, boolean refreshEagerly) {\n this.icebergTable = icebergTable;\n this.requestedSchema = requestedSchema;\n this.refreshEagerly = refreshEagerly;\n\n if (requestedSchema != null) {\n \/\/ convert the requested schema to throw an exception if any requested fields are unknown\n SparkSchemaUtil.convert(icebergTable.schema(), requestedSchema);\n }\n }\n\n private SparkSession sparkSession() {\n if (lazySpark == null) {\n this.lazySpark = SparkSession.active();\n }\n\n return lazySpark;\n }\n\n public Table table() {\n return icebergTable;\n }\n\n @Override\n public String name() {\n return icebergTable.toString();\n }\n\n @Override\n public StructType schema() {\n if (lazyTableSchema == null) {\n if (requestedSchema != null) {\n this.lazyTableSchema = SparkSchemaUtil.convert(SparkSchemaUtil.prune(icebergTable.schema(), requestedSchema));\n } else {\n this.lazyTableSchema = SparkSchemaUtil.convert(icebergTable.schema());\n }\n }\n\n return lazyTableSchema;\n }\n\n @Override\n public Transform[] partitioning() {\n return Spark3Util.toTransforms(icebergTable.spec());\n }\n\n @Override\n public Map properties() {\n ImmutableMap.Builder propsBuilder = ImmutableMap.builder();\n\n String fileFormat = 
icebergTable.properties()\n .getOrDefault(TableProperties.DEFAULT_FILE_FORMAT, TableProperties.DEFAULT_FILE_FORMAT_DEFAULT);\n propsBuilder.put(\"format\", \"iceberg\/\" + fileFormat);\n propsBuilder.put(\"provider\", \"iceberg\");\n String currentSnapshotId = icebergTable.currentSnapshot() != null ?\n String.valueOf(icebergTable.currentSnapshot().snapshotId()) : \"none\";\n propsBuilder.put(\"current-snapshot-id\", currentSnapshotId);\n\n icebergTable.properties().entrySet().stream()\n .filter(entry -> !RESERVED_PROPERTIES.contains(entry.getKey()))\n .forEach(propsBuilder::put);\n\n return propsBuilder.build();\n }\n\n @Override\n public Set capabilities() {\n return CAPABILITIES;\n }\n\n @Override\n public ScanBuilder newScanBuilder(CaseInsensitiveStringMap options) {\n if (refreshEagerly) {\n icebergTable.refresh();\n }\n\n SparkScanBuilder scanBuilder = new SparkScanBuilder(sparkSession(), icebergTable, options);\n\n if (requestedSchema != null) {\n scanBuilder.pruneColumns(requestedSchema);\n }\n\n return scanBuilder;\n }\n\n @Override\n public WriteBuilder newWriteBuilder(LogicalWriteInfo info) {\n return new SparkWriteBuilder(sparkSession(), icebergTable, info);\n }\n\n @Override\n public void deleteWhere(Filter[] filters) {\n Expression deleteExpr = SparkFilters.convert(filters);\n\n try {\n icebergTable.newDelete()\n .set(\"spark.app.id\", sparkSession().sparkContext().applicationId())\n .deleteFromRowFilter(deleteExpr)\n .commit();\n } catch (ValidationException e) {\n throw new IllegalArgumentException(\"Failed to cleanly delete data files matching: \" + deleteExpr, e);\n }\n }\n\n @Override\n public String toString() {\n return icebergTable.toString();\n }\n\n @Override\n public boolean equals(Object other) {\n if (this == other) {\n return true;\n } else if (other == null || getClass() != other.getClass()) {\n return false;\n }\n\n \/\/ use only name in order to correctly invalidate Spark cache\n SparkTable that = (SparkTable) other;\n return icebergTable.name().equals(that.icebergTable.name());\n }\n\n @Override\n public int hashCode() {\n \/\/ use only name in order to correctly invalidate Spark cache\n return icebergTable.name().hashCode();\n }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":242} {"diff_hunk":"@@ -28,6 +28,7 @@ import org.springframework.security.saml2.Saml2Exception;\n *\n * @author Josh Cummings\n * @author Ryan Cassar\n+ * @author Marcus da Coregio\n * @since 5.4\n *\/\n public final class RelyingPartyRegistrations {","source_code":"\/*\n * Copyright 2002-2020 the original author or authors.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage org.springframework.security.saml2.provider.service.registration;\n\nimport java.io.IOException;\nimport java.io.InputStream;\n\nimport org.springframework.core.io.DefaultResourceLoader;\nimport org.springframework.core.io.ResourceLoader;\nimport org.springframework.security.saml2.Saml2Exception;\n\n\/**\n * A utility class for constructing instances of {@link 
RelyingPartyRegistration}\n *\n * @author Josh Cummings\n * @author Ryan Cassar\n * @since 5.4\n *\/\npublic final class RelyingPartyRegistrations {\n\n\tprivate static final OpenSamlAssertingPartyMetadataConverter assertingPartyMetadataConverter = new OpenSamlAssertingPartyMetadataConverter();\n\n\tprivate static final ResourceLoader resourceLoader = new DefaultResourceLoader();\n\n\tprivate RelyingPartyRegistrations() {\n\t}\n\n\t\/**\n\t * Return a {@link RelyingPartyRegistration.Builder} based off of the given SAML 2.0\n\t * Asserting Party (IDP) metadata location.\n\t *\n\t * Valid locations can be classpath- or file-based or they can be HTTP endpoints. Some\n\t * valid endpoints might include:\n\t *\n\t *
\n\t *   metadataLocation = \"classpath:asserting-party-metadata.xml\";\n\t *   metadataLocation = \"file:asserting-party-metadata.xml\";\n\t *   metadataLocation = \"https:\/\/ap.example.org\/metadata\";\n\t * <\/pre>\n\t *\n\t * Note that by default the registrationId is set to be the given metadata location,\n\t * but this will most often not be sufficient. To complete the configuration, most\n\t * applications will also need to provide a registrationId, like so:\n\t *\n\t * 
\n\t *\tRelyingPartyRegistration registration = RelyingPartyRegistrations\n\t * \t\t.fromMetadataLocation(metadataLocation)\n\t * \t\t.registrationId(\"registration-id\")\n\t * \t\t.build();\n\t * <\/pre>\n\t *\n\t * Also note that an {@code IDPSSODescriptor} typically only contains information\n\t * about the asserting party. Thus, you will need to remember to still populate\n\t * anything about the relying party, like any private keys the relying party will use\n\t * for signing AuthnRequests.\n\t * @param metadataLocation The classpath- or file-based locations or HTTP endpoints of\n\t * the asserting party metadata file\n\t * @return the {@link RelyingPartyRegistration.Builder} for further configuration\n\t *\/\n\tpublic static RelyingPartyRegistration.Builder fromMetadataLocation(String metadataLocation) {\n\t\ttry (InputStream source = resourceLoader.getResource(metadataLocation).getInputStream()) {\n\t\t\treturn assertingPartyMetadataConverter.convert(source);\n\t\t}\n\t\tcatch (IOException ex) {\n\t\t\tif (ex.getCause() instanceof Saml2Exception) {\n\t\t\t\tthrow (Saml2Exception) ex.getCause();\n\t\t\t}\n\t\t\tthrow new Saml2Exception(ex);\n\t\t}\n\t}\n\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":243}
{"diff_hunk":"@@ -40,6 +40,7 @@ import org.apache.iceberg.io.TaskWriter;\n import org.apache.iceberg.io.UnpartitionedWriter;\n import org.apache.iceberg.relocated.com.google.common.collect.Lists;\n import org.apache.iceberg.spark.SparkSchemaUtil;\n+import org.apache.iceberg.util.PropertyUtil;\n import org.apache.spark.TaskContext;\n import org.apache.spark.api.java.JavaRDD;\n import org.apache.spark.broadcast.Broadcast;","source_code":"\/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *   http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied.  See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\/\n\npackage org.apache.iceberg.spark.source;\n\nimport java.io.Serializable;\nimport java.util.Collection;\nimport java.util.List;\nimport java.util.Locale;\nimport java.util.Map;\nimport java.util.stream.Collectors;\nimport org.apache.iceberg.CombinedScanTask;\nimport org.apache.iceberg.DataFile;\nimport org.apache.iceberg.FileFormat;\nimport org.apache.iceberg.PartitionSpec;\nimport org.apache.iceberg.Schema;\nimport org.apache.iceberg.Table;\nimport org.apache.iceberg.TableProperties;\nimport org.apache.iceberg.encryption.EncryptionManager;\nimport org.apache.iceberg.io.FileIO;\nimport org.apache.iceberg.io.LocationProvider;\nimport org.apache.iceberg.io.OutputFileFactory;\nimport org.apache.iceberg.io.TaskWriter;\nimport org.apache.iceberg.io.UnpartitionedWriter;\nimport org.apache.iceberg.relocated.com.google.common.collect.Lists;\nimport org.apache.iceberg.spark.SparkSchemaUtil;\nimport org.apache.spark.TaskContext;\nimport org.apache.spark.api.java.JavaRDD;\nimport org.apache.spark.broadcast.Broadcast;\nimport org.apache.spark.sql.catalyst.InternalRow;\nimport org.apache.spark.sql.types.StructType;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport static org.apache.iceberg.TableProperties.DEFAULT_NAME_MAPPING;\n\npublic class RowDataRewriter implements Serializable {\n\n  private static final Logger LOG = LoggerFactory.getLogger(RowDataRewriter.class);\n\n  private final Schema schema;\n  private final PartitionSpec spec;\n  private final Map properties;\n  private final FileFormat format;\n  private final Broadcast io;\n  private final Broadcast encryptionManager;\n  private final LocationProvider locations;\n  private final String nameMapping;\n  private final boolean caseSensitive;\n\n  public RowDataRewriter(Table table, PartitionSpec spec, boolean caseSensitive,\n                         Broadcast io, Broadcast encryptionManager) {\n    this.schema = table.schema();\n    this.spec = spec;\n    this.locations = table.locationProvider();\n    this.properties = table.properties();\n    this.io = io;\n    this.encryptionManager = encryptionManager;\n\n    this.caseSensitive = caseSensitive;\n    this.nameMapping = table.properties().get(DEFAULT_NAME_MAPPING);\n\n    String formatString = 
table.properties().getOrDefault(\n        TableProperties.DEFAULT_FILE_FORMAT, TableProperties.DEFAULT_FILE_FORMAT_DEFAULT);\n    this.format = FileFormat.valueOf(formatString.toUpperCase(Locale.ENGLISH));\n  }\n\n  public List rewriteDataForTasks(JavaRDD taskRDD) {\n    JavaRDD> dataFilesRDD = taskRDD.map(this::rewriteDataForTask);\n\n    return dataFilesRDD.collect().stream()\n        .flatMap(Collection::stream)\n        .collect(Collectors.toList());\n  }\n\n  private List rewriteDataForTask(CombinedScanTask task) throws Exception {\n    TaskContext context = TaskContext.get();\n    int partitionId = context.partitionId();\n    long taskId = context.taskAttemptId();\n\n    RowDataReader dataReader = new RowDataReader(\n        task, schema, schema, nameMapping, io.value(), encryptionManager.value(), caseSensitive);\n\n    StructType structType = SparkSchemaUtil.convert(schema);\n    SparkAppenderFactory appenderFactory = new SparkAppenderFactory(properties, schema, structType);\n    OutputFileFactory fileFactory = new OutputFileFactory(\n        spec, format, locations, io.value(), encryptionManager.value(), partitionId, taskId);\n\n    TaskWriter writer;\n    if (spec.fields().isEmpty()) {\n      writer = new UnpartitionedWriter<>(spec, format, appenderFactory, fileFactory, io.value(), Long.MAX_VALUE);\n    } else {\n      writer = new SparkPartitionedWriter(spec, format, appenderFactory, fileFactory, io.value(), Long.MAX_VALUE,\n          schema, structType);\n    }\n\n    try {\n      while (dataReader.next()) {\n        InternalRow row = dataReader.get();\n        writer.write(row);\n      }\n\n      dataReader.close();\n      dataReader = null;\n\n      writer.close();\n      return Lists.newArrayList(writer.complete());\n\n    } catch (Throwable originalThrowable) {\n      try {\n        LOG.error(\"Aborting task\", originalThrowable);\n        context.markTaskFailed(originalThrowable);\n\n        LOG.error(\"Aborting commit for partition {} (task {}, attempt {}, stage {}.{})\",\n            partitionId, taskId, context.attemptNumber(), context.stageId(), context.stageAttemptNumber());\n        if (dataReader != null) {\n          dataReader.close();\n        }\n        writer.abort();\n        LOG.error(\"Aborted commit for partition {} (task {}, attempt {}, stage {}.{})\",\n            partitionId, taskId, context.taskAttemptId(), context.stageId(), context.stageAttemptNumber());\n\n      } catch (Throwable inner) {\n        if (originalThrowable != inner) {\n          originalThrowable.addSuppressed(inner);\n          LOG.warn(\"Suppressing exception in catch: {}\", inner.getMessage(), inner);\n        }\n      }\n\n      if (originalThrowable instanceof Exception) {\n        throw originalThrowable;\n      } else {\n        throw new RuntimeException(originalThrowable);\n      }\n    }\n  }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":244}
{"diff_hunk":"@@ -30,6 +30,7 @@ import org.json.JSONObject;\n \n import android.text.TextUtils;\n \n+import com.salesforce.androidsdk.smartsync.manager.SyncManager;\n import com.salesforce.androidsdk.smartsync.model.SalesforceObject;\n import com.salesforce.androidsdk.smartsync.util.Constants;\n ","source_code":"\/*\n * Copyright (c) 2014, salesforce.com, inc.\n * All rights reserved.\n * Redistribution and use of this software in source and binary forms, with or\n * without modification, are permitted provided that the following conditions\n * are met:\n * - Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following disclaimer.\n * - Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and\/or other materials provided with the distribution.\n * - Neither the name of salesforce.com, inc. nor the names of its contributors\n * may be used to endorse or promote products derived from this software without\n * specific prior written permission of salesforce.com, inc.\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\/\npackage com.salesforce.samples.smartsyncexplorer.objects;\n\nimport org.json.JSONObject;\n\nimport android.text.TextUtils;\n\nimport com.salesforce.androidsdk.smartsync.model.SalesforceObject;\nimport com.salesforce.androidsdk.smartsync.util.Constants;\n\n\/**\n * A simple representation of a Contact object.\n *\n * @author bhariharan\n *\/\npublic class ContactObject extends SalesforceObject {\n\n\tpublic static final String LAST_NAME = \"LastName\";\n\tpublic static final String[] CONTACT_FIELDS = {\n\t\t\"Id\",\n\t\t\"Name\",\n\t\t\"FirstName\",\n\t\tLAST_NAME,\n\t\t\"Title\",\n\t\t\"Phone\",\n\t\t\"Email\",\n\t\t\"Department\",\n\t\t\"HomePhone\"\n\t};\n\n\t\n\t\/**\n\t * Parameterized constructor.\n\t *\n\t * @param data Raw data.\n\t *\/\n\tpublic ContactObject(JSONObject data) {\n\t\tsuper(data);\n\t\tobjectType = Constants.CONTACT;\n\t\tobjectId = data.optString(CONTACT_FIELDS[0]);\n\t\tname = data.optString(CONTACT_FIELDS[1]);\n\t}\n\n\t\/**\n\t * Returns first name of the contact.\n\t *\n\t * @return First name of the contact.\n\t *\/\n\tpublic String getFirstName() {\n\t\treturn sanitizeText(rawData.optString(CONTACT_FIELDS[2]));\n\t}\n\n\t\/**\n\t * Returns last name of the contact.\n\t *\n\t * @return Last name of the contact.\n\t *\/\n\tpublic String getLastName() {\n\t\treturn sanitizeText(rawData.optString(CONTACT_FIELDS[3]));\n\t}\n\n\t\/**\n\t * Returns title of the contact.\n\t *\n\t * @return Title of the contact.\n\t *\/\n\tpublic String getTitle() {\n\t\treturn sanitizeText(rawData.optString(CONTACT_FIELDS[4]));\n\t}\n\n\t\/**\n\t * Returns phone number of the 
contact.\n\t *\n\t * @return Phone number of the contact.\n\t *\/\n\tpublic String getPhone() {\n\t\treturn sanitizeText(rawData.optString(CONTACT_FIELDS[5]));\n\t}\n\n\t\/**\n\t * Returns e-mail address of the contact.\n\t *\n\t * @return E-mail address of the contact.\n\t *\/\n\tpublic String getEmail() {\n\t\treturn sanitizeText(rawData.optString(CONTACT_FIELDS[6]));\n\t}\n\n\t\/**\n\t * Returns department of the contact.\n\t *\n\t * @return Department of the contact.\n\t *\/\n\tpublic String getDepartment() {\n\t\treturn sanitizeText(rawData.optString(CONTACT_FIELDS[7]));\n\t}\n\n\t\/**\n\t * Returns home phone number of the contact.\n\t *\n\t * @return Home phone number of the contact.\n\t *\/\n\tpublic String getHomePhone() {\n\t\treturn sanitizeText(rawData.optString(CONTACT_FIELDS[8]));\n\t}\n\n\tprivate String sanitizeText(String text) {\n\t\tif (TextUtils.isEmpty(text) || text.equals(Constants.NULL_STRING)) {\n\t\t\treturn Constants.EMPTY_STRING;\n\t\t}\n\t\treturn text;\n\t}\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":245}
{"diff_hunk":"@@ -102,24 +102,13 @@ public final class OperationSignature extends Signature {\n             }\n         }\n \n+\n         private static boolean isGetterOrSetter(ASTMethodDeclaration node) {\n             String name = node.getName();\n-            if (NAME_PATTERN.matcher(name).matches()) {\n+            if (GETTER_OR_SETTER_NAME_PATTERN.matcher(name).matches()) {\n                 return true;\n             }\n \n-            if (node.isAbstract()) {\n-                return false;\n-            }\n-\n-            int length = node.getEndLine() - node.getBeginLine();\n-\n-            if (length > 6) {\n-                return false;\n-            } else if (length > 4 && node.getFirstDescendantOfType(ASTIfStatement.class) == null) {\n-                return false;\n-            }\n-\n             ClassScope scope = node.getScope().getEnclosingScope(ClassScope.class);\n \n             \/\/ fields names mapped to their types","source_code":"\/**\n * BSD-style license; for more info see http:\/\/pmd.sourceforge.net\/license.html\n *\/\n\npackage net.sourceforge.pmd.lang.java.oom.signature;\n\nimport java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Set;\nimport java.util.regex.Pattern;\n\nimport net.sourceforge.pmd.lang.java.ast.ASTConstructorDeclaration;\nimport net.sourceforge.pmd.lang.java.ast.ASTFieldDeclaration;\nimport net.sourceforge.pmd.lang.java.ast.ASTFormalParameters;\nimport net.sourceforge.pmd.lang.java.ast.ASTIfStatement;\nimport net.sourceforge.pmd.lang.java.ast.ASTMethodDeclaration;\nimport net.sourceforge.pmd.lang.java.ast.ASTMethodOrConstructorDeclaration;\nimport net.sourceforge.pmd.lang.java.ast.ASTName;\nimport net.sourceforge.pmd.lang.java.ast.ASTPrimaryExpression;\nimport net.sourceforge.pmd.lang.java.ast.ASTPrimaryPrefix;\nimport net.sourceforge.pmd.lang.java.ast.ASTPrimarySuffix;\nimport net.sourceforge.pmd.lang.java.ast.ASTReturnStatement;\nimport net.sourceforge.pmd.lang.java.ast.ASTStatementExpression;\nimport net.sourceforge.pmd.lang.java.ast.ASTType;\nimport net.sourceforge.pmd.lang.java.symboltable.ClassScope;\nimport net.sourceforge.pmd.lang.java.symboltable.VariableNameDeclaration;\nimport net.sourceforge.pmd.lang.symboltable.NameOccurrence;\n\n\/**\n * Signature for an operation.\n *\n * @author Cl\u00e9ment Fournier\n *\/\npublic final class OperationSignature extends Signature {\n\n    private static final Map<Integer, OperationSignature> POOL = new HashMap<>();\n\n    public final Role role;\n    public final boolean isAbstract;\n\n\n    private OperationSignature(Visibility visibility, Role role, boolean isAbstract) {\n        super(visibility);\n        this.role = role;\n        this.isAbstract = isAbstract;\n    }\n\n    \/**\n     * Builds an operation signature from a method or constructor declaration.\n     *\n     * @param node The node\n     *\n     * @return The signature of the parameter\n     *\/\n    public static OperationSignature buildFor(ASTMethodOrConstructorDeclaration node) {\n        int code = code(Visibility.get(node), Role.get(node), node.isAbstract());\n        if (!POOL.containsKey(code)) {\n            POOL.put(code, new OperationSignature(Visibility.get(node), Role.get(node), node.isAbstract()));\n        }\n        return POOL.get(code);\n    }\n\n    \/** Used internally by the pooler. *\/\n    private static int code(Visibility visibility, Role role, boolean isAbstract) {\n        return visibility.hashCode() * 31 + role.hashCode() * 2 + (isAbstract ? 1 : 0);\n    }\n\n    @Override\n    public boolean equals(Object o) {\n        return o instanceof OperationSignature && super.equals(o) && role == ((OperationSignature) o).role\n            && isAbstract == ((OperationSignature) o).isAbstract;\n    }\n\n    @Override\n    public int hashCode() {\n        return super.hashCode() * 2 + role.hashCode() * 4 + (isAbstract ? 1 : 0);\n    }\n\n    \/**\n     * Role of an operation.\n     *\/\n    public enum Role {\n        GETTER_OR_SETTER, CONSTRUCTOR, METHOD, STATIC;\n\n        private static final Pattern NAME_PATTERN = Pattern.compile(\"(?:get|set|is|increment|decrement)\\\\w*\");\n\n\n        public static Role get(ASTMethodOrConstructorDeclaration node) {\n            return node instanceof ASTConstructorDeclaration ? CONSTRUCTOR : get((ASTMethodDeclaration) node);\n        }\n\n\n        private static Role get(ASTMethodDeclaration node) {\n            if (node.isStatic()) {\n                return STATIC;\n            } else if (isGetterOrSetter(node)) {\n                return GETTER_OR_SETTER;\n            } else {\n                return METHOD;\n            }\n        }\n\n        private static boolean isGetterOrSetter(ASTMethodDeclaration node) {\n            String name = node.getName();\n            if (NAME_PATTERN.matcher(name).matches()) {\n                return true;\n            }\n\n            if (node.isAbstract()) {\n                return false;\n            }\n\n            int length = node.getEndLine() - node.getBeginLine();\n\n            if (length > 6) {\n                return false;\n            } else if (length > 4 && node.getFirstDescendantOfType(ASTIfStatement.class) == null) {\n                return false;\n            }\n\n            ClassScope scope = node.getScope().getEnclosingScope(ClassScope.class);\n\n            \/\/ fields names mapped to their types\n            Map<String, String> fieldNames = new HashMap<>();\n\n            for (Map.Entry<VariableNameDeclaration, List<NameOccurrence>> decl\n                : scope.getVariableDeclarations().entrySet()) {\n\n                ASTFieldDeclaration field = decl.getKey().getNode()\n                                                .getFirstParentOfType(ASTFieldDeclaration.class);\n\n                fieldNames.put(field.getVariableName(), field.getFirstChildOfType(ASTType.class).getTypeImage());\n            }\n\n            return isGetter(node, fieldNames) || isSetter(node, fieldNames);\n        }\n\n        \/** Attempts to determine if the method is a getter. *\/\n        private static boolean isGetter(ASTMethodDeclaration node, Map<String, String> fieldNames) {\n\n\n            List<ASTReturnStatement> returnStatements\n                = node.getBlock().findDescendantsOfType(ASTReturnStatement.class);\n\n            for (ASTReturnStatement st : returnStatements) {\n                ASTName name = st.getFirstDescendantOfType(ASTName.class);\n                if (name == null) {\n                    continue;\n                }\n\n                if (fieldNames.containsKey(name.getImage().split(\"\\\\.\")[0])) {\n                    return true;\n                }\n            }\n\n            return false;\n        }\n\n        \/** Attempts to determine if the method is a setter. *\/\n        private static boolean isSetter(ASTMethodDeclaration node, Map<String, String> fieldNames) {\n\n            if (node.getFirstDescendantOfType(ASTFormalParameters.class).jjtGetNumChildren() != 1) {\n                return false;\n            }\n\n            List<ASTStatementExpression> statementExpressions\n                = node.getBlock().findDescendantsOfType(ASTStatementExpression.class);\n            Set<String> namesToCheck = new HashSet<>();\n\n            for (ASTStatementExpression st : statementExpressions) {\n                ASTName name = st.getFirstDescendantOfType(ASTName.class);\n                if (name == null) {\n                    \/\/ not an assignment, check for method\n                    ASTPrimaryExpression prim = st.getFirstChildOfType(ASTPrimaryExpression.class);\n                    ASTPrimaryPrefix prefix = prim.getFirstChildOfType(ASTPrimaryPrefix.class);\n\n                    if (prefix.usesThisModifier() || prefix.usesSuperModifier()) {\n                        namesToCheck.add(prim.getFirstChildOfType(ASTPrimarySuffix.class).getImage());\n                    } else {\n                        namesToCheck.add(prefix.getImage().split(\"\\\\.\")[0]);\n                    }\n                } else {\n                    \/\/ this is a direct assignment\n                    namesToCheck.add(name.getImage().split(\"\\\\.\")[0]);\n                }\n            }\n\n            for (String name : namesToCheck) {\n                if (fieldNames.containsKey(name)) {\n                    return true;\n                }\n            }\n            return false;\n        }\n    }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":246}
{"diff_hunk":"@@ -125,13 +125,13 @@ public class TransactionValidatorProviderTest {\n   public void validatorsAtHeadContractCallIsCached() {\n     final List<Address> validators =\n Lists.newArrayList(Address.fromHexString(\"5\"), Address.fromHexString(\"6\"));\n- when(validatorContractController.getValidators(3)).thenReturn(validators);\n+ when(validatorContractController.getValidators(3, CONTRACT_ADDRESS)).thenReturn(validators);\n \n final TransactionValidatorProvider validatorProvider =\n- new TransactionValidatorProvider(blockChain, validatorContractController);\n+ new TransactionValidatorProvider(blockChain, validatorContractController, forksSchedule);\n \n assertThat(validatorProvider.getValidatorsAtHead()).containsExactlyElementsOf(validators);\n- verify(validatorContractController).getValidators(3);\n+ verify(validatorContractController).getValidators(3, CONTRACT_ADDRESS);\n \n assertThat(validatorProvider.getValidatorsAtHead()).containsExactlyElementsOf(validators);\n verifyNoMoreInteractions(validatorContractController);","source_code":"\/*\n * Copyright ConsenSys AG.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n * the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n * an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n *\/\npackage org.hyperledger.besu.consensus.qbft.validator;\n\nimport static java.util.Collections.emptyList;\nimport static org.assertj.core.api.Assertions.assertThat;\nimport static org.hyperledger.besu.ethereum.core.InMemoryKeyValueStorageProvider.createInMemoryBlockchain;\nimport static org.mockito.Mockito.mock;\nimport static org.mockito.Mockito.verify;\nimport static org.mockito.Mockito.verifyNoMoreInteractions;\nimport static org.mockito.Mockito.when;\n\nimport org.hyperledger.besu.datatypes.Address;\nimport org.hyperledger.besu.datatypes.Hash;\nimport org.hyperledger.besu.ethereum.chain.MutableBlockchain;\nimport org.hyperledger.besu.ethereum.core.AddressHelpers;\nimport org.hyperledger.besu.ethereum.core.Block;\nimport org.hyperledger.besu.ethereum.core.BlockBody;\nimport org.hyperledger.besu.ethereum.core.BlockHeaderTestFixture;\n\nimport java.util.Collection;\nimport java.util.List;\nimport java.util.stream.Collectors;\n\nimport com.google.common.collect.Lists;\nimport org.apache.tuweni.bytes.Bytes;\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic class TransactionValidatorProviderTest {\n private final ValidatorContractController validatorContractController =\n mock(ValidatorContractController.class);\n\n protected MutableBlockchain blockChain;\n protected Block genesisBlock;\n protected Block block_1;\n protected Block block_2;\n private Block block_3;\n\n private final BlockHeaderTestFixture headerBuilder = new BlockHeaderTestFixture();\n\n @Before\n public void setup() {\n genesisBlock = createEmptyBlock(0, Hash.ZERO);\n blockChain = createInMemoryBlockchain(genesisBlock);\n headerBuilder.extraData(Bytes.wrap(new byte[32]));\n\n block_1 = createEmptyBlock(1, genesisBlock.getHeader().getHash());\n block_2 = createEmptyBlock(2, block_1.getHeader().getHash());\n block_3 = createEmptyBlock(3, block_2.getHeader().getHash());\n\n blockChain.appendBlock(block_1, emptyList());\n blockChain.appendBlock(block_2, emptyList());\n blockChain.appendBlock(block_3, emptyList());\n }\n\n private Block createEmptyBlock(final long blockNumber, final Hash parentHash) {\n headerBuilder.number(blockNumber).parentHash(parentHash).coinbase(AddressHelpers.ofValue(0));\n return new Block(headerBuilder.buildHeader(), new BlockBody(emptyList(), emptyList()));\n }\n\n @Test\n public void validatorsAfterBlockAreRetrievedUsingContractController() {\n final List<Address> validatorsAt2 =\n Lists.newArrayList(Address.fromHexString(\"5\"), Address.fromHexString(\"6\"));\n final List<Address> validatorsAt3 =\n Lists.newArrayList(\n Address.fromHexString(\"5\"), Address.fromHexString(\"6\"), Address.fromHexString(\"7\"));\n when(validatorContractController.getValidators(2)).thenReturn(validatorsAt2);\n when(validatorContractController.getValidators(3)).thenReturn(validatorsAt3);\n\n final TransactionValidatorProvider validatorProvider =\n new TransactionValidatorProvider(blockChain, validatorContractController);\n\n assertThat(validatorProvider.getValidatorsAfterBlock(block_2.getHeader()))\n .containsExactlyElementsOf(validatorsAt2);\n assertThat(validatorProvider.getValidatorsAfterBlock(block_3.getHeader()))\n .containsExactlyElementsOf(validatorProvider.getValidatorsAfterBlock(block_3.getHeader()));\n }\n\n @Test\n public void validatorsForBlockAreRetrievedUsingContractController() {\n final List<Address> validatorsAt2 =\n Lists.newArrayList(Address.fromHexString(\"5\"), Address.fromHexString(\"6\"));\n final List<Address> validatorsAt3 =\n Lists.newArrayList(\n Address.fromHexString(\"5\"), Address.fromHexString(\"6\"), Address.fromHexString(\"7\"));\n when(validatorContractController.getValidators(2)).thenReturn(validatorsAt2);\n when(validatorContractController.getValidators(3)).thenReturn(validatorsAt3);\n\n final TransactionValidatorProvider validatorProvider =\n new TransactionValidatorProvider(blockChain, validatorContractController);\n\n assertThat(validatorProvider.getValidatorsForBlock(block_2.getHeader()))\n .containsExactlyElementsOf(validatorsAt2);\n assertThat(validatorProvider.getValidatorsForBlock(block_3.getHeader()))\n .containsExactlyElementsOf(validatorProvider.getValidatorsForBlock(block_3.getHeader()));\n }\n\n @Test\n public void validatorsAtHeadAreRetrievedUsingContractController() {\n final List<Address> validators =\n Lists.newArrayList(Address.fromHexString(\"5\"), Address.fromHexString(\"6\"));\n when(validatorContractController.getValidators(3)).thenReturn(validators);\n\n final TransactionValidatorProvider validatorProvider =\n new TransactionValidatorProvider(blockChain, validatorContractController);\n\n assertThat(validatorProvider.getValidatorsAtHead()).containsExactlyElementsOf(validators);\n }\n\n @Test\n public void validatorsAtHeadContractCallIsCached() {\n final List<Address> validators =\n Lists.newArrayList(Address.fromHexString(\"5\"), Address.fromHexString(\"6\"));\n when(validatorContractController.getValidators(3)).thenReturn(validators);\n\n final TransactionValidatorProvider validatorProvider =\n new TransactionValidatorProvider(blockChain, validatorContractController);\n\n assertThat(validatorProvider.getValidatorsAtHead()).containsExactlyElementsOf(validators);\n verify(validatorContractController).getValidators(3);\n\n assertThat(validatorProvider.getValidatorsAtHead()).containsExactlyElementsOf(validators);\n verifyNoMoreInteractions(validatorContractController);\n }\n\n @Test\n public void validatorsAfterBlockContractCallIsCached() {\n final List<Address> validators =\n Lists.newArrayList(Address.fromHexString(\"5\"), Address.fromHexString(\"6\"));\n when(validatorContractController.getValidators(2)).thenReturn(validators);\n\n final TransactionValidatorProvider validatorProvider =\n new TransactionValidatorProvider(blockChain, validatorContractController);\n\n final Collection<Address> result =\n validatorProvider.getValidatorsAfterBlock(block_2.getHeader());\n assertThat(result).containsExactlyElementsOf(validators);\n verify(validatorContractController).getValidators(2);\n\n final Collection<Address> resultCached =\n validatorProvider.getValidatorsAfterBlock(block_2.getHeader());\n assertThat(resultCached).containsExactlyElementsOf(validators);\n verifyNoMoreInteractions(validatorContractController);\n }\n\n @Test\n public void validatorsMustBeSorted() {\n final List<Address> validators =\n Lists.newArrayList(\n Address.fromHexString(\"9\"), Address.fromHexString(\"8\"), Address.fromHexString(\"7\"));\n when(validatorContractController.getValidators(3)).thenReturn(validators);\n\n final TransactionValidatorProvider validatorProvider =\n new TransactionValidatorProvider(blockChain, validatorContractController);\n\n final Collection<Address> result = validatorProvider.getValidatorsAtHead();\n final List<Address>
expectedValidators =\n validators.stream().sorted().collect(Collectors.toList());\n assertThat(result).containsExactlyElementsOf(expectedValidators);\n }\n\n @Test\n public void voteProviderIsEmpty() {\n TransactionValidatorProvider transactionValidatorProvider =\n new TransactionValidatorProvider(blockChain, validatorContractController);\n\n assertThat(transactionValidatorProvider.getVoteProviderAtHead()).isEmpty();\n }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":247} {"diff_hunk":"@@ -14,13 +14,17 @@\n *\/\n package org.hyperledger.besu.plugin.services.storage.rocksdb.unsegmented;\n \n+import static java.util.stream.Collectors.toUnmodifiableSet;\n+\n import org.hyperledger.besu.plugin.services.MetricsSystem;\n import org.hyperledger.besu.plugin.services.exception.StorageException;\n import org.hyperledger.besu.plugin.services.metrics.OperationTimer;\n import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;\n import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;\n+import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBExceptionAdapter;\n import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetrics;\n import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetricsFactory;\n+import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbKeyIterator;\n import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbUtil;\n import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBConfiguration;\n import org.hyperledger.besu.services.kvstore.KeyValueStorageTransactionTransitionValidatorDecorator;","source_code":"\/*\n * Copyright ConsenSys AG.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n * the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n * an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n * specific language governing permissions and limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n *\/\npackage org.hyperledger.besu.plugin.services.storage.rocksdb.unsegmented;\n\nimport org.hyperledger.besu.plugin.services.MetricsSystem;\nimport org.hyperledger.besu.plugin.services.exception.StorageException;\nimport org.hyperledger.besu.plugin.services.metrics.OperationTimer;\nimport org.hyperledger.besu.plugin.services.storage.KeyValueStorage;\nimport org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;\nimport org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetrics;\nimport org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetricsFactory;\nimport org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbUtil;\nimport org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBConfiguration;\nimport org.hyperledger.besu.services.kvstore.KeyValueStorageTransactionTransitionValidatorDecorator;\n\nimport java.util.Optional;\nimport java.util.Set;\nimport java.util.concurrent.atomic.AtomicBoolean;\nimport java.util.function.Predicate;\n\nimport com.google.common.collect.Sets;\nimport org.apache.logging.log4j.LogManager;\nimport org.apache.logging.log4j.Logger;\nimport org.rocksdb.BlockBasedTableConfig;\nimport org.rocksdb.LRUCache;\nimport org.rocksdb.Options;\nimport org.rocksdb.RocksDBException;\nimport org.rocksdb.RocksIterator;\nimport org.rocksdb.Statistics;\nimport org.rocksdb.TransactionDB;\nimport org.rocksdb.TransactionDBOptions;\nimport org.rocksdb.WriteOptions;\n\npublic class RocksDBKeyValueStorage implements KeyValueStorage {\n\n static {\n RocksDbUtil.loadNativeLibrary();\n }\n\n private static final Logger LOG = LogManager.getLogger();\n\n private final Options options;\n private final TransactionDBOptions txOptions;\n private final TransactionDB db;\n private final AtomicBoolean closed = new AtomicBoolean(false);\n private final RocksDBMetrics rocksDBMetrics;\n\n public RocksDBKeyValueStorage(\n final RocksDBConfiguration configuration,\n final MetricsSystem metricsSystem,\n final RocksDBMetricsFactory rocksDBMetricsFactory) {\n\n try {\n final Statistics stats = new Statistics();\n options =\n new Options()\n .setCreateIfMissing(true)\n .setMaxOpenFiles(configuration.getMaxOpenFiles())\n .setTableFormatConfig(createBlockBasedTableConfig(configuration))\n .setMaxBackgroundCompactions(configuration.getMaxBackgroundCompactions())\n .setStatistics(stats);\n options.getEnv().setBackgroundThreads(configuration.getBackgroundThreadCount());\n\n txOptions = new TransactionDBOptions();\n db = TransactionDB.open(options, txOptions, configuration.getDatabaseDir().toString());\n rocksDBMetrics = rocksDBMetricsFactory.create(metricsSystem, configuration, db, stats);\n } catch (final RocksDBException e) {\n throw new StorageException(e);\n }\n }\n\n @Override\n public void clear() throws StorageException {\n try (final RocksIterator rocksIterator = db.newIterator()) {\n rocksIterator.seekToFirst();\n if (rocksIterator.isValid()) {\n final byte[] firstKey = rocksIterator.key();\n rocksIterator.seekToLast();\n if (rocksIterator.isValid()) {\n final byte[] lastKey = rocksIterator.key();\n db.deleteRange(firstKey, lastKey);\n db.delete(lastKey);\n }\n }\n } catch (final RocksDBException e) {\n throw new StorageException(e);\n }\n }\n\n @Override\n public boolean containsKey(final byte[] key) throws StorageException {\n return get(key).isPresent();\n }\n\n @Override\n public Optional<byte[]> get(final byte[] key) throws StorageException {\n throwIfClosed();\n\n try (final OperationTimer.TimingContext ignored =\n rocksDBMetrics.getReadLatency().startTimer()) {\n return Optional.ofNullable(db.get(key));\n } catch (final RocksDBException e) {\n throw new StorageException(e);\n }\n }\n\n @Override\n public long removeAllKeysUnless(final Predicate<byte[]> retainCondition) throws StorageException {\n long removedNodeCounter = 0;\n try (final RocksIterator rocksIterator = db.newIterator()) {\n for (rocksIterator.seekToFirst(); rocksIterator.isValid(); rocksIterator.next()) {\n final byte[] key = rocksIterator.key();\n if (!retainCondition.test(key)) {\n removedNodeCounter++;\n db.delete(key);\n }\n }\n } catch (final RocksDBException e) {\n throw new StorageException(e);\n }\n return removedNodeCounter;\n }\n\n @Override\n public Set<byte[]> getAllKeysThat(final Predicate<byte[]> returnCondition) {\n final Set<byte[]> returnedKeys = Sets.newIdentityHashSet();\n try (final RocksIterator rocksIterator = db.newIterator()) {\n for (rocksIterator.seekToFirst(); rocksIterator.isValid(); rocksIterator.next()) {\n final byte[] key = rocksIterator.key();\n if (returnCondition.test(key)) {\n returnedKeys.add(key);\n }\n }\n }\n return returnedKeys;\n }\n\n @Override\n public KeyValueStorageTransaction startTransaction() throws StorageException {\n throwIfClosed();\n final WriteOptions options = new WriteOptions();\n return new KeyValueStorageTransactionTransitionValidatorDecorator(\n new RocksDBTransaction(db.beginTransaction(options), options, rocksDBMetrics));\n }\n\n @Override\n public void close() {\n if (closed.compareAndSet(false, true)) {\n txOptions.close();\n options.close();\n db.close();\n }\n }\n\n private BlockBasedTableConfig createBlockBasedTableConfig(final RocksDBConfiguration config) {\n final LRUCache cache = new LRUCache(config.getCacheCapacity());\n return new BlockBasedTableConfig().setBlockCache(cache);\n }\n\n private void throwIfClosed() {\n if (closed.get()) {\n LOG.error(\"Attempting to use a closed RocksDBKeyValueStorage\");\n throw new IllegalStateException(\"Storage has been closed\");\n }\n }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":248}
{"diff_hunk":"@@ -134,15 +134,15 @@ public class BigIntegerModularExponentiationPrecompiledContract\n }\n }\n \n- private static final BigInteger baseLength(final Bytes input) {\n+ public static final BigInteger baseLength(final Bytes input) {\n return extractParameter(input, BASE_LENGTH_OFFSET, PARAMETER_LENGTH);\n }\n \n- private static final BigInteger exponentLength(final Bytes input) {\n+ public static final BigInteger exponentLength(final Bytes input) {\n return extractParameter(input, EXPONENT_LENGTH_OFFSET, PARAMETER_LENGTH);\n }\n \n- private static final BigInteger modulusLength(final Bytes input) {\n+ public static final BigInteger modulusLength(final Bytes input) {\n return extractParameter(input, MODULUS_LENGTH_OFFSET, PARAMETER_LENGTH);\n }\n ","source_code":"\/*\n * Copyright ConsenSys AG.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n * the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n * an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n * specific language governing permissions and limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n *\/\npackage org.hyperledger.besu.ethereum.mainnet.precompiles;\n\nimport org.hyperledger.besu.ethereum.core.Gas;\nimport org.hyperledger.besu.ethereum.mainnet.AbstractPrecompiledContract;\nimport org.hyperledger.besu.ethereum.vm.GasCalculator;\nimport org.hyperledger.besu.ethereum.vm.MessageFrame;\n\nimport java.math.BigInteger;\nimport java.util.Arrays;\n\nimport org.apache.tuweni.bytes.Bytes;\nimport org.apache.tuweni.bytes.MutableBytes;\n\n\/\/ The big integer modular exponentiation precompiled contract defined in EIP-198.\npublic class BigIntegerModularExponentiationPrecompiledContract\n extends AbstractPrecompiledContract {\n\n private static final BigInteger WORD_SIZE = BigInteger.valueOf(32);\n private static final BigInteger BITS_IN_BYTE = BigInteger.valueOf(8);\n private static final BigInteger BASE_OFFSET = BigInteger.valueOf(96);\n private static final BigInteger MAX_FIRST_EXPONENT_BYTES = BigInteger.valueOf(32);\n private static final BigInteger GQUADDIVISOR = BigInteger.valueOf(20);\n private static final int PARAMETER_LENGTH = 32;\n private static final int BASE_LENGTH_OFFSET = 0;\n private static final int EXPONENT_LENGTH_OFFSET = 32;\n private static final int MODULUS_LENGTH_OFFSET = 64;\n private static final int MAX_GAS_BITS = 255;\n\n private static final BigInteger BIGINT_4 = BigInteger.valueOf(4);\n private static final BigInteger BIGINT_16 = BigInteger.valueOf(16);\n private static final BigInteger BIGINT_64 = BigInteger.valueOf(64);\n private static final BigInteger BIGINT_96 = BigInteger.valueOf(96);\n private static final BigInteger BIGINT_480 = BigInteger.valueOf(480);\n private static final BigInteger BIGINT_1024 = BigInteger.valueOf(1_024L);\n private static final BigInteger BIGINT_3072 = BigInteger.valueOf(3_072L);\n private static final BigInteger BIGINT_199680 = BigInteger.valueOf(199_680L);\n\n public BigIntegerModularExponentiationPrecompiledContract(final GasCalculator gasCalculator) {\n super(\"BigIntModExp\", gasCalculator);\n }\n\n @Override\n public Gas gasRequirement(final Bytes input) {\n \/\/ Typically gas calculations are delegated to a GasCalculator instance,\n \/\/ but the complexity and coupling with other parts of the precompile seem\n \/\/ like reasonable reasons to do the math here instead.\n final BigInteger baseLength = baseLength(input);\n final BigInteger exponentLength = exponentLength(input);\n final BigInteger modulusLength = modulusLength(input);\n final BigInteger exponentOffset = BASE_OFFSET.add(baseLength);\n final int firstExponentBytesCap = exponentLength.min(MAX_FIRST_EXPONENT_BYTES).intValue();\n final BigInteger firstExpBytes = extractParameter(input, exponentOffset, firstExponentBytesCap);\n final BigInteger adjustedExponentLength = adjustedExponentLength(exponentLength, firstExpBytes);\n final BigInteger multiplicationComplexity =\n multiplicationComplexity(baseLength.max(modulusLength));\n final BigInteger gasRequirement =\n multiplicationComplexity\n .multiply(adjustedExponentLength.max(BigInteger.ONE))\n .divide(GQUADDIVISOR);\n\n \/\/ Gas price is so large it will not fit in a Gas type, so an\n \/\/ very very very unlikely high gas price is used instead.\n if (gasRequirement.bitLength() > MAX_GAS_BITS) {\n return Gas.of(Long.MAX_VALUE);\n } else {\n return Gas.of(gasRequirement);\n }\n }\n\n @Override\n public Bytes compute(final Bytes input, final MessageFrame 
messageFrame) {\n final BigInteger baseLength = baseLength(input);\n final BigInteger exponentLength = exponentLength(input);\n final BigInteger modulusLength = modulusLength(input);\n final BigInteger exponentOffset = BASE_OFFSET.add(baseLength);\n final BigInteger modulusOffset = exponentOffset.add(exponentLength);\n final BigInteger base = extractParameter(input, BASE_OFFSET, baseLength.intValue());\n final BigInteger exp = extractParameter(input, exponentOffset, exponentLength.intValue());\n final BigInteger mod = extractParameter(input, modulusOffset, modulusLength.intValue());\n\n final Bytes modExp;\n \/\/ Result must be the length of the modulus.\n final MutableBytes result = MutableBytes.create(modulusLength.intValue());\n if (mod.compareTo(BigInteger.ZERO) == 0) {\n modExp = MutableBytes.EMPTY;\n } else {\n \/\/ BigInteger zero-pads positive values whose most significant bit is a 1 if\n \/\/ the padding was not there.\n modExp = Bytes.wrap(base.modPow(exp, mod).toByteArray()).trimLeadingZeros();\n }\n\n modExp.copyTo(result, result.size() - modExp.size());\n return result;\n }\n\n \/\/ Equation to estimate the multiplication complexity.\n private static BigInteger multiplicationComplexity(final BigInteger x) {\n if (x.compareTo(BIGINT_64) <= 0) {\n return square(x);\n } else if (x.compareTo(BIGINT_1024) <= 0) {\n return square(x).divide(BIGINT_4).add(BIGINT_96.multiply(x)).subtract(BIGINT_3072);\n } else {\n return square(x).divide(BIGINT_16).add(BIGINT_480.multiply(x)).subtract(BIGINT_199680);\n }\n }\n\n private static BigInteger bitLength(final BigInteger n) {\n return n.compareTo(BigInteger.ZERO) == 0\n ? BigInteger.ZERO\n : BigInteger.valueOf(n.bitLength() - 1);\n }\n\n private static BigInteger adjustedExponentLength(\n final BigInteger exponentLength, final BigInteger firstExpBytes) {\n final BigInteger bitLength = bitLength(firstExpBytes);\n if (exponentLength.compareTo(WORD_SIZE) <= 0) {\n return bitLength;\n } else {\n return BITS_IN_BYTE.multiply(exponentLength.subtract(WORD_SIZE)).add(bitLength);\n }\n }\n\n private static final BigInteger baseLength(final Bytes input) {\n return extractParameter(input, BASE_LENGTH_OFFSET, PARAMETER_LENGTH);\n }\n\n private static final BigInteger exponentLength(final Bytes input) {\n return extractParameter(input, EXPONENT_LENGTH_OFFSET, PARAMETER_LENGTH);\n }\n\n private static final BigInteger modulusLength(final Bytes input) {\n return extractParameter(input, MODULUS_LENGTH_OFFSET, PARAMETER_LENGTH);\n }\n\n private static BigInteger extractParameter(\n final Bytes input, final int offset, final int length) {\n if (offset > input.size() || length == 0) {\n return BigInteger.ZERO;\n }\n final byte[] raw = Arrays.copyOfRange(input.toArray(), offset, offset + length);\n return new BigInteger(1, raw);\n }\n\n private static BigInteger extractParameter(\n final Bytes input, final BigInteger offset, final int length) {\n if (BigInteger.valueOf(input.size()).compareTo(offset) <= 0) {\n return BigInteger.ZERO;\n }\n return extractParameter(input, offset.intValue(), length);\n }\n\n private static BigInteger square(final BigInteger n) {\n return n.multiply(n);\n }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":249} {"diff_hunk":"@@ -19,6 +19,7 @@ package azkaban.utils;\n import java.util.Collection;\n \n public class AbstractMailer {\n+ private static int MB_IN_BYTES = 1048576;\n private String clientHostname;\n private int clientPort;\n private boolean usesSSL;","source_code":"\/*\n * Copyright 2012 LinkedIn Corp.\n *\n 
* Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n * use this file except in compliance with the License. You may obtain a copy of\n * the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n * License for the specific language governing permissions and limitations under\n * the License.\n *\/\n\npackage azkaban.utils;\n\nimport java.util.Collection;\n\npublic class AbstractMailer {\n private String clientHostname;\n private int clientPort;\n private boolean usesSSL;\n\n private String mailHost;\n private String mailUser;\n private String mailPassword;\n private String mailSender;\n private String azkabanName;\n\n private String referenceURL;\n\n public AbstractMailer(Props props) {\n this.azkabanName = props.getString(\"azkaban.name\", \"azkaban\");\n this.mailHost = props.getString(\"mail.host\", \"localhost\");\n this.mailUser = props.getString(\"mail.user\", \"\");\n this.mailPassword = props.getString(\"mail.password\", \"\");\n this.mailSender = props.getString(\"mail.sender\", \"\");\n\n this.clientHostname = props.get(\"server.hostname\");\n this.clientPort = props.getInt(\"server.port\");\n this.usesSSL = props.getBoolean(\"server.useSSL\");\n\n if (usesSSL) {\n referenceURL =\n \"https:\/\/\" + clientHostname\n + (clientPort == 443 ? \"\/\" : \":\" + clientPort + \"\/\");\n } else {\n referenceURL =\n \"http:\/\/\" + clientHostname\n + (clientPort == 80 ? \"\/\" : \":\" + clientPort + \"\/\");\n }\n }\n\n public String getReferenceURL() {\n return referenceURL;\n }\n\n protected EmailMessage createEmailMessage(String subject, String mimetype,\n Collection emailList) {\n EmailMessage message = new EmailMessage(mailHost, mailUser, mailPassword);\n message.setFromAddress(mailSender);\n message.addAllToAddress(emailList);\n message.setMimeType(mimetype);\n message.setSubject(subject);\n\n return message;\n }\n\n public EmailMessage prepareEmailMessage(String subject, String mimetype,\n Collection emailList) {\n return createEmailMessage(subject, mimetype, emailList);\n }\n\n public String getAzkabanName() {\n return azkabanName;\n }\n\n public String getMailHost() {\n return mailHost;\n }\n\n public String getMailUser() {\n return mailUser;\n }\n\n public String getMailPassword() {\n return mailPassword;\n }\n\n public String getMailSender() {\n return mailSender;\n }\n}\n","lang_cluster":"Java","diff_tag":0,"review_comment":"","id":250} {"diff_hunk":"@@ -1,6 +1,6 @@\n import options from '.\/options';\n import { defer } from '.\/util';\n-import { renderComponent } from '.\/vdom\/component';\n+import { renderComponent, catchErrorInComponent } from '.\/vdom\/component';\n \n \/** Managed queue of dirty components to be re-rendered *\/\n ","source_code":"import options from '.\/options';\nimport { defer } from '.\/util';\nimport { renderComponent } from '.\/vdom\/component';\n\n\/** Managed queue of dirty components to be re-rendered *\/\n\nlet items = [];\n\nexport function enqueueRender(component) {\n\tif (!component._dirty && (component._dirty = true) && items.push(component)==1) {\n\t\t(options.debounceRendering || defer)(rerender);\n\t}\n}\n\nexport function rerender() {\n\tlet p, list = items;\n\titems = [];\n\twhile ( (p = list.pop()) ) {\n\t\tif (p._dirty) 
renderComponent(p);\n\t}\n}\n","lang_cluster":"Javascript","diff_tag":0,"review_comment":"","id":251}
{"diff_hunk":"@@ -123,6 +123,25 @@ describe('Core.setCellMeta', () => {\n \n hot.setCellMeta(0, 1, 'className', className);\n \n+ expect(beforeSetCellMeta).toHaveBeenCalledWith(0, 1, 'className', className, undefined, undefined);\n expect(afterSetCellMeta).toHaveBeenCalledWith(0, 1, 'className', className, undefined, undefined);\n });\n+\n+ it('should NOT call the `afterSetCellMeta` hook, if the `beforeSetCellMeta` returned false', () => {\n+ const className = 'htCenter htMiddle';\n+ const afterSetCellMeta = jasmine.createSpy('afterSetCellMeta');\n+ const hot = handsontable({\n+ minRows: 5,\n+ minCols: 5,\n+ beforeSetCellMeta: () => false,\n+ afterSetCellMeta\n+ });\n+\n+ hot.rowIndexMapper.setIndexesSequence([4, 3, 2, 1, 0]);\n+ hot.columnIndexMapper.setIndexesSequence([4, 3, 2, 1, 0]);\n+\n+ hot.setCellMeta(0, 1, 'className', className);\n+\n+ expect(afterSetCellMeta).not.toHaveBeenCalled();\n+ });\n });","source_code":"describe('Core.setCellMeta', () => {\n const id = 'testContainer';\n\n beforeEach(function() {\n this.$container = $(`<div id=\"${id}\">
<\/div>`).appendTo('body');\n });\n\n afterEach(function() {\n if (this.$container) {\n destroy();\n this.$container.remove();\n }\n });\n\n it('should set correct meta className for cell', () => {\n\n const className = 'htCenter htMiddle';\n\n handsontable({\n afterCellMetaReset() {\n this.setCellMeta(0, 0, 'className', className);\n }\n });\n\n const cellMeta = getCellMeta(0, 0);\n\n expect(cellMeta.className).not.toBeUndefined();\n expect(cellMeta.className).toEqual(className);\n });\n\n it('should set proper cell meta when indexes was modified', () => {\n const hot = handsontable({\n minRows: 5,\n minCols: 5\n });\n\n hot.rowIndexMapper.setIndexesSequence([4, 3, 2, 1, 0]);\n hot.columnIndexMapper.setIndexesSequence([4, 3, 2, 1, 0]);\n\n setCellMeta(0, 1, 'key', 'value');\n\n expect(getCellMeta(0, 1).key).toEqual('value');\n });\n\n it('should set correct meta className for non existed cell', () => {\n const className = 'htCenter htMiddle';\n\n handsontable({\n data: Handsontable.helper.createSpreadsheetData(5, 5),\n afterCellMetaReset() {\n this.setCellMeta(100, 100, 'className', className);\n }\n });\n\n const cellMeta = getCellMeta(100, 100);\n\n expect(cellMeta.className).not.toBeUndefined();\n expect(cellMeta.className).toEqual(className);\n });\n\n it('should set correct meta classNames for cells using cell in configuration', () => {\n const classNames = [\n 'htCenter htTop',\n 'htRight htBottom'\n ];\n\n handsontable({\n cell: [\n { row: 0, col: 0, className: classNames[0] },\n { row: 1, col: 1, className: classNames[1] }\n ]\n });\n\n expect(spec().$container.find('tbody tr:eq(0) td:eq(0)')[0].className).toEqual(classNames[0]);\n expect(spec().$container.find('tbody tr:eq(1) td:eq(1)')[0].className).toEqual(classNames[1]);\n });\n\n it('should change cell meta data with updateSettings when the cell option is defined', () => {\n const classNames = [\n 'htCenter htTop',\n 'htRight htBottom'\n ];\n\n handsontable({\n cell: [\n { row: 0, col: 0, className: classNames[0] },\n { row: 1, col: 1, className: classNames[1] }\n ]\n });\n\n expect(spec().$container.find('tbody tr:eq(0) td:eq(0)')[0].className).toEqual(classNames[0]);\n expect(spec().$container.find('tbody tr:eq(1) td:eq(1)')[0].className).toEqual(classNames[1]);\n\n updateSettings({\n cell: []\n });\n\n expect(spec().$container.find('tbody tr:eq(0) td:eq(0)')[0].className).toEqual('');\n expect(spec().$container.find('tbody tr:eq(1) td:eq(1)')[0].className).toEqual('');\n\n updateSettings({\n cell: [\n { row: 0, col: 0, className: classNames[1] },\n { row: 1, col: 1, className: classNames[0] }\n ]\n });\n\n expect(spec().$container.find('tbody tr:eq(0) td:eq(0)')[0].className).toEqual(classNames[1]);\n expect(spec().$container.find('tbody tr:eq(1) td:eq(1)')[0].className).toEqual(classNames[0]);\n });\n\n it('should call `afterSetCellMeta` plugin hook with visual indexes as parameters', () => {\n const className = 'htCenter htMiddle';\n const afterSetCellMeta = jasmine.createSpy('afterSetCellMeta');\n const hot = handsontable({\n minRows: 5,\n minCols: 5,\n afterSetCellMeta\n });\n\n hot.rowIndexMapper.setIndexesSequence([4, 3, 2, 1, 0]);\n hot.columnIndexMapper.setIndexesSequence([4, 3, 2, 1, 0]);\n\n hot.setCellMeta(0, 1, 'className', className);\n\n expect(afterSetCellMeta).toHaveBeenCalledWith(0, 1, 'className', className, undefined, undefined);\n });\n});\n","lang_cluster":"Javascript","diff_tag":0,"review_comment":"","id":252} {"diff_hunk":"@@ -51,10 +51,6 @@ function DashboardBounceRateWidget() {\n \t} = useSelect( ( 
select ) => {\n \t\tconst store = select( STORE_NAME );\n \n-\t\tconst accountID = store.getAccountID();\n-\t\tconst profileID = store.getProfileID();\n-\t\tconst internalWebPropertyID = store.getInternalWebPropertyID();\n-\n \t\tconst args = {\n \t\t\tdateRange: select( CORE_USER ).getDateRange(),\n \t\t\tmultiDateRange: 1,","source_code":"\/**\n * DashboardAllTrafficWidget component.\n *\n * Site Kit by Google, Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n\/**\n * WordPress dependencies\n *\/\nimport { __, _x } from '@wordpress\/i18n';\n\n\/**\n * Internal dependencies\n *\/\nimport Data from 'googlesitekit-data';\nimport { STORE_NAME } from '..\/..\/datastore\/constants';\nimport { STORE_NAME as CORE_SITE } from '..\/..\/..\/..\/googlesitekit\/datastore\/site\/constants';\nimport { STORE_NAME as CORE_USER } from '..\/..\/..\/..\/googlesitekit\/datastore\/user\/constants';\nimport whenActive from '..\/..\/..\/..\/util\/when-active';\nimport PreviewBlock from '..\/..\/..\/..\/components\/PreviewBlock';\nimport DataBlock from '..\/..\/..\/..\/components\/data-block';\nimport Sparkline from '..\/..\/..\/..\/components\/Sparkline';\nimport AnalyticsInactiveCTA from '..\/..\/..\/..\/components\/AnalyticsInactiveCTA';\nimport { changeToPercent } from '..\/..\/..\/..\/util';\nimport applyEntityToReportPath from '..\/..\/util\/applyEntityToReportPath';\nimport ReportError from '..\/..\/..\/..\/components\/ReportError';\nimport ReportZero from '..\/..\/..\/..\/components\/ReportZero';\nimport parseDimensionStringToDate from '..\/..\/util\/parseDimensionStringToDate';\nimport { isZeroReport } from '..\/..\/util';\n\nconst { useSelect } = Data;\n\nfunction DashboardBounceRateWidget() {\n\tconst {\n\t\tdata,\n\t\terror,\n\t\tloading,\n\t\tserviceURL,\n\t} = useSelect( ( select ) => {\n\t\tconst store = select( STORE_NAME );\n\n\t\tconst accountID = store.getAccountID();\n\t\tconst profileID = store.getProfileID();\n\t\tconst internalWebPropertyID = store.getInternalWebPropertyID();\n\n\t\tconst args = {\n\t\t\tdateRange: select( CORE_USER ).getDateRange(),\n\t\t\tmultiDateRange: 1,\n\t\t\tdimensions: 'ga:date',\n\t\t\tmetrics: [\n\t\t\t\t{\n\t\t\t\t\texpression: 'ga:bounceRate',\n\t\t\t\t\talias: 'Bounce Rate',\n\t\t\t\t},\n\t\t\t],\n\t\t};\n\n\t\tconst url = select( CORE_SITE ).getCurrentEntityURL();\n\t\tif ( url ) {\n\t\t\targs.url = url;\n\t\t}\n\t\treturn {\n\t\t\tdata: store.getReport( args ),\n\t\t\terror: store.getErrorForSelector( 'getReport', [ args ] ),\n\t\t\tloading: ! 
store.hasFinishedResolution( 'getReport', [ args ] ),\n\t\t\tserviceURL: store.getServiceURL(\n\t\t\t\t{\n\t\t\t\t\tpath: applyEntityToReportPath( url, `\/report\/visitors-overview\/a${ accountID }w${ internalWebPropertyID }p${ profileID }\/` ),\n\t\t\t\t}\n\t\t\t),\n\t\t};\n\t} );\n\n\tif ( loading ) {\n\t\treturn ;\n\t}\n\n\tif ( error ) {\n\t\treturn ;\n\t}\n\n\tif ( isZeroReport( data ) ) {\n\t\treturn ;\n\t}\n\n\tconst sparkLineData = [\n\t\t[\n\t\t\t{ type: 'date', label: 'Day' },\n\t\t\t{ type: 'number', label: 'Bounce Rate' },\n\t\t],\n\t];\n\n\tconst dataRows = data[ 0 ].data.rows;\n\t\/\/ We only want half the date range, having `multiDateRange` in the query doubles the range.\n\tfor ( let i = Math.ceil( dataRows.length \/ 2 ); i < dataRows.length; i++ ) {\n\t\tconst { values } = dataRows[ i ].metrics[ 0 ];\n\t\tconst dateString = dataRows[ i ].dimensions[ 0 ];\n\t\tconst date = parseDimensionStringToDate( dateString );\n\t\tsparkLineData.push( [\n\t\t\tdate,\n\t\t\tvalues[ 0 ],\n\t\t] );\n\t}\n\n\tconst { totals } = data[ 0 ].data;\n\tconst lastMonth = totals[ 0 ].values;\n\tconst previousMonth = totals[ 1 ].values;\n\tconst averageBounceRate = lastMonth[ 0 ];\n\tconst averageBounceRateChange = changeToPercent( previousMonth[ 0 ], lastMonth[ 0 ] );\n\n\treturn (\n\t\t\n\t\t\t}\n\t\t\/>\n\t);\n}\n\nexport default whenActive( {\n\tmoduleName: 'analytics',\n\tfallbackComponent: AnalyticsInactiveCTA,\n} )( DashboardBounceRateWidget );\n","lang_cluster":"Javascript","diff_tag":0,"review_comment":"","id":253} {"diff_hunk":"@@ -27,16 +27,16 @@ fi_FI.strings = {\n dashboardWindowTitle: 'Tiedoston latausikkuna (Paina Esc sulkeaksesi)',\n dataUploadedOfTotal: '%{complete} \/ %{total}',\n done: 'Valmis',\n- dropHereOr: 'Pudota tiedostot t\u00e4h\u00e4n tai %{browse}',\n- dropHint: 'Pudota tiedostot t\u00e4h\u00e4n',\n- dropPaste: 'Pudota tiedostot t\u00e4h\u00e4n, liit\u00e4 tai %{browse}',\n- dropPasteImport: 'Pudota tiedostot t\u00e4h\u00e4n, liit\u00e4, %{browse} tai tuo',\n+ dropHereOr: 'Raahaa tiedostot t\u00e4h\u00e4n tai %{browse}',\n+ dropHint: 'Raahaa tiedostot t\u00e4h\u00e4n',\n+ dropPaste: 'Raahaa tiedostot t\u00e4h\u00e4n, liit\u00e4 tai %{browse}',\n+ dropPasteImport: 'Raahaa tiedostot t\u00e4h\u00e4n, liit\u00e4, %{browse} tai tuo',\n edit: 'Muokkaa',\n editFile: 'Muokkaa tiedostoa',\n editing: 'Muokataan %{file}',\n emptyFolderAdded: 'Ei lis\u00e4tty tiedostoja tyhj\u00e4st\u00e4 kansiosta',\n encoding: 'Koodataan...',\n- enterCorrectUrl: 'Ep\u00e4kelpo osoita: Varmista, ett\u00e4 osoite osoittaa suoraan tiedostoon',\n+ enterCorrectUrl: 'Ep\u00e4kelpo osoite: Varmista, ett\u00e4 osoite osoittaa suoraan tiedostoon',\n enterUrlToImport: 'Anna osoite tuodaksesi tiedoston',\n exceedsSize: 'Tiedoston koko ylitt\u00e4\u00e4 sallitun maksimin',\n failedToFetch: 'Companion ei voinut ladata tiedostoa osoitteesta, onko osoite varmasti oikea?',","source_code":"const fi_FI = {}\n\nfi_FI.strings = {\n addMore: 'Lis\u00e4\u00e4',\n addMoreFiles: 'Lis\u00e4\u00e4 tiedostoja',\n addingMoreFiles: 'Lis\u00e4t\u00e4\u00e4n tiedostoja',\n allowAccessDescription: 'Jotta voit ottaa kuvia tai videota kamerallasi, sinun tulee antaa t\u00e4lle sivustolle oikeus k\u00e4ytt\u00e4\u00e4 kameraasi.',\n allowAccessTitle: 'Salli kameran k\u00e4ytt\u00f6, kiitos',\n authenticateWith: 'Mene %{pluginName}',\n authenticateWithTitle: '%{pluginName} vaadittu tunnistautumiseen, jotta voit valita tiedostoja',\n back: 'Takaisin',\n browse: 'selaa',\n cancel: 'Peruuta',\n cancelUpload: 'Peruuta l\u00e4hetys',\n 
chooseFiles: 'Valitse tiedostot',\n closeModal: 'Sulje ikkuna',\n companionAuthError: 'K\u00e4ytt\u00f6oikeus vaadittu',\n companionError: 'Yhdist\u00e4minen Companioniin ep\u00e4onnistui',\n complete: 'Valmis',\n connectedToInternet: 'Yhdistetty Internettiin',\n copyLink: 'Kopioi linkki',\n copyLinkToClipboardFallback: 'Kopioi alla oleva linkki',\n copyLinkToClipboardSuccess: 'Linkki kopioitu leikep\u00f6yd\u00e4lle',\n creatingAssembly: 'Valmistellaan l\u00e4hetyst\u00e4...',\n creatingAssemblyFailed: 'Transloadit: Assemblyn luonti ep\u00e4onnistui',\n dashboardTitle: 'Tiedoston Lataaja',\n dashboardWindowTitle: 'Tiedoston latausikkuna (Paina Esc sulkeaksesi)',\n dataUploadedOfTotal: '%{complete} \/ %{total}',\n done: 'Valmis',\n dropHereOr: 'Pudota tiedostot t\u00e4h\u00e4n tai %{browse}',\n dropHint: 'Pudota tiedostot t\u00e4h\u00e4n',\n dropPaste: 'Pudota tiedostot t\u00e4h\u00e4n, liit\u00e4 tai %{browse}',\n dropPasteImport: 'Pudota tiedostot t\u00e4h\u00e4n, liit\u00e4, %{browse} tai tuo',\n edit: 'Muokkaa',\n editFile: 'Muokkaa tiedostoa',\n editing: 'Muokataan %{file}',\n emptyFolderAdded: 'Ei lis\u00e4tty tiedostoja tyhj\u00e4st\u00e4 kansiosta',\n encoding: 'Koodataan...',\n enterCorrectUrl: 'Ep\u00e4kelpo osoita: Varmista, ett\u00e4 osoite osoittaa suoraan tiedostoon',\n enterUrlToImport: 'Anna osoite tuodaksesi tiedoston',\n exceedsSize: 'Tiedoston koko ylitt\u00e4\u00e4 sallitun maksimin',\n failedToFetch: 'Companion ei voinut ladata tiedostoa osoitteesta, onko osoite varmasti oikea?',\n failedToUpload: 'Ei voitu l\u00e4hett\u00e4\u00e4 tiedostoa %{file}',\n fileSource: 'Tiedoston l\u00e4hde: %{name}',\n filesUploadedOfTotal: {\n '0': '%{complete} \/ %{smart_count} tiedostosta l\u00e4hetetty',\n '1': '%{complete} \/ %{smart_count} tiedostoa l\u00e4hetetty',\n '2': '%{complete} \/ %{smart_count} tiedostoa l\u00e4hetetty'\n },\n filter: 'Suodata',\n finishEditingFile: 'Lopeta tiedoston muokkaus',\n folderAdded: {\n '0': 'Lis\u00e4tty %{smart_count} tiedosto kansiosta %{folder}',\n '1': 'Lis\u00e4tty %{smart_count} tiedostoa kansiosta %{folder}',\n '2': 'Lis\u00e4tty %{smart_count} tiedostoa kansiosta %{folder}'\n },\n import: 'Tuo',\n importFrom: 'Tuo kohteesta %{name}',\n link: 'Linkki',\n loading: 'Ladataan...',\n logOut: 'Kirjaudu ulos',\n myDevice: 'Minun laite',\n noFilesFound: 'Sinulla ei ole tiedostoja tai kansioita t\u00e4\u00e4ll\u00e4',\n noInternetConnection: 'Ei Internet-yhteytt\u00e4',\n openFolderNamed: 'Avaa kansio %{name}',\n pause: 'Pys\u00e4yt\u00e4',\n pauseUpload: 'Pys\u00e4yt\u00e4 l\u00e4hetys',\n paused: 'Pys\u00e4ytetty',\n poweredBy: 'Powered by',\n preparingUpload: 'Valmistellaan l\u00e4hetyst\u00e4...',\n processingXFiles: {\n '0': 'K\u00e4sitell\u00e4\u00e4n %{smart_count} tiedostoa',\n '1': 'K\u00e4sitell\u00e4\u00e4n %{smart_count} tiedostoa',\n '2': 'K\u00e4sitell\u00e4\u00e4n %{smart_count} tiedostoa'\n },\n removeFile: 'Poista tiedosto',\n resetFilter: 'Resetoi suodatin',\n resume: 'Jatka',\n resumeUpload: 'Jatka l\u00e4hetyst\u00e4',\n retry: 'Yrit\u00e4 uudelleen',\n retryUpload: 'Yrit\u00e4 uudelleen l\u00e4hetyst\u00e4',\n saveChanges: 'Tallenna muutokset',\n selectAllFilesFromFolderNamed: 'Valitse kaikki tiedostot kansiosta %{name}',\n selectFileNamed: 'Valitse tiedosto %{name}',\n selectX: {\n '0': 'Valitse %{smart_count}',\n '1': 'Valitse %{smart_count}',\n '2': 'Valitse %{smart_count}'\n },\n smile: 'Hymyile!',\n startRecording: 'Aloita videon tallennus',\n stopRecording: 'Pys\u00e4yt\u00e4 videon tallennus',\n takePicture: 'Ota 
kuva',\n timedOut: 'L\u00e4hetys jumittunut %{seconds} sekunniksi, keskeytet\u00e4\u00e4n.',\n unselectAllFilesFromFolderNamed: 'Poista tiedostojen valinta kansiossa %{name}',\n unselectFileNamed: 'Poista valinta tiedostosta %{name}',\n upload: 'L\u00e4het\u00e4',\n uploadComplete: 'L\u00e4hetys valmis',\n uploadFailed: 'L\u00e4hetys ep\u00e4onnistui',\n uploadPaused: 'L\u00e4hetys pys\u00e4ytetty',\n uploadXFiles: {\n '0': 'L\u00e4het\u00e4 %{smart_count} tiedosto',\n '1': 'L\u00e4het\u00e4 %{smart_count} tiedostoa',\n '2': 'L\u00e4het\u00e4 %{smart_count} tiedostoa'\n },\n uploadXNewFiles: {\n '0': 'L\u00e4het\u00e4 +%{smart_count} tiedosto',\n '1': 'L\u00e4het\u00e4 +%{smart_count} tiedostoa',\n '2': 'L\u00e4het\u00e4 +%{smart_count} tiedostoa'\n },\n uploading: 'Uploading',\n uploadingXFiles: {\n '0': 'L\u00e4hetet\u00e4\u00e4n %{smart_count} tiedosto',\n '1': 'L\u00e4hetet\u00e4\u00e4n %{smart_count} tiedostoa',\n '2': 'L\u00e4hetet\u00e4\u00e4n %{smart_count} tiedostoa'\n },\n xFilesSelected: {\n '0': '%{smart_count} tiedosto valittu',\n '1': '%{smart_count} tiedostoa valittu',\n '2': '%{smart_count} tiedostoa valittu'\n },\n xMoreFilesAdded: {\n '0': '%{smart_count} tiedosto added',\n '1': '%{smart_count} tiedostoa added',\n '2': '%{smart_count} tiedostoa added'\n },\n xTimeLeft: '%{time} j\u00e4ljell\u00e4',\n youCanOnlyUploadFileTypes: 'Voit l\u00e4hett\u00e4\u00e4 vain: %{types}',\n youCanOnlyUploadX: {\n '0': 'Voit l\u00e4hett\u00e4\u00e4 vain %{smart_count} tiedosto',\n '1': 'Voit l\u00e4hett\u00e4\u00e4 vain %{smart_count} tiedostoa',\n '2': 'Voit l\u00e4hett\u00e4\u00e4 vain %{smart_count} tiedostoa'\n },\n youHaveToAtLeastSelectX: {\n '0': 'Sinun pit\u00e4\u00e4 valita v\u00e4hint\u00e4\u00e4n %{smart_count} tiedosto',\n '1': 'Sinun pit\u00e4\u00e4 valita v\u00e4hint\u00e4\u00e4n %{smart_count} tiedostoa',\n '2': 'Sinun pit\u00e4\u00e4 valita v\u00e4hint\u00e4\u00e4n %{smart_count} tiedostoa'\n }\n}\n\nfi_FI.pluralize = function (n) {\n if (n === 1) {\n return 0\n }\n return 1\n}\n\nif (typeof window !== 'undefined' && typeof window.Uppy !== 'undefined') {\n window.Uppy.locales.fi_FI = fi_FI\n}\n\nmodule.exports = fi_FI\n","lang_cluster":"Javascript","diff_tag":0,"review_comment":"","id":254} {"diff_hunk":"@@ -93,5 +93,27 @@ describe('Transactions', function() {\n });\n }\n });\n+\n+ it('should not error if transactions are supported', {\n+ metadata: { requires: { topology: ['sharded'], mongodb: '>=4.1.0' } },\n+ test: function(done) {\n+ const configuration = this.configuration;\n+ const client = configuration.newClient(configuration.url());\n+\n+ client.connect((err, client) => {\n+ const session = client.startSession();\n+ const db = client.db(configuration.db);\n+ const coll = db.collection('transaction_error_test');\n+ coll.insertOne({ a: 1 }, err => {\n+ expect(err).to.not.exist;\n+ expect(() => session.startTransaction()).to.not.throw();\n+\n+ session.endSession(() => {\n+ client.close(done);\n+ });\n+ });\n+ });\n+ }\n+ });\n });\n });","source_code":"'use strict';\n\nconst chai = require('chai');\nconst expect = chai.expect;\nconst core = require('..\/..\/lib\/core');\nconst sessions = core.Sessions;\nconst TestRunnerContext = require('.\/runner').TestRunnerContext;\nconst gatherTestSuites = require('.\/runner').gatherTestSuites;\nconst generateTopologyTests = require('.\/runner').generateTopologyTests;\n\ndescribe('Transactions', function() {\n const testContext = new TestRunnerContext();\n\n [\n { name: 'spec tests', specPath: `${__dirname}\/spec\/transactions` 
},\n {\n name: 'withTransaction spec tests',\n specPath: `${__dirname}\/spec\/transactions\/convenient-api`\n }\n ].forEach(suiteSpec => {\n describe(suiteSpec.name, function() {\n const testSuites = gatherTestSuites(suiteSpec.specPath);\n after(() => testContext.teardown());\n before(function() {\n return testContext.setup(this.configuration);\n });\n\n function testFilter(spec) {\n const SKIP_TESTS = [\n \/\/ commitTransaction retry seems to be swallowed by mongos in these three cases\n 'commitTransaction retry succeeds on new mongos',\n 'commitTransaction retry fails on new mongos',\n 'unpin after transient error within a transaction and commit',\n 'count'\n ];\n\n return SKIP_TESTS.indexOf(spec.description) === -1;\n }\n\n generateTopologyTests(testSuites, testContext, testFilter);\n });\n });\n\n describe('withTransaction', function() {\n let session, sessionPool;\n beforeEach(() => {\n const topology = new core.Server();\n sessionPool = new sessions.ServerSessionPool(topology);\n session = new sessions.ClientSession(topology, sessionPool);\n });\n\n afterEach(() => {\n sessionPool.endAllPooledSessions();\n });\n\n it('should provide a useful error if a Promise is not returned', {\n metadata: { requires: { topology: ['replicaset', 'sharded'], mongodb: '>=4.1.5' } },\n test: function(done) {\n function fnThatDoesntReturnPromise() {\n return false;\n }\n\n expect(() => session.withTransaction(fnThatDoesntReturnPromise)).to.throw(\n \/must return a Promise\/\n );\n\n session.endSession(done);\n }\n });\n });\n\n describe('startTransaction', function() {\n it('should error if transactions are not supported', {\n metadata: { requires: { topology: ['sharded'], mongodb: '>4.0.0' } },\n test: function(done) {\n const configuration = this.configuration;\n const client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });\n\n client.connect((err, client) => {\n const session = client.startSession();\n const db = client.db(configuration.db);\n const coll = db.collection('transaction_error_test');\n coll.insertOne({ a: 1 }, err => {\n expect(err).to.not.exist;\n expect(() => session.startTransaction()).to.throw(\n 'Transactions are not supported on sharded clusters in MongoDB < 4.2.'\n );\n\n session.endSession(() => {\n client.close(done);\n });\n });\n });\n }\n });\n });\n});\n","lang_cluster":"Javascript","diff_tag":0,"review_comment":"","id":255} {"diff_hunk":"@@ -14,6 +14,8 @@ module.exports = class RequestClient {\n this.uppy = uppy\n this.opts = opts\n this.onReceiveResponse = this.onReceiveResponse.bind(this)\n+ this.allowedHeaders = []\n+ this.preflightDone = false\n }\n \n get hostname () {","source_code":"'use strict'\n\nconst AuthError = require('.\/AuthError')\n\n\/\/ Remove the trailing slash so we can always safely append \/xyz.\nfunction stripSlash (url) {\n return url.replace(\/\\\/$\/, '')\n}\n\nmodule.exports = class RequestClient {\n static VERSION = require('..\/package.json').version\n\n constructor (uppy, opts) {\n this.uppy = uppy\n this.opts = opts\n this.onReceiveResponse = this.onReceiveResponse.bind(this)\n }\n\n get hostname () {\n const { companion } = this.uppy.getState()\n const host = this.opts.companionUrl\n return stripSlash(companion && companion[host] ? 
companion[host] : host)\n }\n\n get defaultHeaders () {\n return {\n 'Accept': 'application\/json',\n 'Content-Type': 'application\/json'\n }\n }\n\n headers () {\n return Promise.resolve(Object.assign({}, this.defaultHeaders, this.opts.serverHeaders || {}))\n }\n\n _getPostResponseFunc (skip) {\n return (response) => {\n if (!skip) {\n return this.onReceiveResponse(response)\n }\n\n return response\n }\n }\n\n onReceiveResponse (response) {\n const state = this.uppy.getState()\n const companion = state.companion || {}\n const host = this.opts.companionUrl\n const headers = response.headers\n \/\/ Store the self-identified domain name for the Companion instance we just hit.\n if (headers.has('i-am') && headers.get('i-am') !== companion[host]) {\n this.uppy.setState({\n companion: Object.assign({}, companion, {\n [host]: headers.get('i-am')\n })\n })\n }\n return response\n }\n\n _getUrl (url) {\n if (\/^(https?:|)\\\/\\\/\/.test(url)) {\n return url\n }\n return `${this.hostname}\/${url}`\n }\n\n _json (res) {\n if (res.status === 401) {\n throw new AuthError()\n }\n\n if (res.status < 200 || res.status > 300) {\n throw new Error(`Failed request to ${res.url}. ${res.statusText}`)\n }\n return res.json()\n }\n\n get (path, skipPostResponse) {\n return new Promise((resolve, reject) => {\n this.headers().then((headers) => {\n fetch(this._getUrl(path), {\n method: 'get',\n headers: headers,\n credentials: 'same-origin'\n })\n .then(this._getPostResponseFunc(skipPostResponse))\n .then((res) => this._json(res).then(resolve))\n .catch((err) => {\n err = err.isAuthError ? err : new Error(`Could not get ${this._getUrl(path)}. ${err}`)\n reject(err)\n })\n })\n })\n }\n\n post (path, data, skipPostResponse) {\n return new Promise((resolve, reject) => {\n this.headers().then((headers) => {\n fetch(this._getUrl(path), {\n method: 'post',\n headers: headers,\n credentials: 'same-origin',\n body: JSON.stringify(data)\n })\n .then(this._getPostResponseFunc(skipPostResponse))\n .then((res) => this._json(res).then(resolve))\n .catch((err) => {\n err = err.isAuthError ? err : new Error(`Could not post ${this._getUrl(path)}. ${err}`)\n reject(err)\n })\n })\n })\n }\n\n delete (path, data, skipPostResponse) {\n return new Promise((resolve, reject) => {\n this.headers().then((headers) => {\n fetch(`${this.hostname}\/${path}`, {\n method: 'delete',\n headers: headers,\n credentials: 'same-origin',\n body: data ? JSON.stringify(data) : null\n })\n .then(this._getPostResponseFunc(skipPostResponse))\n .then((res) => this._json(res).then(resolve))\n .catch((err) => {\n err = err.isAuthError ? err : new Error(`Could not delete ${this._getUrl(path)}. 
${err}`)\n reject(err)\n })\n })\n })\n }\n}\n","lang_cluster":"Javascript","diff_tag":0,"review_comment":"","id":256} {"diff_hunk":"@@ -80,7 +80,7 @@ define(['loading', 'libraryMenu', 'globalize', 'emby-checkbox', 'emby-select'],\n \n function showAlertText(options) {\n return new Promise(function (resolve, reject) {\n- require(['alert'], function (alert) {\n+ import('alert').then(({default: alert}) => {\n alert(options).then(resolve, reject);\n });\n });","source_code":"define(['loading', 'libraryMenu', 'globalize', 'emby-checkbox', 'emby-select'], function (loading, libraryMenu, globalize) {\n 'use strict';\n\n function onSubmit(e) {\n var form = this;\n var localAddress = form.querySelector('#txtLocalAddress').value;\n var enableUpnp = form.querySelector('#chkEnableUpnp').checked;\n confirmSelections(localAddress, enableUpnp, function () {\n var validationResult = getValidationAlert(form);\n\n if (validationResult) {\n showAlertText(validationResult);\n return;\n }\n\n validateHttps(form).then(function () {\n loading.show();\n ApiClient.getServerConfiguration().then(function (config) {\n config.LocalNetworkSubnets = form.querySelector('#txtLanNetworks').value.split(',').map(function (s) {\n return s.trim();\n }).filter(function (s) {\n return s.length > 0;\n });\n config.RemoteIPFilter = form.querySelector('#txtExternalAddressFilter').value.split(',').map(function (s) {\n return s.trim();\n }).filter(function (s) {\n return s.length > 0;\n });\n config.IsRemoteIPFilterBlacklist = 'blacklist' === form.querySelector('#selectExternalAddressFilterMode').value;\n config.PublicPort = form.querySelector('#txtPublicPort').value;\n config.PublicHttpsPort = form.querySelector('#txtPublicHttpsPort').value;\n config.HttpServerPortNumber = form.querySelector('#txtPortNumber').value;\n config.HttpsPortNumber = form.querySelector('#txtHttpsPort').value;\n config.EnableHttps = form.querySelector('#chkEnableHttps').checked;\n config.RequireHttps = form.querySelector('#chkRequireHttps').checked;\n config.EnableUPnP = enableUpnp;\n config.BaseUrl = form.querySelector('#txtBaseUrl').value;\n config.EnableRemoteAccess = form.querySelector('#chkRemoteAccess').checked;\n config.CertificatePath = form.querySelector('#txtCertificatePath').value || null;\n config.CertificatePassword = form.querySelector('#txtCertPassword').value || null;\n config.LocalNetworkAddresses = localAddress ? 
[localAddress] : [];\n ApiClient.updateServerConfiguration(config).then(Dashboard.processServerConfigurationUpdateResult, Dashboard.processErrorResponse);\n });\n });\n });\n e.preventDefault();\n }\n\n function triggerChange(select) {\n var evt = document.createEvent('HTMLEvents');\n evt.initEvent('change', false, true);\n select.dispatchEvent(evt);\n }\n\n function getValidationAlert(form) {\n if (form.querySelector('#txtPublicPort').value === form.querySelector('#txtPublicHttpsPort').value) {\n return 'The public http and https ports must be different.';\n }\n\n if (form.querySelector('#txtPortNumber').value === form.querySelector('#txtHttpsPort').value) {\n return 'The http and https ports must be different.';\n }\n\n return null;\n }\n\n function validateHttps(form) {\n var certPath = form.querySelector('#txtCertificatePath').value || null;\n var httpsEnabled = form.querySelector('#chkEnableHttps').checked;\n\n if (httpsEnabled && !certPath) {\n return showAlertText({\n title: globalize.translate('TitleHostingSettings'),\n text: globalize.translate('HttpsRequiresCert')\n }).then(Promise.reject);\n }\n\n return Promise.resolve();\n }\n\n function showAlertText(options) {\n return new Promise(function (resolve, reject) {\n require(['alert'], function (alert) {\n alert(options).then(resolve, reject);\n });\n });\n }\n\n function confirmSelections(localAddress, enableUpnp, callback) {\n if (localAddress || !enableUpnp) {\n showAlertText({\n title: globalize.translate('TitleHostingSettings'),\n text: globalize.translate('SettingsWarning')\n }).then(callback);\n } else {\n callback();\n }\n }\n\n return function (view, params) {\n function loadPage(page, config) {\n page.querySelector('#txtPortNumber').value = config.HttpServerPortNumber;\n page.querySelector('#txtPublicPort').value = config.PublicPort;\n page.querySelector('#txtPublicHttpsPort').value = config.PublicHttpsPort;\n page.querySelector('#txtLocalAddress').value = config.LocalNetworkAddresses[0] || '';\n page.querySelector('#txtLanNetworks').value = (config.LocalNetworkSubnets || []).join(', ');\n page.querySelector('#txtExternalAddressFilter').value = (config.RemoteIPFilter || []).join(', ');\n page.querySelector('#selectExternalAddressFilterMode').value = config.IsRemoteIPFilterBlacklist ? 
'blacklist' : 'whitelist';\n page.querySelector('#chkRemoteAccess').checked = null == config.EnableRemoteAccess || config.EnableRemoteAccess;\n page.querySelector('#txtHttpsPort').value = config.HttpsPortNumber;\n page.querySelector('#chkEnableHttps').checked = config.EnableHttps;\n page.querySelector('#chkRequireHttps').checked = config.RequireHttps;\n page.querySelector('#txtBaseUrl').value = config.BaseUrl || '';\n var txtCertificatePath = page.querySelector('#txtCertificatePath');\n txtCertificatePath.value = config.CertificatePath || '';\n page.querySelector('#txtCertPassword').value = config.CertificatePassword || '';\n page.querySelector('#chkEnableUpnp').checked = config.EnableUPnP;\n triggerChange(page.querySelector('#chkRemoteAccess'));\n loading.hide();\n }\n\n view.querySelector('#chkRemoteAccess').addEventListener('change', function () {\n if (this.checked) {\n view.querySelector('.fldExternalAddressFilter').classList.remove('hide');\n view.querySelector('.fldExternalAddressFilterMode').classList.remove('hide');\n view.querySelector('.fldPublicPort').classList.remove('hide');\n view.querySelector('.fldPublicHttpsPort').classList.remove('hide');\n view.querySelector('.fldEnableUpnp').classList.remove('hide');\n } else {\n view.querySelector('.fldExternalAddressFilter').classList.add('hide');\n view.querySelector('.fldExternalAddressFilterMode').classList.add('hide');\n view.querySelector('.fldPublicPort').classList.add('hide');\n view.querySelector('.fldPublicHttpsPort').classList.add('hide');\n view.querySelector('.fldEnableUpnp').classList.add('hide');\n }\n });\n view.querySelector('#btnSelectCertPath').addEventListener('click', function () {\n require(['directorybrowser'], function (directoryBrowser) {\n var picker = new directoryBrowser();\n picker.show({\n includeFiles: true,\n includeDirectories: true,\n callback: function (path) {\n if (path) {\n view.querySelector('#txtCertificatePath').value = path;\n }\n\n picker.close();\n },\n header: globalize.translate('HeaderSelectCertificatePath')\n });\n });\n });\n view.querySelector('.dashboardHostingForm').addEventListener('submit', onSubmit);\n view.addEventListener('viewshow', function (e) {\n loading.show();\n ApiClient.getServerConfiguration().then(function (config) {\n loadPage(view, config);\n });\n });\n };\n});\n","lang_cluster":"Javascript","diff_tag":0,"review_comment":"","id":257} {"diff_hunk":"@@ -61,6 +61,24 @@ function colorContrastEvaluate(node, options, virtualNode) {\n \t\t: contrastRatio.large;\n \tconst isValid = contrast > expected;\n \n+\t\/\/ if element or a parent has pseudo content then we need to mark\n+\t\/\/ as needs review\n+\tlet parentNode = node.parentElement;\n+\twhile (parentNode) {\n+\t\tif (\n+\t\t\thasPsuedoElement(parentNode, ':before') ||\n+\t\t\thasPsuedoElement(parentNode, ':after')\n+\t\t) {\n+\t\t\tthis.data({\n+\t\t\t\tmessageKey: 'pseudoContent'\n+\t\t\t});\n+\t\t\tthis.relatedNodes(parentNode);\n+\t\t\treturn undefined;\n+\t\t}\n+\n+\t\tparentNode = parentNode.parentElement;\n+\t}\n+\n \t\/\/ ratio is outside range\n \tif (\n \t\t(typeof minThreshold === 'number' && contrast < minThreshold) ||","source_code":"import { isVisible } from '..\/..\/commons\/dom';\nimport {\n\tvisibleVirtual,\n\thasUnicode,\n\tsanitize,\n\tremoveUnicode\n} from '..\/..\/commons\/text';\nimport {\n\tgetBackgroundColor,\n\tgetForegroundColor,\n\tincompleteData,\n\tgetContrast\n} from '..\/..\/commons\/color';\n\nfunction colorContrastEvaluate(node, options, virtualNode) {\n\tif (!isVisible(node, false)) 
{\n\t\treturn true;\n\t}\n\n\tconst {\n\t\tignoreUnicode,\n\t\tignoreLength,\n\t\tboldValue,\n\t\tboldTextPt,\n\t\tlargeTextPt,\n\t\tcontrastRatio\n\t} = options;\n\n\tconst visibleText = visibleVirtual(virtualNode, false, true);\n\tconst textContainsOnlyUnicode =\n\t\thasUnicode(visibleText, {\n\t\t\tnonBmp: true\n\t\t}) &&\n\t\tsanitize(\n\t\t\tremoveUnicode(visibleText, {\n\t\t\t\tnonBmp: true\n\t\t\t})\n\t\t) === '';\n\n\tif (textContainsOnlyUnicode && ignoreUnicode) {\n\t\tthis.data({ messageKey: 'nonBmp' });\n\t\treturn undefined;\n\t}\n\n\tconst bgNodes = [];\n\tconst bgColor = getBackgroundColor(node, bgNodes);\n\tconst fgColor = getForegroundColor(node, false, bgColor);\n\n\tconst nodeStyle = window.getComputedStyle(node);\n\tconst fontSize = parseFloat(nodeStyle.getPropertyValue('font-size'));\n\tconst fontWeight = nodeStyle.getPropertyValue('font-weight');\n\tconst bold = parseFloat(fontWeight) >= boldValue || fontWeight === 'bold';\n\n\tconst contrast = getContrast(bgColor, fgColor);\n\tconst ptSize = Math.ceil(fontSize * 72) \/ 96;\n\tconst isSmallFont =\n\t\t(bold && ptSize < boldTextPt) || (!bold && ptSize < largeTextPt);\n\n\tconst { expected, minThreshold, maxThreshold } = isSmallFont\n\t\t? contrastRatio.normal\n\t\t: contrastRatio.large;\n\tconst isValid = contrast > expected;\n\n\t\/\/ ratio is outside range\n\tif (\n\t\t(typeof minThreshold === 'number' && contrast < minThreshold) ||\n\t\t(typeof maxThreshold === 'number' && contrast > maxThreshold)\n\t) {\n\t\treturn true;\n\t}\n\n\t\/\/ truncate ratio to three digits while rounding down\n\t\/\/ 4.499 = 4.49, 4.019 = 4.01\n\tconst truncatedResult = Math.floor(contrast * 100) \/ 100;\n\n\t\/\/ if fgColor or bgColor are missing, get more information.\n\tlet missing;\n\tif (bgColor === null) {\n\t\tmissing = incompleteData.get('bgColor');\n\t}\n\n\tconst equalRatio = truncatedResult === 1;\n\tconst shortTextContent = visibleText.length === 1;\n\tif (equalRatio) {\n\t\tmissing = incompleteData.set('bgColor', 'equalRatio');\n\t} else if (shortTextContent && !ignoreLength) {\n\t\t\/\/ Check that the text content is a single character long\n\t\tmissing = 'shortTextContent';\n\t}\n\n\t\/\/ need both independently in case both are missing\n\tconst data = {\n\t\tfgColor: fgColor ? fgColor.toHexString() : undefined,\n\t\tbgColor: bgColor ? bgColor.toHexString() : undefined,\n\t\tcontrastRatio: truncatedResult,\n\t\tfontSize: `${((fontSize * 72) \/ 96).toFixed(1)}pt (${fontSize}px)`,\n\t\tfontWeight: bold ? 
'bold' : 'normal',\n\t\tmessageKey: missing,\n\t\texpectedContrastRatio: expected + ':1'\n\t};\n\n\tthis.data(data);\n\n\t\/\/ We don't know, so we'll put it into Can't Tell\n\tif (\n\t\tfgColor === null ||\n\t\tbgColor === null ||\n\t\tequalRatio ||\n\t\t(shortTextContent && !ignoreLength && !isValid)\n\t) {\n\t\tmissing = null;\n\t\tincompleteData.clear();\n\t\tthis.relatedNodes(bgNodes);\n\t\treturn undefined;\n\t}\n\n\tif (!isValid) {\n\t\tthis.relatedNodes(bgNodes);\n\t}\n\n\treturn isValid;\n}\n\nexport default colorContrastEvaluate;\n","lang_cluster":"Javascript","diff_tag":0,"review_comment":"","id":258} {"diff_hunk":"@@ -1,5 +1,8 @@\n 'use strict';\n \n+const parsePackageVersion = require('..\/..\/utils').parsePackageVersion;\n+const emitWarningOnce = require('..\/..\/utils').emitWarningOnce;\n+\n const require_optional = require('optional-require')(require);\n \n function debugOptions(debugFields, options) {","source_code":"'use strict';\n\nconst require_optional = require('optional-require')(require);\n\nfunction debugOptions(debugFields, options) {\n const finaloptions = {};\n debugFields.forEach(function(n) {\n finaloptions[n] = options[n];\n });\n\n return finaloptions;\n}\n\nfunction retrieveBSON() {\n const BSON = require('bson');\n BSON.native = false;\n\n const optionalBSON = require_optional('bson-ext');\n if (optionalBSON) {\n optionalBSON.native = true;\n return optionalBSON;\n }\n\n return BSON;\n}\n\n\/\/ Throw an error if an attempt to use Snappy is made when Snappy is not installed\nfunction noSnappyWarning() {\n throw new Error(\n 'Attempted to use Snappy compression, but Snappy is not installed. Install or disable Snappy compression and try again.'\n );\n}\n\n\/\/ Facilitate loading Snappy optionally\nfunction retrieveSnappy() {\n let snappy = require_optional('snappy');\n if (!snappy) {\n snappy = {\n compress: noSnappyWarning,\n uncompress: noSnappyWarning,\n compressSync: noSnappyWarning,\n uncompressSync: noSnappyWarning\n };\n }\n return snappy;\n}\n\nmodule.exports = {\n debugOptions,\n retrieveBSON,\n retrieveSnappy\n};\n","lang_cluster":"Javascript","diff_tag":0,"review_comment":"","id":259} {"diff_hunk":"@@ -33,6 +33,7 @@ import { ESCAPE } from '@wordpress\/keycodes';\n *\/\n import Data from 'googlesitekit-data';\n import { CORE_MODULES } from '..\/..\/..\/googlesitekit\/modules\/datastore\/constants';\n+import { CORE_SITE } from '..\/..\/..\/googlesitekit\/datastore\/site\/constants';\n import { clearWebStorage } from '..\/..\/..\/util';\n import Dialog from '..\/..\/Dialog';\n const { useSelect, useDispatch } = Data;","source_code":"\/**\n * ConfirmDisconnect component for SettingsActiveModule.\n *\n * Site Kit by Google, Copyright 2021 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n\/**\n * External dependencies\n *\/\nimport PropTypes from 'prop-types';\n\n\/**\n * WordPress dependencies\n *\/\nimport { __, sprintf } from '@wordpress\/i18n';\nimport { useState, useEffect, useCallback } from 
'@wordpress\/element';\nimport { ESCAPE } from '@wordpress\/keycodes';\n\n\/**\n * Internal dependencies\n *\/\nimport Data from 'googlesitekit-data';\nimport { CORE_MODULES } from '..\/..\/..\/googlesitekit\/modules\/datastore\/constants';\nimport { clearWebStorage } from '..\/..\/..\/util';\nimport Dialog from '..\/..\/Dialog';\nconst { useSelect, useDispatch } = Data;\n\nexport default function ConfirmDisconnect( { slug, handleDialog } ) {\n\tconst [ isDeactivating, setIsDeactivating ] = useState( false );\n\n\tconst dependentModules = useSelect( ( select ) => select( CORE_MODULES ).getModuleDependantNames( slug ) );\n\tconst provides = useSelect( ( select ) => select( CORE_MODULES ).getModuleFeatures( slug ) );\n\tconst module = useSelect( ( select ) => select( CORE_MODULES ).getModule( slug ) );\n\tconst moduleStoreName = useSelect( ( select ) => select( CORE_MODULES ).getModuleStoreName( slug ) );\n\tconst adminReauthURL = useSelect( ( select ) => select( moduleStoreName )?.getAdminReauthURL( false ) );\n\n\tuseEffect( () => {\n\t\tconst onKeyPress = ( e ) => {\n\t\t\tif ( ESCAPE === e.keyCode ) {\n\t\t\t\thandleDialog();\n\t\t\t}\n\t\t};\n\n\t\tglobal.addEventListener( 'keydown', onKeyPress );\n\t\treturn () => {\n\t\t\tglobal.removeEventListener( 'keydown', onKeyPress );\n\t\t};\n\t}, [ handleDialog ] );\n\n\tconst { deactivateModule } = useDispatch( CORE_MODULES );\n\tconst handleDisconnect = useCallback( async () => {\n\t\tif ( module.forceActive ) {\n\t\t\treturn;\n\t\t}\n\n\t\tsetIsDeactivating( true );\n\t\tconst { error } = await deactivateModule( slug );\n\t\tsetIsDeactivating( false );\n\n\t\tif ( ! error ) {\n\t\t\tclearWebStorage();\n\t\t\tglobal.location.assign( adminReauthURL );\n\t\t}\n\t}, [ module?.slug ] );\n\n\tif ( ! module ) {\n\t\treturn null;\n\t}\n\n\tconst { name } = module;\n\n\tconst title = sprintf(\n\t\t\/* translators: %s: module name *\/\n\t\t__( 'Disconnect %s from Site Kit?', 'google-site-kit' ),\n\t\tname,\n\t);\n\n\tconst subtitle = sprintf(\n\t\t\/* translators: %s: module name *\/\n\t\t__( 'By disconnecting the %s module from Site Kit, you will no longer have access to:', 'google-site-kit' ),\n\t\tname,\n\t);\n\n\tlet dependentModulesText = null;\n\tif ( dependentModules.length > 0 ) {\n\t\tdependentModulesText = sprintf(\n\t\t\t\/* translators: %1$s: module name, %2$s: list of dependent modules *\/\n\t\t\t__( 'these active modules depend on %1$s and will also be disconnected: %2$s', 'google-site-kit' ),\n\t\t\tname,\n\t\t\tdependentModules,\n\t\t);\n\t}\n\n\treturn (\n\t\t\n\t);\n}\n\nConfirmDisconnect.propTypes = {\n\tslug: PropTypes.string.isRequired,\n\thandleDialog: PropTypes.func.isRequired,\n};\n","lang_cluster":"Javascript","diff_tag":0,"review_comment":"","id":260} {"diff_hunk":"@@ -132,7 +132,7 @@\n $scope.expandableRow = {};\n \n $scope.expandableRow.shouldRenderExpand = function () {\n- var ret = $scope.colContainer.name === 'body' && $scope.row.isExpanded && (!$scope.grid.isScrollingVertically || $scope.row.expandedRendered);\n+ var ret = $scope.colContainer.name === 'body' && $scope.grid.options.enableExpandable !== false && $scope.row.isExpanded && (!$scope.grid.isScrollingVertically || $scope.row.expandedRendered);\n return ret;\n };\n ","source_code":"(function () {\n 'use strict';\n\n var module = angular.module('ui.grid.expandable', ['ui.grid']);\n\n module.service('uiGridExpandableService', ['gridUtil', '$log', '$compile', function (gridUtil, $log, $compile) {\n var service = {\n initializeGrid: function (grid) {\n var 
publicApi = {\n events: {\n expandable: {\n rowExpandedStateChanged: function (scope, row) {\n }\n }\n },\n methods: {\n expandable: {\n toggleRowExpansion: function (rowEntity) {\n var row = grid.getRow(rowEntity);\n if (row !== null) {\n service.toggleRowExpansion(grid, row);\n }\n },\n expandAllRows: function() {\n service.expandAllRows(grid);\n },\n collapseAllRows: function() {\n service.collapseAllRows(grid);\n }\n }\n }\n };\n grid.api.registerEventsFromObject(publicApi.events);\n grid.api.registerMethodsFromObject(publicApi.methods);\n },\n toggleRowExpansion: function (grid, row) {\n row.isExpanded = !row.isExpanded;\n\n if (row.isExpanded) {\n row.height = row.grid.options.rowHeight + grid.options.expandable.expandableRowHeight;\n }\n else {\n row.height = row.grid.options.rowHeight;\n }\n\n grid.api.expandable.raise.rowExpandedStateChanged(row);\n },\n expandAllRows: function(grid, $scope) {\n angular.forEach(grid.renderContainers.body.visibleRowCache, function(row) {\n if (!row.isExpanded) {\n service.toggleRowExpansion(grid, row);\n }\n });\n grid.refresh();\n },\n collapseAllRows: function(grid) {\n angular.forEach(grid.renderContainers.body.visibleRowCache, function(row) {\n if (row.isExpanded) {\n service.toggleRowExpansion(grid, row);\n }\n });\n grid.refresh();\n }\n };\n return service;\n }]);\n\n module.directive('uiGridExpandable', ['$log', 'uiGridExpandableService', '$templateCache',\n function ($log, uiGridExpandableService, $templateCache) {\n return {\n replace: true,\n priority: 0,\n require: '^uiGrid',\n scope: false,\n compile: function () {\n return {\n pre: function ($scope, $elm, $attrs, uiGridCtrl) {\n if (uiGridCtrl.grid.options.expandable.enableExpandableRowHeader ) {\n var expandableRowHeaderColDef = {name: 'expandableButtons', width: 40};\n expandableRowHeaderColDef.cellTemplate = $templateCache.get('ui-grid\/expandableRowHeader');\n uiGridCtrl.grid.addRowHeaderColumn(expandableRowHeaderColDef);\n }\n uiGridExpandableService.initializeGrid(uiGridCtrl.grid);\n },\n post: function ($scope, $elm, $attrs, uiGridCtrl) {\n }\n };\n }\n };\n }]);\n\n module.directive('uiGridExpandableRow',\n ['uiGridExpandableService', '$timeout', '$log', '$compile', 'uiGridConstants','gridUtil','$interval',\n function (uiGridExpandableService, $timeout, $log, $compile, uiGridConstants, gridUtil, $interval) {\n\n return {\n replace: false,\n priority: 0,\n scope: false,\n\n compile: function () {\n return {\n pre: function ($scope, $elm, $attrs, uiGridCtrl) {\n gridUtil.getTemplate($scope.grid.options.expandable.rowExpandableTemplate).then(\n function (template) {\n var expandedRowElement = $compile(template)($scope);\n $elm.append(expandedRowElement);\n $scope.row.expandedRendered = true;\n });\n },\n\n post: function ($scope, $elm, $attrs, uiGridCtrl) {\n $scope.$on('$destroy', function() {\n $scope.row.expandedRendered = false;\n });\n }\n };\n }\n };\n }]);\n\n module.directive('uiGridRow',\n ['$compile', '$log', '$templateCache',\n function ($compile, $log, $templateCache) {\n return {\n priority: -200,\n scope: false,\n compile: function ($elm, $attrs) {\n return {\n pre: function ($scope, $elm, $attrs, controllers) {\n\n $scope.expandableRow = {};\n\n $scope.expandableRow.shouldRenderExpand = function () {\n var ret = $scope.colContainer.name === 'body' && $scope.row.isExpanded && (!$scope.grid.isScrollingVertically || $scope.row.expandedRendered);\n return ret;\n };\n\n $scope.expandableRow.shouldRenderFiller = function () {\n var ret = $scope.row.isExpanded && ( 
$scope.colContainer.name !== 'body' || ($scope.grid.isScrollingVertically && !$scope.row.expandedRendered));\n return ret;\n };\n\n function updateRowContainerWidth() {\n var grid = $scope.grid;\n var colWidth = grid.getColumn('expandableButtons').width;\n return '.grid' + grid.id + ' .ui-grid-pinned-container-' + $scope.colContainer.name + ', .grid' + grid.id +\n ' .ui-grid-pinned-container-' + $scope.colContainer.name + ' .ui-grid-render-container-' + $scope.colContainer.name +\n ' .ui-grid-viewport .ui-grid-canvas .ui-grid-row { width: ' + colWidth + 'px; }';\n }\n\n if ($scope.colContainer.name === 'left') {\n $scope.grid.registerStyleComputation({\n priority: 15,\n func: updateRowContainerWidth\n });\n }\n\n },\n post: function ($scope, $elm, $attrs, controllers) {\n }\n };\n }\n };\n }]);\n\n module.directive('uiGridViewport',\n ['$compile', '$log', '$templateCache',\n function ($compile, $log, $templateCache) {\n return {\n priority: -200,\n scope: false,\n compile: function ($elm, $attrs) {\n var rowRepeatDiv = angular.element($elm.children().children()[0]);\n var expandedRowFillerElement = $templateCache.get('ui-grid\/expandableScrollFiller');\n var expandedRowElement = $templateCache.get('ui-grid\/expandableRow');\n rowRepeatDiv.append(expandedRowElement);\n rowRepeatDiv.append(expandedRowFillerElement);\n return {\n pre: function ($scope, $elm, $attrs, controllers) {\n },\n post: function ($scope, $elm, $attrs, controllers) {\n }\n };\n }\n };\n }]);\n\n})();\n","lang_cluster":"Javascript","diff_tag":0,"review_comment":"","id":261} {"diff_hunk":"@@ -98,7 +98,7 @@ const WPDashboardPopularPages = ( { WidgetReportZero, WidgetReportError } ) => {\n \t\t\t<\/h2>\n \t\t\t\n \t\t\t\t","source_code":"\/**\n * WPDashboardPopularPages component.\n *\n * Site Kit by Google, Copyright 2021 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n\/**\n * WordPress dependencies\n *\/\nimport { __ } from '@wordpress\/i18n';\n\n\/**\n * Internal dependencies\n *\/\nimport Data from 'googlesitekit-data';\nimport {\n\tMODULES_ANALYTICS,\n\tDATE_RANGE_OFFSET,\n} from '..\/..\/modules\/analytics\/datastore\/constants';\nimport { CORE_USER } from '..\/..\/googlesitekit\/datastore\/user\/constants';\nimport PreviewTable from '..\/..\/components\/PreviewTable';\nimport TableOverflowContainer from '..\/..\/components\/TableOverflowContainer';\nimport { isZeroReport } from '..\/..\/modules\/analytics\/util\/is-zero-report';\nimport ReportTable from '..\/ReportTable';\nimport DetailsPermaLinks from '..\/DetailsPermaLinks';\nimport { numFmt } from '..\/..\/util';\nconst { useSelect } = Data;\n\nconst WPDashboardPopularPages = ( { WidgetReportZero, WidgetReportError } ) => {\n\tconst dateRangeDates = useSelect( ( select ) =>\n\t\tselect( CORE_USER ).getDateRangeDates( {\n\t\t\tcompare: true,\n\t\t\toffsetDays: DATE_RANGE_OFFSET,\n\t\t} )\n\t);\n\n\tconst reportArgs = {\n\t\t...dateRangeDates,\n\t\tmetrics: [\n\t\t\t{\n\t\t\t\texpression: 'ga:pageviews',\n\t\t\t\talias: 
'Pageviews',\n\t\t\t},\n\t\t],\n\t\tdimensions: [ 'ga:pageTitle', 'ga:pagePath' ],\n\t\torderby: [\n\t\t\t{\n\t\t\t\tfieldName: 'ga:pageviews',\n\t\t\t\tsortOrder: 'DESCENDING',\n\t\t\t},\n\t\t],\n\t\tlimit: 5,\n\t};\n\n\tconst data = useSelect( ( select ) =>\n\t\tselect( MODULES_ANALYTICS ).getReport( reportArgs )\n\t);\n\tconst error = useSelect( ( select ) =>\n\t\tselect( MODULES_ANALYTICS ).getErrorForSelector( 'getReport', [\n\t\t\treportArgs,\n\t\t] )\n\t);\n\tconst loading = useSelect(\n\t\t( select ) =>\n\t\t\t! select( MODULES_ANALYTICS ).hasFinishedResolution( 'getReport', [\n\t\t\t\treportArgs,\n\t\t\t] )\n\t);\n\n\tif ( loading ) {\n\t\treturn ;\n\t}\n\n\tif ( error ) {\n\t\treturn ;\n\t}\n\n\tif ( isZeroReport( data ) ) {\n\t\treturn ;\n\t}\n\n\treturn (\n\t\t
\n\t\t\t

\n\t\t\t\t{ __( 'Top content over the last 28 days', 'google-site-kit' ) }\n\t\t\t<\/h2>\n\t\t\t\n\t\t\t\t\n\t\t\t<\/TableOverflowContainer>\n\t\t<\/div>\n\t);\n};\n\nconst tableColumns = [\n\t{\n\t\ttitle: __( 'Title', 'google-site-kit' ),\n\t\tdescription: __( 'Page Title', 'google-site-kit' ),\n\t\tprimary: true,\n\t\tComponent: ( { row } ) => {\n\t\t\tconst [ title, path ] = row.dimensions;\n\t\t\treturn ;\n\t\t},\n\t},\n\t{\n\t\ttitle: __( 'Pageviews', 'google-site-kit' ),\n\t\tdescription: __( 'Pageviews', 'google-site-kit' ),\n\t\tfield: 'metrics.0.values.0',\n\t\tComponent: ( { fieldValue } ) => (\n\t\t\t{ numFmt( fieldValue, { style: 'decimal' } ) }<\/span>\n\t\t),\n\t},\n];\n\nexport default WPDashboardPopularPages;\n","lang_cluster":"Javascript","diff_tag":0,"review_comment":"","id":262} {"diff_hunk":"@@ -128,7 +128,7 @@ class ConditionUpdateObserver {\n }\n \n const visibleDataFactory = curry((curriedConditionsBefore, curriedColumn, conditionsStack = []) => {\n- const splitConditionCollection = new ConditionCollection();\n+ const splitConditionCollection = new ConditionCollection(new IndexToValueMap().init(this.getNumberOfColumns()));\n const curriedConditionsBeforeArray = [].concat(curriedConditionsBefore, conditionsStack);\n \n \/\/ Create new condition collection to determine what rows should be visible in \"filter by value\" box","source_code":"import { arrayEach, arrayMap, arrayFilter } from '..\/..\/helpers\/array';\nimport { mixin, objectEach } from '..\/..\/helpers\/object';\nimport { curry } from '..\/..\/helpers\/function';\nimport localHooks from '..\/..\/mixins\/localHooks';\nimport ConditionCollection from '.\/conditionCollection';\nimport DataFilter from '.\/dataFilter';\nimport { createArrayAssertion } from '.\/utils';\n\n\/**\n * Class which is designed for observing changes in condition collection. 
When condition is changed by user at specified\n * column it's necessary to update all conditions defined after this edited one.\n *\n * Object fires `update` hook for every column conditions change.\n *\n * @class ConditionUpdateObserver\n * @plugin Filters\n *\/\nclass ConditionUpdateObserver {\n constructor(conditionCollection, columnDataFactory = () => []) {\n \/**\n * Reference to the instance of {@link ConditionCollection}.\n *\n * @type {ConditionCollection}\n *\/\n this.conditionCollection = conditionCollection;\n \/**\n * Function which provide source data factory for specified column.\n *\n * @type {Function}\n *\/\n this.columnDataFactory = columnDataFactory;\n \/**\n * Collected changes when grouping is enabled.\n *\n * @type {Array}\n * @default []\n *\/\n this.changes = [];\n \/**\n * Flag which determines if grouping events is enabled.\n *\n * @type {boolean}\n *\/\n this.grouping = false;\n \/**\n * The latest known position of edited conditions at specified column index.\n *\n * @type {number}\n * @default -1\n *\/\n this.latestEditedColumnPosition = -1;\n \/**\n * The latest known order of conditions stack.\n *\n * @type {Array}\n *\/\n this.latestOrderStack = [];\n\n this.conditionCollection.addLocalHook('beforeRemove', column => this._onConditionBeforeModify(column));\n this.conditionCollection.addLocalHook('afterAdd', column => this.updateStatesAtColumn(column));\n this.conditionCollection.addLocalHook('afterClear', column => this.updateStatesAtColumn(column));\n this.conditionCollection.addLocalHook('beforeClean', () => this._onConditionBeforeClean());\n this.conditionCollection.addLocalHook('afterClean', () => this._onConditionAfterClean());\n }\n\n \/**\n * Enable grouping changes. Grouping is helpful in situations when a lot of conditions is added in one moment. Instead of\n * trigger `update` hook for every condition by adding\/removing you can group this changes and call `flush` method to trigger\n * it once.\n *\/\n groupChanges() {\n this.grouping = true;\n }\n\n \/**\n * Flush all collected changes. 
This trigger `update` hook for every previously collected change from condition collection.\n *\/\n flush() {\n this.grouping = false;\n\n arrayEach(this.changes, (column) => {\n this.updateStatesAtColumn(column);\n });\n this.changes.length = 0;\n }\n\n \/**\n * On before modify condition (add or remove from collection),.\n *\n * @param {number} column Column index.\n * @private\n *\/\n _onConditionBeforeModify(column) {\n this.latestEditedColumnPosition = this.conditionCollection.orderStack.indexOf(column);\n }\n\n \/**\n * Update all related states which should be changed after invoking changes applied to current column.\n *\n * @param {number} column The column index.\n * @param {object} conditionArgsChange Object describing condition changes which can be handled by filters on `update` hook.\n * It contains keys `conditionKey` and `conditionValue` which refers to change specified key of condition to specified value\n * based on referred keys.\n *\/\n updateStatesAtColumn(column, conditionArgsChange) {\n if (this.grouping) {\n if (this.changes.indexOf(column) === -1) {\n this.changes.push(column);\n }\n\n return;\n }\n const allConditions = this.conditionCollection.exportAllConditions();\n let editedColumnPosition = this.conditionCollection.orderStack.indexOf(column);\n\n if (editedColumnPosition === -1) {\n editedColumnPosition = this.latestEditedColumnPosition;\n }\n\n \/\/ Collection of all conditions defined before currently edited `column` (without edited one)\n const conditionsBefore = allConditions.slice(0, editedColumnPosition);\n \/\/ Collection of all conditions defined after currently edited `column` (without edited one)\n const conditionsAfter = allConditions.slice(editedColumnPosition);\n\n \/\/ Make sure that conditionAfter doesn't contain edited column conditions\n if (conditionsAfter.length && conditionsAfter[0].column === column) {\n conditionsAfter.shift();\n }\n\n const visibleDataFactory = curry((curriedConditionsBefore, curriedColumn, conditionsStack = []) => {\n const splitConditionCollection = new ConditionCollection();\n const curriedConditionsBeforeArray = [].concat(curriedConditionsBefore, conditionsStack);\n\n \/\/ Create new condition collection to determine what rows should be visible in \"filter by value\" box\n \/\/ in the next conditions in the chain\n splitConditionCollection.importAllConditions(curriedConditionsBeforeArray);\n\n const allRows = this.columnDataFactory(curriedColumn);\n let visibleRows;\n\n if (splitConditionCollection.isEmpty()) {\n visibleRows = allRows;\n } else {\n visibleRows = (new DataFilter(\n splitConditionCollection,\n columnData => this.columnDataFactory(columnData)\n )).filter();\n }\n visibleRows = arrayMap(visibleRows, rowData => rowData.meta.visualRow);\n\n const visibleRowsAssertion = createArrayAssertion(visibleRows);\n\n return arrayFilter(allRows, rowData => visibleRowsAssertion(rowData.meta.visualRow));\n })(conditionsBefore);\n\n const editedConditions = [].concat(this.conditionCollection.getConditions(column));\n\n this.runLocalHooks('update', {\n editedConditionStack: { column, conditions: editedConditions },\n dependentConditionStacks: conditionsAfter,\n filteredRowsFactory: visibleDataFactory,\n conditionArgsChange\n });\n }\n\n \/**\n * On before conditions clean listener.\n *\n * @private\n *\/\n _onConditionBeforeClean() {\n this.latestOrderStack = [].concat(this.conditionCollection.orderStack);\n }\n\n \/**\n * On after conditions clean listener.\n *\n * @private\n *\/\n _onConditionAfterClean() {\n 
arrayEach(this.latestOrderStack, (column) => {\n this.updateStatesAtColumn(column);\n });\n }\n\n \/**\n * Destroy instance.\n *\/\n destroy() {\n this.clearLocalHooks();\n\n objectEach(this, (value, property) => {\n this[property] = null;\n });\n }\n}\n\nmixin(ConditionUpdateObserver, localHooks);\n\nexport default ConditionUpdateObserver;\n","lang_cluster":"Javascript","diff_tag":0,"review_comment":"","id":263} {"diff_hunk":"@@ -19,12 +19,13 @@\n \/**\n * External dependencies\n *\/\n+import classnames from 'classnames';\n import PropTypes from 'prop-types';\n \n \/**\n * WordPress dependencies\n *\/\n-import { useCallback } from '@wordpress\/element';\n+import { useCallback, useContext } from '@wordpress\/element';\n import { __, sprintf } from '@wordpress\/i18n';\n \n \/**","source_code":"\/**\n * GA4 Property Select component.\n *\n * Site Kit by Google, Copyright 2021 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\n\/**\n * External dependencies\n *\/\nimport PropTypes from 'prop-types';\n\n\/**\n * WordPress dependencies\n *\/\nimport { useCallback } from '@wordpress\/element';\nimport { __, sprintf } from '@wordpress\/i18n';\n\n\/**\n * Internal dependencies\n *\/\nimport Data from 'googlesitekit-data';\nimport { Select, Option } from '..\/..\/..\/..\/material-components';\nimport ProgressBar from '..\/..\/..\/..\/components\/ProgressBar';\nimport {\n\tMODULES_ANALYTICS_4,\n\tPROPERTY_CREATE,\n} from '..\/..\/datastore\/constants';\nimport { MODULES_ANALYTICS } from '..\/..\/..\/analytics\/datastore\/constants';\nimport { isValidAccountID } from '..\/..\/..\/analytics\/util';\nimport { trackEvent } from '..\/..\/..\/..\/util';\nconst { useSelect, useDispatch } = Data;\n\nexport default function PropertySelect( { label } ) {\n\t\/\/ TODO: Update this select hook to pull accountID from the modules\/analytics-4 datastore when GA4 module becomes separated from the Analytics one\n\tconst accountID = useSelect( ( select ) =>\n\t\tselect( MODULES_ANALYTICS ).getAccountID()\n\t);\n\tconst properties = useSelect(\n\t\t( select ) =>\n\t\t\tselect( MODULES_ANALYTICS_4 ).getProperties( accountID ) || []\n\t);\n\tconst propertyID = useSelect( ( select ) =>\n\t\tselect( MODULES_ANALYTICS_4 ).getPropertyID()\n\t);\n\tconst isLoading = useSelect(\n\t\t( select ) =>\n\t\t\t! select( MODULES_ANALYTICS ).hasFinishedResolution(\n\t\t\t\t'getAccounts'\n\t\t\t) ||\n\t\t\t! select(\n\t\t\t\tMODULES_ANALYTICS_4\n\t\t\t).hasFinishedResolution( 'getProperties', [ accountID ] )\n\t);\n\n\tconst { selectProperty } = useDispatch( MODULES_ANALYTICS_4 );\n\n\tconst onChange = useCallback(\n\t\t( index, item ) => {\n\t\t\tconst newPropertyID = item.dataset.value;\n\t\t\tif ( propertyID !== newPropertyID ) {\n\t\t\t\tselectProperty( newPropertyID );\n\t\t\t\ttrackEvent(\n\t\t\t\t\t'analytics_setup',\n\t\t\t\t\t'property_change',\n\t\t\t\t\tnewPropertyID\n\t\t\t\t);\n\t\t\t}\n\t\t},\n\t\t[ propertyID, selectProperty ]\n\t);\n\n\tif ( ! 
isValidAccountID( accountID ) ) {\n\t\treturn null;\n\t}\n\n\tif ( isLoading ) {\n\t\treturn ;\n\t}\n\n\treturn (\n\t\t\n\t\t\t{ ( properties || [] )\n\t\t\t\t.concat( {\n\t\t\t\t\t_id: PROPERTY_CREATE,\n\t\t\t\t\tdisplayName: __(\n\t\t\t\t\t\t'Set up a new property',\n\t\t\t\t\t\t'google-site-kit'\n\t\t\t\t\t),\n\t\t\t\t} )\n\t\t\t\t.map( ( { _id, displayName }, index ) => (\n\t\t\t\t\t