content
stringlengths
86
88.9k
title
stringlengths
0
150
question
stringlengths
1
35.8k
answers
sequence
answers_scores
sequence
non_answers
sequence
non_answers_scores
sequence
tags
sequence
name
stringlengths
30
130
Q: PHP routing - How can I implement 404 page on wrong url routes? everyone. I have a basic router created in PHP. I can redirect to any page I want, if there is a callback function the callback function gets executed and if there is a page (String instead of a function) the page loads the correct file. However I can't figure out how to implement 404 page on non-existing route. I tried to reuse the preg_match() function, but that gave me no results and if I place the notFound() (404 page) in the else block, it always gets executed regardless of the correct url or not. if(preg_match($pattern, $path, $matches) && $httpMethod === $route['method']) { }else{ self::notFound(); //THIS GETS EXECUTED ON EVERY ROUTE } This is my Code. <?php class Router{ public static $routes = []; public static function get($route, $callback){ self::$routes[] = [ 'route' => $route, 'callback' => $callback, 'method' => 'GET' ]; } public static function resolve(){ $path = $_SERVER['REQUEST_URI']; $httpMethod = $_SERVER['REQUEST_METHOD']; $methodMatch = false; $routeMatch = false; foreach(self::$routes as $route){ // convert urls like '/users/:uid/posts/:pid' to regular expression $pattern = "@^" . preg_replace('/\\\:[a-zA-Z0-9\_\-]+/', '([a-zA-Z0-9\-\_]+)', preg_quote($route['route'])) . "$@D"; $matches = Array(); // check if the current request matches the expression if(preg_match($pattern, $path, $matches) && $httpMethod === $route['method']) { // remove the first match array_shift($matches); // call the callback with the matched positions as params if(is_callable($route['callback'])){ call_user_func_array($route['callback'], $matches); }else{ self::render($route['callback']); } } } } public static function render($file, $viewsFolder='./views/'){ include($viewsFolder . 
$file); } public static function notFound(){ http_response_code(400); include('./views/404.php'); exit(); } } Router::get("/", "home.php"); Router::get("/user/:id", function($val1) { $data = array( "Nicole", "Sarah", "Jinx", "Sarai" ); echo $data[$val1] ?? "No data"; }); Router::get("/user/profile/:id", "admin.php"); Router::resolve(); ?> A: To implement a 404 page for non-existing routes, you can add a check at the end of your resolve() method to see if no routes have been matched. If no routes were matched, then you can call the notFound() method to display the 404 page. public static function resolve(){ $path = $_SERVER['REQUEST_URI']; $httpMethod = $_SERVER['REQUEST_METHOD']; $methodMatch = false; $routeMatch = false; // initialize a flag to track if a route has been matched $routeMatched = false; foreach(self::$routes as $route){ // convert urls like '/users/:uid/posts/:pid' to regular expression $pattern = "@^" . preg_replace('/\\\:[a-zA-Z0-9\_\-]+/', '([a-zA-Z0-9\-\_]+)', preg_quote($route['route'])) . "$@D"; $matches = Array(); // check if the current request matches the expression if(preg_match($pattern, $path, $matches) && $httpMethod === $route['method']) { // remove the first match array_shift($matches); // call the callback with the matched positions as params if(is_callable($route['callback'])){ call_user_func_array($route['callback'], $matches); }else{ self::render($route['callback']); } // set the routeMatched flag to true $routeMatched = true; // no need to continue looping through the routes break; } } // if no routes were matched, then call the notFound() method if (!$routeMatched) { self::notFound(); } } In this code, we add a $routeMatched flag that is initially set to false. If a route is matched, we set this flag to true and break out of the loop, since there's no need to continue looping through the routes. Finally, after the loop is finished, we check if the $routeMatched flag is still false. 
If it is, then we call the notFound() method to display the 404 page.
PHP routing - How can I implement 404 page on wrong url routes?
everyone. I have a basic router created in PHP. I can redirect to any page I want, if there is a callback function the callback function gets executed and if there is a page (String instead of a function) the page loads the correct file. However I can't figure out how to implement 404 page on non-existing route. I tried to reuse the preg_match() function, but that gave me no results and if I place the notFound() (404 page) in the else block, it always gets executed regardless of the correct url or not. if(preg_match($pattern, $path, $matches) && $httpMethod === $route['method']) { }else{ self::notFound(); //THIS GETS EXECUTED ON EVERY ROUTE } This is my Code. <?php class Router{ public static $routes = []; public static function get($route, $callback){ self::$routes[] = [ 'route' => $route, 'callback' => $callback, 'method' => 'GET' ]; } public static function resolve(){ $path = $_SERVER['REQUEST_URI']; $httpMethod = $_SERVER['REQUEST_METHOD']; $methodMatch = false; $routeMatch = false; foreach(self::$routes as $route){ // convert urls like '/users/:uid/posts/:pid' to regular expression $pattern = "@^" . preg_replace('/\\\:[a-zA-Z0-9\_\-]+/', '([a-zA-Z0-9\-\_]+)', preg_quote($route['route'])) . "$@D"; $matches = Array(); // check if the current request matches the expression if(preg_match($pattern, $path, $matches) && $httpMethod === $route['method']) { // remove the first match array_shift($matches); // call the callback with the matched positions as params if(is_callable($route['callback'])){ call_user_func_array($route['callback'], $matches); }else{ self::render($route['callback']); } } } } public static function render($file, $viewsFolder='./views/'){ include($viewsFolder . $file); } public static function notFound(){ http_response_code(400); include('./views/404.php'); exit(); } } Router::get("/", "home.php"); Router::get("/user/:id", function($val1) { $data = array( "Nicole", "Sarah", "Jinx", "Sarai" ); echo $data[$val1] ?? 
"No data"; }); Router::get("/user/profile/:id", "admin.php"); Router::resolve(); ?>
[ "To implement a 404 page for non-existing routes, you can add a check at the end of your resolve() method to see if no routes have been matched. If no routes were matched, then you can call the notFound() method to display the 404 page.\npublic static function resolve(){\n $path = $_SERVER['REQUEST_URI'];\n $httpMethod = $_SERVER['REQUEST_METHOD'];\n\n $methodMatch = false;\n $routeMatch = false;\n\n // initialize a flag to track if a route has been matched\n $routeMatched = false;\n\n foreach(self::$routes as $route){\n\n // convert urls like '/users/:uid/posts/:pid' to regular expression\n $pattern = \"@^\" . preg_replace('/\\\\\\:[a-zA-Z0-9\\_\\-]+/', '([a-zA-Z0-9\\-\\_]+)', preg_quote($route['route'])) . \"$@D\";\n $matches = Array();\n\n\n // check if the current request matches the expression\n if(preg_match($pattern, $path, $matches) && $httpMethod === $route['method']) {\n // remove the first match\n array_shift($matches);\n // call the callback with the matched positions as params\n\n if(is_callable($route['callback'])){\n call_user_func_array($route['callback'], $matches);\n }else{\n self::render($route['callback']);\n\n }\n\n // set the routeMatched flag to true\n $routeMatched = true;\n\n // no need to continue looping through the routes\n break;\n }\n }\n\n // if no routes were matched, then call the notFound() method\n if (!$routeMatched) {\n self::notFound();\n }\n}\n\nIn this code, we add a $routeMatched flag that is initially set to false. If a route is matched, we set this flag to true and break out of the loop, since there's no need to continue looping through the routes. Finally, after the loop is finished, we check if the $routeMatched flag is still false. If it is, then we call the notFound() method to display the 404 page.\n" ]
[ 0 ]
[]
[]
[ "frameworks", "model_view_controller", "php", "routes", "url_routing" ]
stackoverflow_0074679361_frameworks_model_view_controller_php_routes_url_routing.txt
Q: What are these weird symbols in bash terminal in vs code. I am working with Django. How to fixed it? $ python manage.py migrate ?[36;1mOperations to perform:?[0m ?[1m Apply all migrations: ?[0madmin, auth, contenttypes, my_app, sessions ?[36;1mRunning migrations:?[0m Applying my_app.0001_initial...?[32;1m OK?[0m In a course i am following the guy terminal doesn't show those kind of messed up characters. A: The reason for this is, that Django tries to dye your text, for estatic reasons. See here for more info about the coloring. There a various Option to fix this: 1. Use another terminal It might help to use another terminal tool that interpretes the color orders by Django. 2. Disable colors You can also disable the color feature. Add USE_TERMINAL_COLORS = False to your settings file. or Run export DJANGO_NOCOLOR in your terminal. For more information see this Django isssue. Hope that helps.
What are these weird symbols in bash terminal in vs code. I am working with Django. How to fixed it?
$ python manage.py migrate ?[36;1mOperations to perform:?[0m ?[1m Apply all migrations: ?[0madmin, auth, contenttypes, my_app, sessions ?[36;1mRunning migrations:?[0m Applying my_app.0001_initial...?[32;1m OK?[0m In a course i am following the guy terminal doesn't show those kind of messed up characters.
[ "The reason for this is, that Django tries to dye your text, for estatic reasons.\nSee here for more info about the coloring.\nThere a various Option to fix this:\n1. Use another terminal\nIt might help to use another terminal tool that interpretes the color orders by Django.\n2. Disable colors\nYou can also disable the color feature.\nAdd USE_TERMINAL_COLORS = False to your settings file.\nor\nRun export DJANGO_NOCOLOR in your terminal.\nFor more information see this Django isssue.\nHope that helps.\n" ]
[ 0 ]
[]
[]
[ "django", "django_migrations", "symbols", "terminal" ]
stackoverflow_0074680517_django_django_migrations_symbols_terminal.txt
Q: Create 48 plots by iterating through columns of pandas dataframe - How to increase column index in a loop? I have a Pandas Dataframe with 96 columns x 100 rows which looks like this: x1, y1, x2, y2, x3, y3, ..., x48, y48 0.5, 521, 1.8, 625, 5.5, 856, ..., 2.5, 453 0.6, 556, 1.9, 695, 5.6, 1023, ..., 2.6, 569 I want to plot y1 against x1, then y2 against x2, y3 against x3, and so on. I'm really struggling with increasing the column indices. This code doesn't work: df = pd.read_csv(r'xxx.csv', delimiter=';') for col in df: x=col y=col+1 df.plot(x, y) plt.show() TypeError: can only concatenate str (not "int") to str I get the problem, but don't know how to solve it :( A: Here is an approach (highly inspired by Trenton McKinney). We start by selecting/zipping the x columns and y columns with pandas.DataFrame.filter and pandas.DataFrame.columns, then we use matplotlib.axes.Axes.scatter (feel free to put any other kind of plots). Try this : import pandas as pd import matplotlib.pyplot as plt # df = pd.read_csv(r'xxx.csv', delimiter=';') combos = list(zip(df.filter(like="x").columns, df.filter(like="y").columns)) fig, axes = ( plt.subplots(nrows=int(len(df.filter(like="x").columns)/2), ncols=int(len(df.filter(like="y").columns)/2), figsize=(8, 4)) ) axes = axes.flat for (x, y), ax in zip(combos, axes): ax.scatter(df[x], df[y]) ax.set(title=f'{x} vs. {y}', xlabel=x, ylabel=y) plt.tight_layout() plt.show() # Output : # Used Input: print(df) x1 y1 x2 y2 x3 y3 x48 y48 0 0.5 521 1.8 625 5.5 856 2.5 453 1 0.6 556 1.9 695 5.6 1023 2.6 569
Create 48 plots by iterating through columns of pandas dataframe - How to increase column index in a loop?
I have a Pandas Dataframe with 96 columns x 100 rows which looks like this: x1, y1, x2, y2, x3, y3, ..., x48, y48 0.5, 521, 1.8, 625, 5.5, 856, ..., 2.5, 453 0.6, 556, 1.9, 695, 5.6, 1023, ..., 2.6, 569 I want to plot y1 against x1, then y2 against x2, y3 against x3, and so on. I'm really struggling with increasing the column indices. This code doesn't work: df = pd.read_csv(r'xxx.csv', delimiter=';') for col in df: x=col y=col+1 df.plot(x, y) plt.show() TypeError: can only concatenate str (not "int") to str I get the problem, but don't know how to solve it :(
[ "Here is an approach (highly inspired by Trenton McKinney).\nWe start by selecting/zipping the x columns and y columns with pandas.DataFrame.filter and pandas.DataFrame.columns, then we use matplotlib.axes.Axes.scatter (feel free to put any other kind of plots).\nTry this :\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# df = pd.read_csv(r'xxx.csv', delimiter=';')\n\ncombos = list(zip(df.filter(like=\"x\").columns, df.filter(like=\"y\").columns))\n\nfig, axes = (\n plt.subplots(nrows=int(len(df.filter(like=\"x\").columns)/2),\n ncols=int(len(df.filter(like=\"y\").columns)/2),\n figsize=(8, 4))\n )\n\naxes = axes.flat\n\nfor (x, y), ax in zip(combos, axes):\n ax.scatter(df[x], df[y])\n ax.set(title=f'{x} vs. {y}', xlabel=x, ylabel=y)\n \nplt.tight_layout()\nplt.show()\n\n# Output :\n\n# Used Input:\nprint(df)\n x1 y1 x2 y2 x3 y3 x48 y48\n0 0.5 521 1.8 625 5.5 856 2.5 453\n1 0.6 556 1.9 695 5.6 1023 2.6 569\n\n" ]
[ 0 ]
[]
[]
[ "dataframe", "indexing", "loops", "pandas", "plot" ]
stackoverflow_0074681072_dataframe_indexing_loops_pandas_plot.txt
Q: Web Audio API - Stereo to Mono I need to convert an input stereo (channelCount: 2) stream to mono. I've tried several things but the destination.stream always has 2 channels. const context = new AudioContext() const splitter = context.createChannelSplitter(1) const merger = context.createChannelMerger(1) const source = context.createMediaStreamSource(stream) const dest = context.createMediaStreamDestination() source.connect(splitter) splitter.connect(merger) merger.connect(context.destination) merger.connect(dest) console.log(dest.stream.getAudioTracks()[0].getSettings()) // channelCount: 2 I've also tried this: const context = new AudioContext() const merger = context.createChannelMerger(1) const source = context.createMediaStreamSource(stream) const dest = context.createMediaStreamDestination() source.connect(merger) merger.connect(context.destination) merger.connect(dest) console.log(dest.stream.getAudioTracks()[0].getSettings()) // channelCount: 2 there has to be an easy way to achieve this... thanks! A: To convert the stereo audio stream to mono, you can use the createChannelSplitter and createChannelMerger methods of the AudioContext object, as you have done in your code. However, instead of connecting the splitter directly to the merger, you will need to use the splitter to split the stereo audio into two separate mono channels, and then use the merger to combine these two mono channels into a single mono channel. Here is an example of how you can do this: const context = new AudioContext(); const splitter = context.createChannelSplitter(2); const merger = context.createChannelMerger(1); const source = context.createMediaStreamSource(stream); const dest = context.createMediaStreamDestination(); source.connect(splitter); // Connect the first channel of the splitter to the first input of the merger. splitter.connect(merger, 0, 0); // Connect the second channel of the splitter to the second input of the merger. 
splitter.connect(merger, 1, 0); merger.connect(context.destination); merger.connect(dest); console.log(dest.stream.getAudioTracks()[0].getSettings()); After running this code, the dest.stream will contain a mono audio stream.
Web Audio API - Stereo to Mono
I need to convert an input stereo (channelCount: 2) stream to mono. I've tried several things but the destination.stream always has 2 channels. const context = new AudioContext() const splitter = context.createChannelSplitter(1) const merger = context.createChannelMerger(1) const source = context.createMediaStreamSource(stream) const dest = context.createMediaStreamDestination() source.connect(splitter) splitter.connect(merger) merger.connect(context.destination) merger.connect(dest) console.log(dest.stream.getAudioTracks()[0].getSettings()) // channelCount: 2 I've also tried this: const context = new AudioContext() const merger = context.createChannelMerger(1) const source = context.createMediaStreamSource(stream) const dest = context.createMediaStreamDestination() source.connect(merger) merger.connect(context.destination) merger.connect(dest) console.log(dest.stream.getAudioTracks()[0].getSettings()) // channelCount: 2 there has to be an easy way to achieve this... thanks!
[ "To convert the stereo audio stream to mono, you can use the createChannelSplitter and createChannelMerger methods of the AudioContext object, as you have done in your code. However, instead of connecting the splitter directly to the merger, you will need to use the splitter to split the stereo audio into two separate mono channels, and then use the merger to combine these two mono channels into a single mono channel.\nHere is an example of how you can do this:\nconst context = new AudioContext();\n\nconst splitter = context.createChannelSplitter(2);\nconst merger = context.createChannelMerger(1);\nconst source = context.createMediaStreamSource(stream);\nconst dest = context.createMediaStreamDestination();\n\nsource.connect(splitter);\n\n// Connect the first channel of the splitter to the first input of the merger.\nsplitter.connect(merger, 0, 0);\n\n// Connect the second channel of the splitter to the second input of the merger.\nsplitter.connect(merger, 1, 0);\n\nmerger.connect(context.destination);\nmerger.connect(dest);\n\nconsole.log(dest.stream.getAudioTracks()[0].getSettings());\n\nAfter running this code, the dest.stream will contain a mono audio stream.\n" ]
[ 0 ]
[]
[]
[ "audio", "google_chrome_extension", "javascript", "webrtc" ]
stackoverflow_0074681121_audio_google_chrome_extension_javascript_webrtc.txt
Q: Trying update and save a value for product meta data on woocommerce cart I've been trying to figure out how can I make the value on the product meta data shown on the cart to be editable and saved when clicking the update quote button. When I try to update the value on the BOP field for each product, it does not reflect properly I've made some work in progress but can never seem to make it work cart.php <?php foreach ( WC()->cart->get_cart() as $cart_item_key => $cart_item ) { $_product = apply_filters( 'woocommerce_cart_item_product', $cart_item['data'], $cart_item, $cart_item_key ); $product_id = apply_filters( 'woocommerce_cart_item_product_id', $cart_item['product_id'], $cart_item, $cart_item_key ); if ( $_product && $_product->exists() && $cart_item['quantity'] > 0 && apply_filters( 'woocommerce_cart_item_visible', true, $cart_item, $cart_item_key ) ) { $product_permalink = apply_filters( 'woocommerce_cart_item_permalink', $_product->is_visible() ? $_product->get_permalink( $cart_item ) : '', $cart_item, $cart_item_key ); ?> <tr class="woocommerce-cart-form__cart-item <?php echo esc_attr( apply_filters( 'woocommerce_cart_item_class', 'cart_item', $cart_item, $cart_item_key ) ); ?>"> <td class="product-remove"> <?php echo apply_filters( // phpcs:ignore WordPress.Security.EscapeOutput.OutputNotEscaped 'woocommerce_cart_item_remove_link', sprintf( '<a href="%s" class="remove" aria-label="%s" data-product_id="%s" data-product_sku="%s">&times;</a>', esc_url( wc_get_cart_remove_url( $cart_item_key ) ), esc_html__( 'Remove this item', 'woocommerce' ), esc_attr( $product_id ), esc_attr( $_product->get_sku() ) ), $cart_item_key ); ?> </td> <td class="product-thumbnail"> <?php $thumbnail = apply_filters( 'woocommerce_cart_item_thumbnail', $_product->get_image(), $cart_item, $cart_item_key ); if ( ! $product_permalink ) { echo $thumbnail; // PHPCS: XSS ok. } else { printf( '<a href="%s">%s</a>', esc_url( $product_permalink ), $thumbnail ); // PHPCS: XSS ok. 
} ?> </td> <td class="product-name" data-title="<?php esc_attr_e( 'Product', 'woocommerce' ); ?>"> <?php if ( ! $product_permalink ) { echo wp_kses_post( apply_filters( 'woocommerce_cart_item_name', $_product->get_name(), $cart_item, $cart_item_key ) . '&nbsp;' ); } else { echo wp_kses_post( apply_filters( 'woocommerce_cart_item_name', sprintf( '<a href="%s">%s</a>', esc_url( $product_permalink ), $_product->get_name() ), $cart_item, $cart_item_key ) ); } do_action( 'woocommerce_after_cart_item_name', $cart_item, $cart_item_key ); ?> <?php // Meta data. // echo wc_get_formatted_cart_item_data( $cart_item ); // PHPCS: XSS ok. ?> <div class="box-type-field"> <span>BOP: <input class="box-type" cart_item_key="<?php echo $cart_item['key'] ?>" type="number" value="<?php echo $cart_item['BOP'] ?>" /></span> </div> <?php /* foreach ( WC()->cart->get_cart() as $cart_item_key => $cart_item ) { ?> <!-- place it anywhere within the foreach --> <div class="box-type-field"> <select class="box-type" name="box-type" cart_item_key="<?php echo $cart_item_key ?>"> <option <?php echo $cart_item['BOP'] ?> value="boxes">1</option> <option <?php echo $cart_item['BOP'] ?> value="boxes">2</option> </select> </div> <?php } */ ?> <?php // Backorder notification. if ( $_product->backorders_require_notification() && $_product->is_on_backorder( $cart_item['quantity'] ) ) { echo wp_kses_post( apply_filters( 'woocommerce_cart_item_backorder_notification', '<p class="backorder_notification">' . esc_html__( 'Available on backorder', 'woocommerce' ) . 
'</p>', $product_id ) ); } ?> </td> <td class="product-quantity" data-title="<?php esc_attr_e( 'Quantity', 'woocommerce' ); ?>"> <?php if ( $_product->is_sold_individually() ) { $product_quantity = sprintf( '1 <input type="hidden" name="cart[%s][qty]" value="1" />', $cart_item_key ); } else { $product_quantity = woocommerce_quantity_input( array( 'input_name' => "cart[{$cart_item_key}][qty]", 'input_value' => $cart_item['quantity'], 'max_value' => $_product->get_max_purchase_quantity(), 'min_value' => '0', 'product_name' => $_product->get_name(), ), $_product, false ); } echo apply_filters( 'woocommerce_cart_item_quantity', $product_quantity, $cart_item_key, $cart_item ); // PHPCS: XSS ok. ?> </td> </tr> <?php } } ?> JS jQuery('.box-type-field .box-type').on('change', function () { var cartItemKey = jQuery(this).attr("cart_item_key"); var boxType = jQuery(this).val(); jQuery.ajax({ type : "post", url : '/wp-admin/admin-ajax.php', datatype: 'json', data : { action : "update_cart_boxtype", cart_item_key : cartItemKey, box_type : boxType, }, success: function(cartItem) { cartItemKey = cartItem[0]; cartItemQty = cartItem[1]; if (cartItem) jQuery('input[name="cart['+cartItemKey+'][qty]"]').val(cartItemQty); // update quantity jQuery('.woocommerce-cart-form button[type="submit"]').click(); // submit form } }) }) PHP (functions.php) function update_cart_boxtype_init() { if ( ! 
WC()->cart->is_empty() ) { $cart_item_key = (isset($_POST['cart_item_key']))?$_POST['cart_item_key'] : ''; $cart_item = WC()->cart->cart_contents[ $cart_item_key ]; $box_type = (isset($_POST['box_type']))?$_POST['box_type'] : ''; $cart_updated = false; $cart_item_key_new = WC()->cart->generate_cart_id( $cart_item['product_id'], $cart_item['variation_id'], $cart_item['variation'], ['box-type'=>$box_type] ); $found = WC()->cart->find_product_in_cart( $cart_item_key_new ); if ($found != '') { $new_qty = $cart_item['quantity'] + WC()->cart->cart_contents[ $cart_item_key_new ]['quantity']; WC()->cart->remove_cart_item($cart_item_key); wp_send_json_success([$cart_item_key_new, $new_qty]); } else { WC()->cart->add_to_cart($cart_item['product_id'], $cart_item['quantity'], $cart_item['variation_id'], $cart_item['variation'], ['box-type' => $box_type]); $cart_updated = true; WC()->cart->remove_cart_item($cart_item_key); wp_send_json_success(false); } } wp_die(); } add_action( 'wp_ajax_update_cart_boxtype', 'update_cart_boxtype_init' ); add_action( 'wp_ajax_nopriv_update_cart_boxtype', 'update_cart_boxtype_init' ); A: // To make the value on the product meta data editable and savable in the cart, you can add a form input field for the BOP value in the cart.php file. For example: <input type="text" name="bop_value" value="<?php echo $bop_value; ?>" /> // Then, you can update the value of the BOP field when the update cart button is clicked by using the woocommerce_update_cart_action_cart_updated action hook. In the hooked function, you can retrieve the new value of the BOP field from the $_POST variable and update the cart item data accordingly. For example: add_action( 'woocommerce_update_cart_action_cart_updated', 'update_bop_value' ); function update_bop_value( $cart_updated ) { if ( ! $cart_updated ) { return; } // Loop through cart items and update BOP value foreach ( WC()->cart->get_cart() as $cart_item_key => $cart_item ) { $bop_value = isset( $_POST['bop_value'] ) ? 
sanitize_text_field( $_POST['bop_value'] ) : ''; WC()->cart->cart_contents[$cart_item_key]['bop_value'] = $bop_value; } } // Finally, you can display the updated BOP value in the cart by modifying the code in the cart.php file to retrieve and display the updated value from the cart item data. For example: <td class="product-name" data-title="<?php esc_attr_e( 'Product', 'woocommerce' ); ?>"> <?php if ( ! $product_permalink ) { echo wp_kses_post( apply_filters( 'woocommerce_cart_item_name', $_product->get_name(), $cart_item, $cart_item_key ) . '&nbsp;' ); } else { echo wp_kses_post( apply_filters( 'woocommerce_cart_item_name', sprintf( '<a href="%s">%s</a>', esc_url( $product_permalink ), $_product->get_name() ), $cart_item, $cart_item_key ) ); } do_action( 'woocommerce_after_cart_item_name', $cart_item, $cart_item_key ); ?> <?php // Meta data. // echo wc_get_formatted_cart_item_data( $cart_item ); // PHPCS: XSS ok. ?> <div class="bop-field"> BOP Value: <?php echo $cart_item['bop_value']; ?> </div> </td>
Trying update and save a value for product meta data on woocommerce cart
I've been trying to figure out how can I make the value on the product meta data shown on the cart to be editable and saved when clicking the update quote button. When I try to update the value on the BOP field for each product, it does not reflect properly I've made some work in progress but can never seem to make it work cart.php <?php foreach ( WC()->cart->get_cart() as $cart_item_key => $cart_item ) { $_product = apply_filters( 'woocommerce_cart_item_product', $cart_item['data'], $cart_item, $cart_item_key ); $product_id = apply_filters( 'woocommerce_cart_item_product_id', $cart_item['product_id'], $cart_item, $cart_item_key ); if ( $_product && $_product->exists() && $cart_item['quantity'] > 0 && apply_filters( 'woocommerce_cart_item_visible', true, $cart_item, $cart_item_key ) ) { $product_permalink = apply_filters( 'woocommerce_cart_item_permalink', $_product->is_visible() ? $_product->get_permalink( $cart_item ) : '', $cart_item, $cart_item_key ); ?> <tr class="woocommerce-cart-form__cart-item <?php echo esc_attr( apply_filters( 'woocommerce_cart_item_class', 'cart_item', $cart_item, $cart_item_key ) ); ?>"> <td class="product-remove"> <?php echo apply_filters( // phpcs:ignore WordPress.Security.EscapeOutput.OutputNotEscaped 'woocommerce_cart_item_remove_link', sprintf( '<a href="%s" class="remove" aria-label="%s" data-product_id="%s" data-product_sku="%s">&times;</a>', esc_url( wc_get_cart_remove_url( $cart_item_key ) ), esc_html__( 'Remove this item', 'woocommerce' ), esc_attr( $product_id ), esc_attr( $_product->get_sku() ) ), $cart_item_key ); ?> </td> <td class="product-thumbnail"> <?php $thumbnail = apply_filters( 'woocommerce_cart_item_thumbnail', $_product->get_image(), $cart_item, $cart_item_key ); if ( ! $product_permalink ) { echo $thumbnail; // PHPCS: XSS ok. } else { printf( '<a href="%s">%s</a>', esc_url( $product_permalink ), $thumbnail ); // PHPCS: XSS ok. 
} ?> </td> <td class="product-name" data-title="<?php esc_attr_e( 'Product', 'woocommerce' ); ?>"> <?php if ( ! $product_permalink ) { echo wp_kses_post( apply_filters( 'woocommerce_cart_item_name', $_product->get_name(), $cart_item, $cart_item_key ) . '&nbsp;' ); } else { echo wp_kses_post( apply_filters( 'woocommerce_cart_item_name', sprintf( '<a href="%s">%s</a>', esc_url( $product_permalink ), $_product->get_name() ), $cart_item, $cart_item_key ) ); } do_action( 'woocommerce_after_cart_item_name', $cart_item, $cart_item_key ); ?> <?php // Meta data. // echo wc_get_formatted_cart_item_data( $cart_item ); // PHPCS: XSS ok. ?> <div class="box-type-field"> <span>BOP: <input class="box-type" cart_item_key="<?php echo $cart_item['key'] ?>" type="number" value="<?php echo $cart_item['BOP'] ?>" /></span> </div> <?php /* foreach ( WC()->cart->get_cart() as $cart_item_key => $cart_item ) { ?> <!-- place it anywhere within the foreach --> <div class="box-type-field"> <select class="box-type" name="box-type" cart_item_key="<?php echo $cart_item_key ?>"> <option <?php echo $cart_item['BOP'] ?> value="boxes">1</option> <option <?php echo $cart_item['BOP'] ?> value="boxes">2</option> </select> </div> <?php } */ ?> <?php // Backorder notification. if ( $_product->backorders_require_notification() && $_product->is_on_backorder( $cart_item['quantity'] ) ) { echo wp_kses_post( apply_filters( 'woocommerce_cart_item_backorder_notification', '<p class="backorder_notification">' . esc_html__( 'Available on backorder', 'woocommerce' ) . 
'</p>', $product_id ) ); } ?> </td> <td class="product-quantity" data-title="<?php esc_attr_e( 'Quantity', 'woocommerce' ); ?>"> <?php if ( $_product->is_sold_individually() ) { $product_quantity = sprintf( '1 <input type="hidden" name="cart[%s][qty]" value="1" />', $cart_item_key ); } else { $product_quantity = woocommerce_quantity_input( array( 'input_name' => "cart[{$cart_item_key}][qty]", 'input_value' => $cart_item['quantity'], 'max_value' => $_product->get_max_purchase_quantity(), 'min_value' => '0', 'product_name' => $_product->get_name(), ), $_product, false ); } echo apply_filters( 'woocommerce_cart_item_quantity', $product_quantity, $cart_item_key, $cart_item ); // PHPCS: XSS ok. ?> </td> </tr> <?php } } ?> JS jQuery('.box-type-field .box-type').on('change', function () { var cartItemKey = jQuery(this).attr("cart_item_key"); var boxType = jQuery(this).val(); jQuery.ajax({ type : "post", url : '/wp-admin/admin-ajax.php', datatype: 'json', data : { action : "update_cart_boxtype", cart_item_key : cartItemKey, box_type : boxType, }, success: function(cartItem) { cartItemKey = cartItem[0]; cartItemQty = cartItem[1]; if (cartItem) jQuery('input[name="cart['+cartItemKey+'][qty]"]').val(cartItemQty); // update quantity jQuery('.woocommerce-cart-form button[type="submit"]').click(); // submit form } }) }) PHP (functions.php) function update_cart_boxtype_init() { if ( ! 
WC()->cart->is_empty() ) { $cart_item_key = (isset($_POST['cart_item_key']))?$_POST['cart_item_key'] : ''; $cart_item = WC()->cart->cart_contents[ $cart_item_key ]; $box_type = (isset($_POST['box_type']))?$_POST['box_type'] : ''; $cart_updated = false; $cart_item_key_new = WC()->cart->generate_cart_id( $cart_item['product_id'], $cart_item['variation_id'], $cart_item['variation'], ['box-type'=>$box_type] ); $found = WC()->cart->find_product_in_cart( $cart_item_key_new ); if ($found != '') { $new_qty = $cart_item['quantity'] + WC()->cart->cart_contents[ $cart_item_key_new ]['quantity']; WC()->cart->remove_cart_item($cart_item_key); wp_send_json_success([$cart_item_key_new, $new_qty]); } else { WC()->cart->add_to_cart($cart_item['product_id'], $cart_item['quantity'], $cart_item['variation_id'], $cart_item['variation'], ['box-type' => $box_type]); $cart_updated = true; WC()->cart->remove_cart_item($cart_item_key); wp_send_json_success(false); } } wp_die(); } add_action( 'wp_ajax_update_cart_boxtype', 'update_cart_boxtype_init' ); add_action( 'wp_ajax_nopriv_update_cart_boxtype', 'update_cart_boxtype_init' );
[ "// To make the value on the product meta data editable and savable in the cart, you can add a form input field for the BOP value in the cart.php file. For example:\n\n<input type=\"text\" name=\"bop_value\" value=\"<?php echo $bop_value; ?>\" />\n\n// Then, you can update the value of the BOP field when the update cart button is clicked by using the woocommerce_update_cart_action_cart_updated action hook. In the hooked function, you can retrieve the new value of the BOP field from the $_POST variable and update the cart item data accordingly. For example:\n\nadd_action( 'woocommerce_update_cart_action_cart_updated', 'update_bop_value' );\nfunction update_bop_value( $cart_updated ) {\nif ( ! $cart_updated ) {\nreturn;\n}\n\n// Loop through cart items and update BOP value\nforeach ( WC()->cart->get_cart() as $cart_item_key => $cart_item ) {\n $bop_value = isset( $_POST['bop_value'] ) ? sanitize_text_field( $_POST['bop_value'] ) : '';\n WC()->cart->cart_contents[$cart_item_key]['bop_value'] = $bop_value;\n}\n}\n\n// Finally, you can display the updated BOP value in the cart by modifying the code in the cart.php file to retrieve and display the updated value from the cart item data. For example:\n\n<td class=\"product-name\" data-title=\"<?php esc_attr_e( 'Product', 'woocommerce' ); ?>\">\n <?php\n if ( ! $product_permalink ) {\n echo wp_kses_post( apply_filters( 'woocommerce_cart_item_name', $_product->get_name(), $cart_item, $cart_item_key ) . '&nbsp;' );\n } else {\n echo wp_kses_post( apply_filters( 'woocommerce_cart_item_name', sprintf( '<a href=\"%s\">%s</a>', esc_url( $product_permalink ), $_product->get_name() ), $cart_item, $cart_item_key ) );\n }\n do_action( 'woocommerce_after_cart_item_name', $cart_item, $cart_item_key );\n ?>\n <?php \n // Meta data.\n // echo wc_get_formatted_cart_item_data( $cart_item ); // PHPCS: XSS ok. ?>\n <div class=\"bop-field\">\n BOP Value: <?php echo $cart_item['bop_value']; ?>\n </div>\n</td>\n\n" ]
[ 1 ]
[]
[]
[ "ajax", "php", "woocommerce", "wordpress" ]
stackoverflow_0074652853_ajax_php_woocommerce_wordpress.txt
Q: How to loop from a dataframe to another one to count occurence of certain words? enter image description here I have two dataframes, df1 contains a column with all possible combinations and df2 contains a column with the actual combinations. I want to make a second column within df1 that loops through df2 and counts the values. So if df1 has a row with 'A,C' and df2 rows with 'A,B,C' and with 'A,C,D' I want the code to add a 2 in the new column. Ofcourse if a loop isnt necessary here, something else is also ok.. I added a excel example, now I want to do it in python with more than 20000 rows.. #################### A: To loop through the rows of two dataframes and count the values in one dataframe based on the values in the other dataframe, you can use a for loop and the pandas DataFrame.isin() method. Here is an example of how you can do this: import pandas as pd # Define the dataframes df1 = pd.DataFrame({'col1': ['A,B', 'A,C', 'C,D', 'E,F']}) df2 = pd.DataFrame({'col1': ['A,B,C', 'A,C,D', 'A,C,E', 'C,D,E', 'E,F,G']}) # Initialize an empty list to store the counts counts = [] # Loop through the rows of df1 and count the number of rows in df2 # that contain the same value for i, row in df1.iterrows(): count = df2.col1.isin([row['col1']]).sum() counts.append(count) # Add the counts to df1 as a new column df1['counts'] = counts # Print the resulting dataframe print(df1) This code first defines the df1 and df2 dataframes, and then initializes an empty list called counts to store the counts. It then uses a for loop to iterate over the rows of df1 and count the number of rows in df2 that contain the same value. The counts are added to the counts list, and the list is then added as a new column to df1. Finally, the code prints the resulting dataframe. 
When you run this code, it will print the following output: col1 counts 0 A,B 1 1 A,C 2 2 C,D 2 3 E,F 1 This is the expected result, with a count of 2 for the rows with values 'A,C' and 'C,D' in df1, because these values are present in two rows of df2.
How to loop from a dataframe to another one to count occurence of certain words?
enter image description here I have two dataframes, df1 contains a column with all possible combinations and df2 contains a column with the actual combinations. I want to make a second column within df1 that loops through df2 and counts the values. So if df1 has a row with 'A,C' and df2 rows with 'A,B,C' and with 'A,C,D' I want the code to add a 2 in the new column. Ofcourse if a loop isnt necessary here, something else is also ok.. I added a excel example, now I want to do it in python with more than 20000 rows.. ####################
[ "To loop through the rows of two dataframes and count the values in one dataframe based on the values in the other dataframe, you can use a for loop and the pandas DataFrame.isin() method.\nHere is an example of how you can do this:\nimport pandas as pd\n\n# Define the dataframes\ndf1 = pd.DataFrame({'col1': ['A,B', 'A,C', 'C,D', 'E,F']})\ndf2 = pd.DataFrame({'col1': ['A,B,C', 'A,C,D', 'A,C,E', 'C,D,E', 'E,F,G']})\n\n# Initialize an empty list to store the counts\ncounts = []\n\n# Loop through the rows of df1 and count the number of rows in df2\n# that contain the same value\nfor i, row in df1.iterrows():\n count = df2.col1.isin([row['col1']]).sum()\n counts.append(count)\n\n# Add the counts to df1 as a new column\ndf1['counts'] = counts\n\n# Print the resulting dataframe\nprint(df1)\n\nThis code first defines the df1 and df2 dataframes, and then initializes an empty list called counts to store the counts. It then uses a for loop to iterate over the rows of df1 and count the number of rows in df2 that contain the same value. The counts are added to the counts list, and the list is then added as a new column to df1. Finally, the code prints the resulting dataframe.\nWhen you run this code, it will print the following output:\n col1 counts\n0 A,B 1\n1 A,C 2\n2 C,D 2\n3 E,F 1\n\nThis is the expected result, with a count of 2 for the rows with values 'A,C' and 'C,D' in df1, because these values are present in two rows of df2.\n" ]
[ 0 ]
[]
[]
[ "combinations", "count", "dataframe", "find_occurrences", "python" ]
stackoverflow_0074681083_combinations_count_dataframe_find_occurrences_python.txt
Q: How can I change the HTML of elements in a loop, using JavaScript? I'm trying to add paragraph numbers to a text using JavaScript. Here's what I have so far. // Get all the paragraphs. I only want the ones in <main> const paras = document.querySelectorAll('main p') // Iterate through all the paragraphs, keeping track of their indices for (let i = 0; i < paras.length; i++) { var thisPara = paras[i] // Change the paragraph HTML to include the paragraph number thisPara.innerHtml += '<span class="paragraphNumber">${i}</span>' } <main> <p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Assumenda ullam incidunt officia, nesciunt porro illo, voluptates vero reprehenderit quia aliquid amet. Eos, ducimus magnam sed at culpa quas recusandae maxime?</p> <p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Ipsam, eaque.</p> <p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Quasi accusantium atque veniam rerum sequi voluptate magni, ipsa, iste, perferendis beatae labore deleniti consectetur reiciendis aliquid unde consequuntur molestiae tempora quod maxime ratione eius velit aspernatur. Repudiandae dolorum, non obcaecati harum!</p> <p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Earum maiores placeat magni! Enim officia, tempora quas optio.</p> </main> I've been trying this in the browser console to see if I can get it to work, before adding it to the HTML. The HTML doesn't seem to change, so something's not working. What am I doing wrong? A: There is a typo in your code. You are using innerHtml to set the inner HTML of the paragraph element, but the correct property name is innerHTML. Here is the corrected code: // Get all the paragraphs. 
I only want the ones in <main> const paras = document.querySelectorAll('main p') // Iterate through all the paragraphs, keeping track of their indices for (let i = 0; i < paras.length; i++) { var thisPara = paras[i] // Change the paragraph HTML to include the paragraph number thisPara.innerHTML += `<span class="paragraphNumber">${i}</span>` } Shorted, this could look like this: document.querySelectorAll('main p').forEach((p, i) => p.innerHTML += `<span class="paragraphNumber">${i}</span>`);
How can I change the HTML of elements in a loop, using JavaScript?
I'm trying to add paragraph numbers to a text using JavaScript. Here's what I have so far. // Get all the paragraphs. I only want the ones in <main> const paras = document.querySelectorAll('main p') // Iterate through all the paragraphs, keeping track of their indices for (let i = 0; i < paras.length; i++) { var thisPara = paras[i] // Change the paragraph HTML to include the paragraph number thisPara.innerHtml += '<span class="paragraphNumber">${i}</span>' } <main> <p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Assumenda ullam incidunt officia, nesciunt porro illo, voluptates vero reprehenderit quia aliquid amet. Eos, ducimus magnam sed at culpa quas recusandae maxime?</p> <p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Ipsam, eaque.</p> <p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Quasi accusantium atque veniam rerum sequi voluptate magni, ipsa, iste, perferendis beatae labore deleniti consectetur reiciendis aliquid unde consequuntur molestiae tempora quod maxime ratione eius velit aspernatur. Repudiandae dolorum, non obcaecati harum!</p> <p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Earum maiores placeat magni! Enim officia, tempora quas optio.</p> </main> I've been trying this in the browser console to see if I can get it to work, before adding it to the HTML. The HTML doesn't seem to change, so something's not working. What am I doing wrong?
[ "There is a typo in your code. You are using innerHtml to set the inner HTML of the paragraph element, but the correct property name is innerHTML.\nHere is the corrected code:\n// Get all the paragraphs. I only want the ones in <main>\nconst paras = document.querySelectorAll('main p')\n\n// Iterate through all the paragraphs, keeping track of their indices\nfor (let i = 0; i < paras.length; i++) {\n var thisPara = paras[i]\n // Change the paragraph HTML to include the paragraph number\n thisPara.innerHTML += `<span class=\"paragraphNumber\">${i}</span>`\n}\n\nShorted, this could look like this:\ndocument.querySelectorAll('main p').forEach((p, i) => p.innerHTML += `<span class=\"paragraphNumber\">${i}</span>`);\n\n" ]
[ 0 ]
[]
[]
[ "javascript" ]
stackoverflow_0074681159_javascript.txt
Q: gitlab-runner executor failing on Microk8s Raspberry pi cluster - `ContainersNotInitialized: "containers with incomplete status: [init-permissions]"` gitlab-runner executor failing on Microk8s Raspberry pi cluster - ContainersNotInitialized: "containers with incomplete status: [init-permissions]" Any help or just suggestions as to how to troubleshoot this further would be appreciated! I am trying to us the gitlab-runner executor on a mincrok8s raspberry pi cluster. I am getting the following errors: Running with gitlab-runner 14.5.2 (e91107dd) on gitlab-runner-gitlab-runner-5779968774-dppmf kxEK3YoP Preparing the "kubernetes" executor 00:00 Using Kubernetes namespace: gitlab Using Kubernetes executor with image arm64v7/ubuntu:20.04 ... Using attach strategy to execute scripts... Preparing environment Waiting for pod gitlab/runner-kxek3yop-project-5-concurrent-0cp2v8 to be running, status is Pending ContainersNotInitialized: "containers with incomplete status: [init-permissions]" ContainersNotReady: "containers with unready status: [build helper]" ContainersNotReady: "containers with unready status: [build helper]" Waiting for pod gitlab/runner-kxek3yop-project-5-concurrent-0cp2v8 to be running, status is Pending ContainersNotInitialized: "containers with incomplete status: [init-permissions]" ContainersNotReady: "containers with unready status: [build helper]" ContainersNotReady: "containers with unready status: [build helper]" Waiting for pod gitlab/runner-kxek3yop-project-5-concurrent-0cp2v8 to be running, status is Pending ContainersNotInitialized: "containers with incomplete status: [init-permissions]" ContainersNotReady: "containers with unready status: [build helper]" ContainersNotReady: "containers with unready status: [build helper]" ERROR: Job failed (system failure): prepare environment: waiting for pod running: pod status is failed. 
Check https://docs.gitlab.com/runner/shells/index.html#shell-profile-loading for more information I installed the gitlab-runner executor with helm. Here is the values.yaml I used: gitlabUrl: http://<my-url-is-here-you-not-need-it>/ runnerRegistrationToken: "xxxxxxxxxxxxxxxx" concurrent: 20 checkInterval: 30 # For RBAC support: rbac: create: false runners: image: ubuntu:18.04 helpers: image: gitlab/gitlab-runner-helper:arm64-latest privileged: false builds: # cpuLimit: 200m # memoryLimit: 256Mi cpuRequests: 100m memoryRequests: 128Mi services: # cpuLimit: 200m # memoryLimit: 256Mi cpuRequests: 100m memoryRequests: 128Mi helpers: # cpuLimit: 200m # memoryLimit: 256Mi cpuRequests: 100m memoryRequests: 128Mi A: I had the same issue today and it was beacause of the default helper image which is basically wrong in the chart. Something went really wrong with gitlab's helper image registry because all of the arm images look to be built for amd64! Look here for example: https://hub.docker.com/r/gitlab/gitlab-runner-helper/tags?page=1&name=arm In any case here's what fixed my problem, in my values.yaml this is how I specified the runners section: runners: tags: "ascalia-k8s" secret: gitlab-runner-secret namespace: gitlab config: | [[runners]] name = "Kubernetes Prod Runner" executor = "kubernetes" [runners.kubernetes] image = "ubuntu:20.04" helper_image = "gitlab/gitlab-runner-helper:arm64-1278d3da"
gitlab-runner executor failing on Microk8s Raspberry pi cluster - `ContainersNotInitialized: "containers with incomplete status: [init-permissions]"`
gitlab-runner executor failing on Microk8s Raspberry pi cluster - ContainersNotInitialized: "containers with incomplete status: [init-permissions]" Any help or just suggestions as to how to troubleshoot this further would be appreciated! I am trying to us the gitlab-runner executor on a mincrok8s raspberry pi cluster. I am getting the following errors: Running with gitlab-runner 14.5.2 (e91107dd) on gitlab-runner-gitlab-runner-5779968774-dppmf kxEK3YoP Preparing the "kubernetes" executor 00:00 Using Kubernetes namespace: gitlab Using Kubernetes executor with image arm64v7/ubuntu:20.04 ... Using attach strategy to execute scripts... Preparing environment Waiting for pod gitlab/runner-kxek3yop-project-5-concurrent-0cp2v8 to be running, status is Pending ContainersNotInitialized: "containers with incomplete status: [init-permissions]" ContainersNotReady: "containers with unready status: [build helper]" ContainersNotReady: "containers with unready status: [build helper]" Waiting for pod gitlab/runner-kxek3yop-project-5-concurrent-0cp2v8 to be running, status is Pending ContainersNotInitialized: "containers with incomplete status: [init-permissions]" ContainersNotReady: "containers with unready status: [build helper]" ContainersNotReady: "containers with unready status: [build helper]" Waiting for pod gitlab/runner-kxek3yop-project-5-concurrent-0cp2v8 to be running, status is Pending ContainersNotInitialized: "containers with incomplete status: [init-permissions]" ContainersNotReady: "containers with unready status: [build helper]" ContainersNotReady: "containers with unready status: [build helper]" ERROR: Job failed (system failure): prepare environment: waiting for pod running: pod status is failed. Check https://docs.gitlab.com/runner/shells/index.html#shell-profile-loading for more information I installed the gitlab-runner executor with helm. 
Here is the values.yaml I used: gitlabUrl: http://<my-url-is-here-you-not-need-it>/ runnerRegistrationToken: "xxxxxxxxxxxxxxxx" concurrent: 20 checkInterval: 30 # For RBAC support: rbac: create: false runners: image: ubuntu:18.04 helpers: image: gitlab/gitlab-runner-helper:arm64-latest privileged: false builds: # cpuLimit: 200m # memoryLimit: 256Mi cpuRequests: 100m memoryRequests: 128Mi services: # cpuLimit: 200m # memoryLimit: 256Mi cpuRequests: 100m memoryRequests: 128Mi helpers: # cpuLimit: 200m # memoryLimit: 256Mi cpuRequests: 100m memoryRequests: 128Mi
[ "I had the same issue today and it was beacause of the default helper image which is basically wrong in the chart. Something went really wrong with gitlab's helper image registry because all of the arm images look to be built for amd64! Look here for example: https://hub.docker.com/r/gitlab/gitlab-runner-helper/tags?page=1&name=arm\nIn any case here's what fixed my problem, in my values.yaml this is how I specified the runners section:\nrunners:\n tags: \"ascalia-k8s\"\n secret: gitlab-runner-secret\n namespace: gitlab\n config: |\n [[runners]]\n name = \"Kubernetes Prod Runner\"\n executor = \"kubernetes\"\n [runners.kubernetes]\n image = \"ubuntu:20.04\"\n helper_image = \"gitlab/gitlab-runner-helper:arm64-1278d3da\"\n\n" ]
[ 0 ]
[]
[]
[ "gitlab_ci", "gitlab_ci_runner", "microk8s", "raspberry_pi4" ]
stackoverflow_0070350914_gitlab_ci_gitlab_ci_runner_microk8s_raspberry_pi4.txt
Q: CPanel Node.js application stopped working after successfully pulling data from Firebase I deployed a Node.js application on CPanel at this domain https://ad900.brighton.domains/api It stopped working after updating my app.js file to pull data from a Firebase database. Yesterday it was actually up and running and it was able to retrieve data from the database but when I checked today, the page is not even loading. I am new to both Firebase and deploying a node app on CPanel but I suspect it might be an incorrect use of the get function provided by firebase. Here is my code: const express = require('express') const bodyParser = require('body-parser'); const { initializeApp } = require('firebase/app'); const { getDatabase, ref, get } = require('firebase/database'); const firebaseConfig = { databaseURL : "https://ido-webscraper-default-rtdb.europe-west1.firebasedatabase.app/" } // Initialize database const firebase = initializeApp(firebaseConfig) const database = getDatabase(firebase); const dbRef = ref(database); var data = {}; // Retrieve data get(dbRef).then((snapshot) => { if (snapshot.exists()) { data = snapshot.val(); console.log(data); } else { console.log("No data available"); } }) .catch((error) => { console.error(error) }) // Initialize express app const app = express() app.use(bodyParser.urlencoded({extended: false})); app.get('/api', async (req, res) => { res.json(data) }) A: If you are using shared hosting, it might be that the processes are terminated and you need to run a cron job to keep it running. Please see this video for full set up via shared hosting with cPanel https://youtu.be/sIcy3q3Ib_s
CPanel Node.js application stopped working after successfully pulling data from Firebase
I deployed a Node.js application on CPanel at this domain https://ad900.brighton.domains/api It stopped working after updating my app.js file to pull data from a Firebase database. Yesterday it was actually up and running and it was able to retrieve data from the database but when I checked today, the page is not even loading. I am new to both Firebase and deploying a node app on CPanel but I suspect it might be an incorrect use of the get function provided by firebase. Here is my code: const express = require('express') const bodyParser = require('body-parser'); const { initializeApp } = require('firebase/app'); const { getDatabase, ref, get } = require('firebase/database'); const firebaseConfig = { databaseURL : "https://ido-webscraper-default-rtdb.europe-west1.firebasedatabase.app/" } // Initialize database const firebase = initializeApp(firebaseConfig) const database = getDatabase(firebase); const dbRef = ref(database); var data = {}; // Retrieve data get(dbRef).then((snapshot) => { if (snapshot.exists()) { data = snapshot.val(); console.log(data); } else { console.log("No data available"); } }) .catch((error) => { console.error(error) }) // Initialize express app const app = express() app.use(bodyParser.urlencoded({extended: false})); app.get('/api', async (req, res) => { res.json(data) })
[ "If you are using shared hosting, it might be that the processes are terminated and you need to run a cron job to keep it running.\nPlease see this video for full set up via shared hosting with cPanel https://youtu.be/sIcy3q3Ib_s\n" ]
[ 0 ]
[]
[]
[ "cpanel", "express", "firebase", "firebase_realtime_database", "node.js" ]
stackoverflow_0074405957_cpanel_express_firebase_firebase_realtime_database_node.js.txt
Q: Is there a way to take a combo of mixed numbers and letters and just put the numbers into a new variable For example, If I input 9a8,4 I want 984 output from it. I am not sure how I would do this or if I can. Thank you. I have not been able to try anything because i do not know where to start. A: Sure you can, take a look at arrays and try manipulate them. You can start creating an array with a string type and scan it using loops. When you find a number you take it and put into another array. Another solution could be to scan to find letters and remove those from the array. There are different ways, you just have to figure out wich one fits better in your problem. A: Yes, it is possible to extract the numbers from a string that contains both numbers and letters in C as well. Here is one way you can do it: Create a new empty string to hold the extracted numbers. Iterate through each character in the original string. If the character is a number, append it to the new string. After all the characters have been processed, the new string will contain only the numbers from the original string. Here is some example code that shows how this can be done in C: #include <stdio.h> #include <string.h> int main(void) { // create an empty string to hold the extracted numbers char numbers[100] = ""; // get the input string char input_str[] = "9a8,4"; // iterate through each character in the input string for (int i = 0; i < strlen(input_str); i++) { // if the character is a number, append it to the numbers string if (isdigit(input_str[i])) { strncat(numbers, &input_str[i], 1); } } // print the extracted numbers printf("%s\n", numbers); // this will print "984" return 0; } This code will work for extracting numbers from a string that contains both numbers and letters. I hope this helps! Let me know if you have any other questions.
Is there a way to take a combo of mixed numbers and letters and just put the numbers into a new variable
For example, If I input 9a8,4 I want 984 output from it. I am not sure how I would do this or if I can. Thank you. I have not been able to try anything because i do not know where to start.
[ "Sure you can, take a look at arrays and try manipulate them.\nYou can start creating an array with a string type and scan it using loops. When you find a number you take it and put into another array. Another solution could be to scan to find letters and remove those from the array.\nThere are different ways, you just have to figure out wich one fits better in your problem.\n", "Yes, it is possible to extract the numbers from a string that contains both numbers and letters in C as well. Here is one way you can do it:\nCreate a new empty string to hold the extracted numbers.\nIterate through each character in the original string.\nIf the character is a number, append it to the new string.\nAfter all the characters have been processed, the new string will contain only the numbers from the original string.\nHere is some example code that shows how this can be done in C:\n#include <stdio.h>\n#include <string.h>\n\nint main(void) {\n // create an empty string to hold the extracted numbers\n char numbers[100] = \"\";\n\n // get the input string\n char input_str[] = \"9a8,4\";\n\n // iterate through each character in the input string\n for (int i = 0; i < strlen(input_str); i++) {\n // if the character is a number, append it to the numbers string\n if (isdigit(input_str[i])) {\n strncat(numbers, &input_str[i], 1);\n }\n }\n\n // print the extracted numbers\n printf(\"%s\\n\", numbers); // this will print \"984\"\n\n return 0;\n}\n\nThis code will work for extracting numbers from a string that contains both numbers and letters. I hope this helps! Let me know if you have any other questions.\n" ]
[ 1, 0 ]
[]
[]
[ "c" ]
stackoverflow_0074681133_c.txt
Q: Angular emit one value when element is in view I created a directive that should emit an event once its element is visible in the viewport. @Directive({ selector: '[scrollListener]', }) export class ScrollListenerDirective { @Output() scrollListener: Observable<number>; constructor(private el: ElementRef) { this.scrollListener = fromEvent(document, 'scroll').pipe( map(() => this.el.nativeElement.getBoundingClientRect()), map(({ top }) => Math.abs(top)), filter((top) => top <= 100) ); } } The problem is that this event will emit whenever the top is less than 100px. That results in way too many events. How do I change this event so it only emit once the element is in view, stop emitting when it isn't, and then once the next time when it is visible. A: You could use the scan operator to cache the preceding value of top. By comparing the preceding value with the current value of top you can determine when the transition from > 100 to <= 100 happens. At the time of this transition, the observable emits a value. @Directive({ selector: '[scrollListener]', }) export class ScrollListenerDirective { @Output() scrollListener: Observable<number>; constructor(private el: ElementRef) { this.scrollListener = fromEvent(document, 'scroll').pipe( map(() => this.el.nativeElement.getBoundingClientRect()), map(({ top }) => Math.abs(top)), scan((acc: number[], curr: number) => [acc[1], curr], [0, 0]), filter(valuesOfTop => valuesOfTop[0] > 100 && valuesOfTop[1] <= 100), map(valuesOfTop => valuesOfTop[1]), ); } }
Angular emit one value when element is in view
I created a directive that should emit an event once its element is visible in the viewport. @Directive({ selector: '[scrollListener]', }) export class ScrollListenerDirective { @Output() scrollListener: Observable<number>; constructor(private el: ElementRef) { this.scrollListener = fromEvent(document, 'scroll').pipe( map(() => this.el.nativeElement.getBoundingClientRect()), map(({ top }) => Math.abs(top)), filter((top) => top <= 100) ); } } The problem is that this event will emit whenever the top is less than 100px. That results in way too many events. How do I change this event so it only emit once the element is in view, stop emitting when it isn't, and then once the next time when it is visible.
[ "You could use the scan operator to cache the preceding value of top. By comparing the preceding value with the current value of top you can determine when the transition from > 100 to <= 100 happens. At the time of this transition, the observable emits a value.\n@Directive({\n selector: '[scrollListener]',\n })\n export class ScrollListenerDirective {\n @Output() scrollListener: Observable<number>;\n \n constructor(private el: ElementRef) {\n this.scrollListener = fromEvent(document, 'scroll').pipe(\n map(() => this.el.nativeElement.getBoundingClientRect()),\n map(({ top }) => Math.abs(top)),\n scan((acc: number[], curr: number) => [acc[1], curr], [0, 0]),\n filter(valuesOfTop => valuesOfTop[0] > 100 && valuesOfTop[1] <= 100),\n map(valuesOfTop => valuesOfTop[1]),\n );\n }\n}\n\n" ]
[ 0 ]
[]
[]
[ "angular", "eventemitter", "rxjs" ]
stackoverflow_0074679636_angular_eventemitter_rxjs.txt
Q: How properly store values in belonging rows/cells of dataframe while scraping with Beautiful Soup? So, I am trying to scrape data from a journal. While I can successfully scrape titles of papers, keywords, and so on, and save them in dataframe properly when it comes to collecting authors' names, each mentioned author of the paper after the first one is stored in a new row. The same problem applies to affiliations. It’s making stored data useless and unrelated, obviously. Thus, instead of having the same number of rows, I get stuck with a useless dataframe. It is my understanding that the problem arises because the program doesn’t β€œknow” to store all the data associated with each paper in separate rows. Additionally, some papers only have one author, while others have 3-4. For example, authors need to be stored in a "NameSurname, NameSurname, NameSurname..." format within separate rows containing information about each research paper: authors, affiliations, etc. But when it comes to specifying classes that I intend to scrape, I am uncertain how to set up the Python (BS4) code properly. Here's a snippet of the relevant code from the simple scraper: title = [] authors = [] afiliations = [] for i in urls: page = requests.get(link) content = page.text soup = BeautifulSoup(content, "html.parser") for t in soup.select(".obj_article_details .page_title"): title.append(t.get_text(strip=True)) for au in soup.select(".obj_article_details .authors .name"): authors.append(au.get_text(strip=True)) for af in soup.select(".obj_article_details .item.authors .affiliation"): affiliations.append(af.get_text(strip=True)) time.sleep(3) Also, here is a structure of section which i am intending to scrape ... 
<article class="obj_article_details"> <h1 class="page_title"> Lorem ipsum dolor sit amet </h1> <div class="row"> <div class="main_entry"> <section class="item authors"> <ul class="authors"> <li> <span class="name">Brandon Scott </span> <span class="affiliation"> Villanova University, Pennsylvania </span> </li> <li> <span class="name">Alvaro Cote </span> <span class="affiliation">Carleton College, Minnesota</span> </li> </ul> </section> ... What I am getting now: |Authors | Affiliation | +--------------+------------------------------------+ |Brandon Scott | Villanova University, Pennsylvania | +--------------+------------------------------------+ |Alvaro Cote | Carleton College, Minnesota | +--------------+------------------------------------+ |... | ... | What i want: |Authors | Affiliation | +--------------+------------------------------------+ |Brandon Scott, Alvaro Cote | Villanova University..| +--------------+------------------------------------+ |... |... | +--------------+------------------------------------+ |... |... | A: For cases like this, you should use nested loops - an outer loop for the containers ResultSet (soup.select('article.obj_article_details') here), and the inner loop/s for the details you want - title/author/affiliation/etc. And it's also better to build a dictionary of the details for each container and add it to a list of dictionaries than to try to bind together separate lists (you've already faced some of the issues that are caused by that approach). Since you're doing the same thing for each detail (select followed by get_text), it would be more convenient to move those operations to a function like def getText_bySelector(tagSoup, selector, sep=None): selTags = tagSoup.select(selector) if selector else [tagSoup] if type(sep) == str: return sep.join([s.get_text(' ').strip() for s in selTags]) return selTags[0].get_text(' ').strip() if selTags else None (This is a variation of this function, which I use in most of my bs4 projects.) 
If you pass a string (like , /; /etc) as sep, it will join all the results with it (or return an empty string [""] if there are no results); otherwise, it will return the first result (or None if there are no results). Another reason I like using functions like this is that it allows me to use list comprehension instead the innermost for loop. Then, you just need to define a reference dictionary with the arguments you'll need to pass to getText_bySelector refDict = { 'title': ('.page_title', None), 'authors': ('.authors .name', ', '), 'affiliations': ('.item.authors .affiliation', '; ') } Now you can built a list of dictionaries with dictList = [] for i in urls: page = requests.get(link) content = page.text soup = BeautifulSoup(content, "html.parser") dictList += [{ k: getText_bySelector(a, vsel, vsep) for k, (vsel, vsep) in refDict.items() } for a in soup.select('article.obj_article_details')] The items in dictList will look like { 'title': 'Lorem ipsum dolor sit amet', 'authors': 'Brandon Scott, Alvaro Cote', 'affiliations': 'Villanova University, Pennsylvania; Carleton College, Minnesota' } and you can easily use pandas to view dictList as a table
How properly store values in belonging rows/cells of dataframe while scraping with Beautiful Soup?
So, I am trying to scrape data from a journal. While I can successfully scrape titles of papers, keywords, and so on, and save them in dataframe properly when it comes to collecting authors' names, each mentioned author of the paper after the first one is stored in a new row. The same problem applies to affiliations. It’s making stored data useless and unrelated, obviously. Thus, instead of having the same number of rows, I get stuck with a useless dataframe. It is my understanding that the problem arises because the program doesn’t β€œknow” to store all the data associated with each paper in separate rows. Additionally, some papers only have one author, while others have 3-4. For example, authors need to be stored in a "NameSurname, NameSurname, NameSurname..." format within separate rows containing information about each research paper: authors, affiliations, etc. But when it comes to specifying classes that I intend to scrape, I am uncertain how to set up the Python (BS4) code properly. Here's a snippet of the relevant code from the simple scraper: title = [] authors = [] afiliations = [] for i in urls: page = requests.get(link) content = page.text soup = BeautifulSoup(content, "html.parser") for t in soup.select(".obj_article_details .page_title"): title.append(t.get_text(strip=True)) for au in soup.select(".obj_article_details .authors .name"): authors.append(au.get_text(strip=True)) for af in soup.select(".obj_article_details .item.authors .affiliation"): affiliations.append(af.get_text(strip=True)) time.sleep(3) Also, here is a structure of section which i am intending to scrape ... 
<article class="obj_article_details"> <h1 class="page_title"> Lorem ipsum dolor sit amet </h1> <div class="row"> <div class="main_entry"> <section class="item authors"> <ul class="authors"> <li> <span class="name">Brandon Scott </span> <span class="affiliation"> Villanova University, Pennsylvania </span> </li> <li> <span class="name">Alvaro Cote </span> <span class="affiliation">Carleton College, Minnesota</span> </li> </ul> </section> ... What I am getting now: |Authors | Affiliation | +--------------+------------------------------------+ |Brandon Scott | Villanova University, Pennsylvania | +--------------+------------------------------------+ |Alvaro Cote | Carleton College, Minnesota | +--------------+------------------------------------+ |... | ... | What i want: |Authors | Affiliation | +--------------+------------------------------------+ |Brandon Scott, Alvaro Cote | Villanova University..| +--------------+------------------------------------+ |... |... | +--------------+------------------------------------+ |... |... |
[ "For cases like this, you should use nested loops - an outer loop for the containers ResultSet (soup.select('article.obj_article_details') here), and the inner loop/s for the details you want - title/author/affiliation/etc. And it's also better to build a dictionary of the details for each container and add it to a list of dictionaries than to try to bind together separate lists (you've already faced some of the issues that are caused by that approach).\n\nSince you're doing the same thing for each detail (select followed by get_text), it would be more convenient to move those operations to a function like\ndef getText_bySelector(tagSoup, selector, sep=None):\n selTags = tagSoup.select(selector) if selector else [tagSoup]\n if type(sep) == str: \n return sep.join([s.get_text(' ').strip() for s in selTags])\n return selTags[0].get_text(' ').strip() if selTags else None\n\n(This is a variation of this function, which I use in most of my bs4 projects.)\nIf you pass a string (like , /; /etc) as sep, it will join all the results with it (or return an empty string [\"\"] if there are no results); otherwise, it will return the first result (or None if there are no results).\nAnother reason I like using functions like this is that it allows me to use list comprehension instead the innermost for loop.\n\nThen, you just need to define a reference dictionary with the arguments you'll need to pass to getText_bySelector\nrefDict = {\n 'title': ('.page_title', None), \n 'authors': ('.authors .name', ', '),\n 'affiliations': ('.item.authors .affiliation', '; ')\n} \n\n\nNow you can built a list of dictionaries with\ndictList = []\nfor i in urls: \n page = requests.get(link)\n content = page.text\n soup = BeautifulSoup(content, \"html.parser\")\n\n dictList += [{\n k: getText_bySelector(a, vsel, vsep) \n for k, (vsel, vsep) in refDict.items()\n } for a in soup.select('article.obj_article_details')]\n\n\nThe items in dictList will look like\n{\n 'title': 'Lorem ipsum dolor sit 
amet',\n 'authors': 'Brandon Scott, Alvaro Cote',\n 'affiliations': 'Villanova University, Pennsylvania; Carleton College, Minnesota' \n}\n\nand you can easily use pandas to view dictList as a table\n\n" ]
[ 0 ]
[]
[]
[ "beautifulsoup", "dataframe", "pandas", "python_3.x", "web_scraping" ]
stackoverflow_0074680330_beautifulsoup_dataframe_pandas_python_3.x_web_scraping.txt
Q: Multiple qq plots in one figure I have a matrix mEps which is of shape (10, 1042), where 10 is the number of assets, and 1042 is the amount of datapoints. I want to show the Q-Q plot for each asset, so I can plot: for i in range(iN): sm.qqplot((mEps[i,:]), fit = True, line='q') However, then I get 10 pictures of Q-Q plots. I would like to have them in one figure, so I have the following code: fig, axes = plt.subplots(nrows=4, ncols=3, figsize=(15,10)) ax= axes.flatten() for i in range(iN): sm.qqplot((mEps[i,:]), fit = True, line='q') This code creates the figure, but it doesn't fill it with Q-Q plots.. Does anyone know how to do this? A: QQplot documentation https://www.statsmodels.org/dev/generated/statsmodels.graphics.gofplots.qqplot.html states that function takes as argument "ax" the ax in subplots, where you want to place your qqplot fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10,4)) qqplot(data_a['metrics'], line='s', ax=ax1) qqplot(data_b['metrics'], line='s', ax=ax2) ax1.set_title('Data A') ax2.set_title('Data B') plt.show()
Multiple qq plots in one figure
I have a matrix mEps which is of shape (10, 1042), where 10 is the number of assets, and 1042 is the amount of datapoints. I want to show the Q-Q plot for each asset, so I can plot: for i in range(iN): sm.qqplot((mEps[i,:]), fit = True, line='q') However, then I get 10 pictures of Q-Q plots. I would like to have them in one figure, so I have the following code: fig, axes = plt.subplots(nrows=4, ncols=3, figsize=(15,10)) ax= axes.flatten() for i in range(iN): sm.qqplot((mEps[i,:]), fit = True, line='q') This code creates the figure, but it doesn't fill it with Q-Q plots.. Does anyone know how to do this?
[ "QQplot documentation https://www.statsmodels.org/dev/generated/statsmodels.graphics.gofplots.qqplot.html\nstates that function takes as argument \"ax\" the ax in subplots, where you want to place your qqplot\nfig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10,4))\n\nqqplot(data_a['metrics'], line='s', ax=ax1)\nqqplot(data_b['metrics'], line='s', ax=ax2)\nax1.set_title('Data A')\nax2.set_title('Data B')\n\nplt.show()\n\n" ]
[ 0 ]
[]
[]
[ "plot", "python", "qq", "quantile" ]
stackoverflow_0052813683_plot_python_qq_quantile.txt
Q: Multiplayer with Photon, but I am controlling the other character instead I am using Photon PUN 2 for multiplayer in Unity. I can create a room or join a room. The problem appears when I join with a second player. When I control one of the players, I control the other player instead. I am using Standard Assets FPS controller. Do anyone know what is causing the problem? Any help would be appreciated. I will share the edited code: using System; using UnityEngine; using UnityStandardAssets.CrossPlatformInput; using UnityStandardAssets.Utility; using Random = UnityEngine.Random; using Photon.Pun; #pragma warning disable 618, 649 namespace UnityStandardAssets.Characters.FirstPerson { [RequireComponent(typeof (CharacterController))] [RequireComponent(typeof (AudioSource))] public class FirstPersonController : MonoBehaviour { [SerializeField] private bool m_IsWalking; [SerializeField] private float m_WalkSpeed; [SerializeField] private float m_RunSpeed; [SerializeField] [Range(0f, 1f)] private float m_RunstepLenghten; [SerializeField] private float m_JumpSpeed; [SerializeField] private float m_StickToGroundForce; [SerializeField] private float m_GravityMultiplier; [SerializeField] private MouseLook m_MouseLook; [SerializeField] private bool m_UseFovKick; [SerializeField] private FOVKick m_FovKick = new FOVKick(); [SerializeField] private bool m_UseHeadBob; [SerializeField] private CurveControlledBob m_HeadBob = new CurveControlledBob(); [SerializeField] private LerpControlledBob m_JumpBob = new LerpControlledBob(); [SerializeField] private float m_StepInterval; [SerializeField] private AudioClip[] m_FootstepSounds; // an array of footstep sounds that will be randomly selected from. [SerializeField] private AudioClip m_JumpSound; // the sound played when character leaves the ground. [SerializeField] private AudioClip m_LandSound; // the sound played when character touches back on ground. 
PhotonView view; private Camera m_Camera; private bool m_Jump; private float m_YRotation; private Vector2 m_Input; private Vector3 m_MoveDir = Vector3.zero; private CharacterController m_CharacterController; private CollisionFlags m_CollisionFlags; private bool m_PreviouslyGrounded; private Vector3 m_OriginalCameraPosition; private float m_StepCycle; private float m_NextStep; private bool m_Jumping; private AudioSource m_AudioSource; // Use this for initialization private void Start() { m_CharacterController = GetComponent<CharacterController>(); m_Camera = Camera.main; m_OriginalCameraPosition = m_Camera.transform.localPosition; m_FovKick.Setup(m_Camera); m_HeadBob.Setup(m_Camera, m_StepInterval); m_StepCycle = 0f; m_NextStep = m_StepCycle/2f; m_Jumping = false; m_AudioSource = GetComponent<AudioSource>(); m_MouseLook.Init(transform , m_Camera.transform); view = GetComponent<PhotonView>(); } // Update is called once per frame private void Update() { RotateView(); // the jump state needs to read here to make sure it is not missed if (!m_Jump) { m_Jump = CrossPlatformInputManager.GetButtonDown("Jump"); } if (!m_PreviouslyGrounded && m_CharacterController.isGrounded) { StartCoroutine(m_JumpBob.DoBobCycle()); PlayLandingSound(); m_MoveDir.y = 0f; m_Jumping = false; } if (!m_CharacterController.isGrounded && !m_Jumping && m_PreviouslyGrounded) { m_MoveDir.y = 0f; } m_PreviouslyGrounded = m_CharacterController.isGrounded; } private void PlayLandingSound() { m_AudioSource.clip = m_LandSound; m_AudioSource.Play(); m_NextStep = m_StepCycle + .5f; } private void FixedUpdate() { if (view.IsMine) { float speed; GetInput(out speed); // always move along the camera forward as it is the direction that it being aimed at Vector3 desiredMove = transform.forward * m_Input.y + transform.right * m_Input.x; // get a normal for the surface that is being touched to move along it RaycastHit hitInfo; Physics.SphereCast(transform.position, m_CharacterController.radius, Vector3.down, out 
hitInfo, m_CharacterController.height / 2f, Physics.AllLayers, QueryTriggerInteraction.Ignore); desiredMove = Vector3.ProjectOnPlane(desiredMove, hitInfo.normal).normalized; m_MoveDir.x = desiredMove.x * speed; m_MoveDir.z = desiredMove.z * speed; if (m_CharacterController.isGrounded) { m_MoveDir.y = -m_StickToGroundForce; if (m_Jump) { m_MoveDir.y = m_JumpSpeed; PlayJumpSound(); m_Jump = false; m_Jumping = true; } } else { m_MoveDir += Physics.gravity * m_GravityMultiplier * Time.fixedDeltaTime; } m_CollisionFlags = m_CharacterController.Move(m_MoveDir * Time.fixedDeltaTime); ProgressStepCycle(speed); UpdateCameraPosition(speed); m_MouseLook.UpdateCursorLock(); } } private void PlayJumpSound() { m_AudioSource.clip = m_JumpSound; m_AudioSource.Play(); } private void ProgressStepCycle(float speed) { if (m_CharacterController.velocity.sqrMagnitude > 0 && (m_Input.x != 0 || m_Input.y != 0)) { m_StepCycle += (m_CharacterController.velocity.magnitude + (speed*(m_IsWalking ? 1f : m_RunstepLenghten)))* Time.fixedDeltaTime; } if (!(m_StepCycle > m_NextStep)) { return; } m_NextStep = m_StepCycle + m_StepInterval; PlayFootStepAudio(); } private void PlayFootStepAudio() { if (!m_CharacterController.isGrounded) { return; } // pick & play a random footstep sound from the array, // excluding sound at index 0 int n = Random.Range(1, m_FootstepSounds.Length); m_AudioSource.clip = m_FootstepSounds[n]; m_AudioSource.PlayOneShot(m_AudioSource.clip); // move picked sound to index 0 so it's not picked next time m_FootstepSounds[n] = m_FootstepSounds[0]; m_FootstepSounds[0] = m_AudioSource.clip; } private void UpdateCameraPosition(float speed) { if (view.IsMine) { Vector3 newCameraPosition; if (!m_UseHeadBob) { return; } if (m_CharacterController.velocity.magnitude > 0 && m_CharacterController.isGrounded) { m_Camera.transform.localPosition = m_HeadBob.DoHeadBob(m_CharacterController.velocity.magnitude + (speed * (m_IsWalking ? 
1f : m_RunstepLenghten))); newCameraPosition = m_Camera.transform.localPosition; newCameraPosition.y = m_Camera.transform.localPosition.y - m_JumpBob.Offset(); } else { newCameraPosition = m_Camera.transform.localPosition; newCameraPosition.y = m_OriginalCameraPosition.y - m_JumpBob.Offset(); } m_Camera.transform.localPosition = newCameraPosition; } } private void GetInput(out float speed) { // Read input float horizontal = CrossPlatformInputManager.GetAxis("Horizontal"); float vertical = CrossPlatformInputManager.GetAxis("Vertical"); bool waswalking = m_IsWalking; #if !MOBILE_INPUT // On standalone builds, walk/run speed is modified by a key press. // keep track of whether or not the character is walking or running m_IsWalking = !Input.GetKey(KeyCode.LeftShift); #endif // set the desired speed to be walking or running speed = m_IsWalking ? m_WalkSpeed : m_RunSpeed; m_Input = new Vector2(horizontal, vertical); // normalize input if it exceeds 1 in combined length: if (m_Input.sqrMagnitude > 1) { m_Input.Normalize(); } // handle speed change to give an fov kick // only if the player is going to a run, is running and the fovkick is to be used if (m_IsWalking != waswalking && m_UseFovKick && m_CharacterController.velocity.sqrMagnitude > 0) { StopAllCoroutines(); StartCoroutine(!m_IsWalking ? 
m_FovKick.FOVKickUp() : m_FovKick.FOVKickDown()); } } private void RotateView() { if (view.IsMine) { m_MouseLook.LookRotation(transform, m_Camera.transform); } } private void OnControllerColliderHit(ControllerColliderHit hit) { Rigidbody body = hit.collider.attachedRigidbody; //dont move the rigidbody if the character is on top of it if (m_CollisionFlags == CollisionFlags.Below) { return; } if (body == null || body.isKinematic) { return; } body.AddForceAtPosition(m_CharacterController.velocity*0.1f, hit.point, ForceMode.Impulse); } } } A: put isMine in your code and put the movement code inside of it Example: private void Update() { if(view.IsMine){ //Add this RotateView(); if (!m_Jump) { m_Jump = CrossPlatformInputManager.GetButtonDown("Jump"); } if (!m_PreviouslyGrounded && m_CharacterController.isGrounded) { StartCoroutine(m_JumpBob.DoBobCycle()); PlayLandingSound(); m_MoveDir.y = 0f; m_Jumping = false; } if (!m_CharacterController.isGrounded && !m_Jumping && m_PreviouslyGrounded) { m_MoveDir.y = 0f; } m_PreviouslyGrounded = m_CharacterController.isGrounded; } }
Multiplayer with Photon, but I am controlling the other character instead
I am using Photon PUN 2 for multiplayer in Unity. I can create a room or join a room. The problem appears when I join with a second player. When I control one of the players, I control the other player instead. I am using Standard Assets FPS controller. Do anyone know what is causing the problem? Any help would be appreciated. I will share the edited code: using System; using UnityEngine; using UnityStandardAssets.CrossPlatformInput; using UnityStandardAssets.Utility; using Random = UnityEngine.Random; using Photon.Pun; #pragma warning disable 618, 649 namespace UnityStandardAssets.Characters.FirstPerson { [RequireComponent(typeof (CharacterController))] [RequireComponent(typeof (AudioSource))] public class FirstPersonController : MonoBehaviour { [SerializeField] private bool m_IsWalking; [SerializeField] private float m_WalkSpeed; [SerializeField] private float m_RunSpeed; [SerializeField] [Range(0f, 1f)] private float m_RunstepLenghten; [SerializeField] private float m_JumpSpeed; [SerializeField] private float m_StickToGroundForce; [SerializeField] private float m_GravityMultiplier; [SerializeField] private MouseLook m_MouseLook; [SerializeField] private bool m_UseFovKick; [SerializeField] private FOVKick m_FovKick = new FOVKick(); [SerializeField] private bool m_UseHeadBob; [SerializeField] private CurveControlledBob m_HeadBob = new CurveControlledBob(); [SerializeField] private LerpControlledBob m_JumpBob = new LerpControlledBob(); [SerializeField] private float m_StepInterval; [SerializeField] private AudioClip[] m_FootstepSounds; // an array of footstep sounds that will be randomly selected from. [SerializeField] private AudioClip m_JumpSound; // the sound played when character leaves the ground. [SerializeField] private AudioClip m_LandSound; // the sound played when character touches back on ground. 
PhotonView view; private Camera m_Camera; private bool m_Jump; private float m_YRotation; private Vector2 m_Input; private Vector3 m_MoveDir = Vector3.zero; private CharacterController m_CharacterController; private CollisionFlags m_CollisionFlags; private bool m_PreviouslyGrounded; private Vector3 m_OriginalCameraPosition; private float m_StepCycle; private float m_NextStep; private bool m_Jumping; private AudioSource m_AudioSource; // Use this for initialization private void Start() { m_CharacterController = GetComponent<CharacterController>(); m_Camera = Camera.main; m_OriginalCameraPosition = m_Camera.transform.localPosition; m_FovKick.Setup(m_Camera); m_HeadBob.Setup(m_Camera, m_StepInterval); m_StepCycle = 0f; m_NextStep = m_StepCycle/2f; m_Jumping = false; m_AudioSource = GetComponent<AudioSource>(); m_MouseLook.Init(transform , m_Camera.transform); view = GetComponent<PhotonView>(); } // Update is called once per frame private void Update() { RotateView(); // the jump state needs to read here to make sure it is not missed if (!m_Jump) { m_Jump = CrossPlatformInputManager.GetButtonDown("Jump"); } if (!m_PreviouslyGrounded && m_CharacterController.isGrounded) { StartCoroutine(m_JumpBob.DoBobCycle()); PlayLandingSound(); m_MoveDir.y = 0f; m_Jumping = false; } if (!m_CharacterController.isGrounded && !m_Jumping && m_PreviouslyGrounded) { m_MoveDir.y = 0f; } m_PreviouslyGrounded = m_CharacterController.isGrounded; } private void PlayLandingSound() { m_AudioSource.clip = m_LandSound; m_AudioSource.Play(); m_NextStep = m_StepCycle + .5f; } private void FixedUpdate() { if (view.IsMine) { float speed; GetInput(out speed); // always move along the camera forward as it is the direction that it being aimed at Vector3 desiredMove = transform.forward * m_Input.y + transform.right * m_Input.x; // get a normal for the surface that is being touched to move along it RaycastHit hitInfo; Physics.SphereCast(transform.position, m_CharacterController.radius, Vector3.down, out 
hitInfo, m_CharacterController.height / 2f, Physics.AllLayers, QueryTriggerInteraction.Ignore); desiredMove = Vector3.ProjectOnPlane(desiredMove, hitInfo.normal).normalized; m_MoveDir.x = desiredMove.x * speed; m_MoveDir.z = desiredMove.z * speed; if (m_CharacterController.isGrounded) { m_MoveDir.y = -m_StickToGroundForce; if (m_Jump) { m_MoveDir.y = m_JumpSpeed; PlayJumpSound(); m_Jump = false; m_Jumping = true; } } else { m_MoveDir += Physics.gravity * m_GravityMultiplier * Time.fixedDeltaTime; } m_CollisionFlags = m_CharacterController.Move(m_MoveDir * Time.fixedDeltaTime); ProgressStepCycle(speed); UpdateCameraPosition(speed); m_MouseLook.UpdateCursorLock(); } } private void PlayJumpSound() { m_AudioSource.clip = m_JumpSound; m_AudioSource.Play(); } private void ProgressStepCycle(float speed) { if (m_CharacterController.velocity.sqrMagnitude > 0 && (m_Input.x != 0 || m_Input.y != 0)) { m_StepCycle += (m_CharacterController.velocity.magnitude + (speed*(m_IsWalking ? 1f : m_RunstepLenghten)))* Time.fixedDeltaTime; } if (!(m_StepCycle > m_NextStep)) { return; } m_NextStep = m_StepCycle + m_StepInterval; PlayFootStepAudio(); } private void PlayFootStepAudio() { if (!m_CharacterController.isGrounded) { return; } // pick & play a random footstep sound from the array, // excluding sound at index 0 int n = Random.Range(1, m_FootstepSounds.Length); m_AudioSource.clip = m_FootstepSounds[n]; m_AudioSource.PlayOneShot(m_AudioSource.clip); // move picked sound to index 0 so it's not picked next time m_FootstepSounds[n] = m_FootstepSounds[0]; m_FootstepSounds[0] = m_AudioSource.clip; } private void UpdateCameraPosition(float speed) { if (view.IsMine) { Vector3 newCameraPosition; if (!m_UseHeadBob) { return; } if (m_CharacterController.velocity.magnitude > 0 && m_CharacterController.isGrounded) { m_Camera.transform.localPosition = m_HeadBob.DoHeadBob(m_CharacterController.velocity.magnitude + (speed * (m_IsWalking ? 
1f : m_RunstepLenghten))); newCameraPosition = m_Camera.transform.localPosition; newCameraPosition.y = m_Camera.transform.localPosition.y - m_JumpBob.Offset(); } else { newCameraPosition = m_Camera.transform.localPosition; newCameraPosition.y = m_OriginalCameraPosition.y - m_JumpBob.Offset(); } m_Camera.transform.localPosition = newCameraPosition; } } private void GetInput(out float speed) { // Read input float horizontal = CrossPlatformInputManager.GetAxis("Horizontal"); float vertical = CrossPlatformInputManager.GetAxis("Vertical"); bool waswalking = m_IsWalking; #if !MOBILE_INPUT // On standalone builds, walk/run speed is modified by a key press. // keep track of whether or not the character is walking or running m_IsWalking = !Input.GetKey(KeyCode.LeftShift); #endif // set the desired speed to be walking or running speed = m_IsWalking ? m_WalkSpeed : m_RunSpeed; m_Input = new Vector2(horizontal, vertical); // normalize input if it exceeds 1 in combined length: if (m_Input.sqrMagnitude > 1) { m_Input.Normalize(); } // handle speed change to give an fov kick // only if the player is going to a run, is running and the fovkick is to be used if (m_IsWalking != waswalking && m_UseFovKick && m_CharacterController.velocity.sqrMagnitude > 0) { StopAllCoroutines(); StartCoroutine(!m_IsWalking ? m_FovKick.FOVKickUp() : m_FovKick.FOVKickDown()); } } private void RotateView() { if (view.IsMine) { m_MouseLook.LookRotation(transform, m_Camera.transform); } } private void OnControllerColliderHit(ControllerColliderHit hit) { Rigidbody body = hit.collider.attachedRigidbody; //dont move the rigidbody if the character is on top of it if (m_CollisionFlags == CollisionFlags.Below) { return; } if (body == null || body.isKinematic) { return; } body.AddForceAtPosition(m_CharacterController.velocity*0.1f, hit.point, ForceMode.Impulse); } } }
[ "put isMine in your code and put the movement code inside of it\nExample:\nprivate void Update()\n {\n if(view.IsMine){ //Add this \n RotateView();\n \n if (!m_Jump)\n {\n m_Jump = CrossPlatformInputManager.GetButtonDown(\"Jump\");\n }\n\n if (!m_PreviouslyGrounded && m_CharacterController.isGrounded)\n {\n StartCoroutine(m_JumpBob.DoBobCycle());\n PlayLandingSound();\n m_MoveDir.y = 0f;\n m_Jumping = false;\n }\n if (!m_CharacterController.isGrounded && !m_Jumping && m_PreviouslyGrounded)\n {\n m_MoveDir.y = 0f;\n }\n\n m_PreviouslyGrounded = m_CharacterController.isGrounded;\n \n }\n }\n\n" ]
[ 0 ]
[]
[]
[ "c#", "photon", "unity3d" ]
stackoverflow_0068694333_c#_photon_unity3d.txt
Q: Gitlab CI/CD cache expires and therefor build fails I got AWS CDK application in typescript and pretty simple gitlab CI/CD pipeline with 2 stages, which takes care of the deployment: image: node:latest stages: - dependencies - deploy dependencies: stage: dependencies only: refs: - master changes: - package-lock.json script: - npm install - rm -rf node_modules/sharp - SHARP_IGNORE_GLOBAL_LIBVIPS=1 npm install --arch=x64 --platform=linux --libc=glibc sharp cache: key: files: - package-lock.json paths: - node_modules policy: push deploy: stage: deploy only: - master script: - npm run deploy cache: key: files: - package-lock.json paths: - node_modules policy: pull npm run deploy is just a wrapper for the cdk command. But for some reason, sometimes it happens, that the cache of the node_modules (probably) expires - simply deploy stage is not able to fetch for it and therefore the deploy stage fails: Restoring cache Checking cache for ***-protected... WARNING: file does not exist Failed to extract cache I checked that the cache name is the same as the one built previously in the last pipeline run with dependencies stage. I suppose it happens, as often times this CI/CD is not running even for multiple weeks, since I contribute to that repo rarely. I was trying to search for the root causes but failed miserably. I pretty much understand that cache can expire after some times(30 days from what I found by default), but I would expect CI/CD to recover from that by running the dependencies stage despite the fact package-lock.json wasn't updated. So my question is simply "What am I missing? Is my understanding of caching in Gitlab's CI/CD completely wrong? Do I have to turn on some feature switcher?" Basically my ultimate goal is to skip the building of the node_modules part as often as possible, but not failing on the non-existent cache even if I don't run the pipeline for multiple months. A: A cache is only a performance optimization, but is not guaranteed to always work. 
Your expectation that the cache might be expired is most likely correct, and thus you'll need to have a fallback in your deploy script. One thing you could do is that you change your dependencies job to: Always run Both push & pull the cache Shortcircuit the job if the cache was found. E.g. something like this: dependencies: stage: dependencies only: refs: - master changes: - package-lock.json script: - | if [[ -d node_modules ]]; then exit 0 fi - npm install - rm -rf node_modules/sharp - SHARP_IGNORE_GLOBAL_LIBVIPS=1 npm install --arch=x64 --platform=linux --libc=glibc sharp cache: key: files: - package-lock.json paths: - node_modules See also this related question. If you want to avoid spinning up unnecessary jobs, then you could also consider to merge the dependencies & deploy jobs, and take a similar approach as above in the combined job.
Gitlab CI/CD cache expires and therefor build fails
I got AWS CDK application in typescript and pretty simple gitlab CI/CD pipeline with 2 stages, which takes care of the deployment: image: node:latest stages: - dependencies - deploy dependencies: stage: dependencies only: refs: - master changes: - package-lock.json script: - npm install - rm -rf node_modules/sharp - SHARP_IGNORE_GLOBAL_LIBVIPS=1 npm install --arch=x64 --platform=linux --libc=glibc sharp cache: key: files: - package-lock.json paths: - node_modules policy: push deploy: stage: deploy only: - master script: - npm run deploy cache: key: files: - package-lock.json paths: - node_modules policy: pull npm run deploy is just a wrapper for the cdk command. But for some reason, sometimes it happens, that the cache of the node_modules (probably) expires - simply deploy stage is not able to fetch for it and therefore the deploy stage fails: Restoring cache Checking cache for ***-protected... WARNING: file does not exist Failed to extract cache I checked that the cache name is the same as the one built previously in the last pipeline run with dependencies stage. I suppose it happens, as often times this CI/CD is not running even for multiple weeks, since I contribute to that repo rarely. I was trying to search for the root causes but failed miserably. I pretty much understand that cache can expire after some times(30 days from what I found by default), but I would expect CI/CD to recover from that by running the dependencies stage despite the fact package-lock.json wasn't updated. So my question is simply "What am I missing? Is my understanding of caching in Gitlab's CI/CD completely wrong? Do I have to turn on some feature switcher?" Basically my ultimate goal is to skip the building of the node_modules part as often as possible, but not failing on the non-existent cache even if I don't run the pipeline for multiple months.
[ "A cache is only a performance optimization, but is not guaranteed to always work. Your expectation that the cache might be expired is most likely correct, and thus you'll need to have a fallback in your deploy script.\nOne thing you could do is that you change your dependencies job to:\n\nAlways run\nBoth push & pull the cache\nShortcircuit the job if the cache was found.\n\nE.g. something like this:\ndependencies:\n stage: dependencies\n only:\n refs:\n - master\n changes:\n - package-lock.json\n script:\n - |\n if [[ -d node_modules ]]; then\n exit 0\n fi\n - npm install\n - rm -rf node_modules/sharp\n - SHARP_IGNORE_GLOBAL_LIBVIPS=1 npm install --arch=x64 --platform=linux --libc=glibc sharp\n cache:\n key:\n files:\n - package-lock.json\n paths:\n - node_modules\n\nSee also this related question.\nIf you want to avoid spinning up unnecessary jobs, then you could also consider to merge the dependencies & deploy jobs, and take a similar approach as above in the combined job.\n" ]
[ 0 ]
[]
[]
[ "aws_cdk", "gitlab_ci", "gitlab_ci.yml", "node.js", "typescript" ]
stackoverflow_0074678052_aws_cdk_gitlab_ci_gitlab_ci.yml_node.js_typescript.txt
Q: recreate folder structure Osx I currently have a set of folders that I need to duplicate the structure into another folder. I currently work with photos that are dumped into a Dump folder in groups. Eg. Photo Dump Group1 Group2 etc I would like to have a script to recreate these folders without the files to the good folder so that I don't have to recreate them manually Any ideas? Nathan A: If I understand correctly, you want to copy the parent folder and all of its subfolders, but none of the files contained therein. There might be a simpler way, but I just threw together this Terminal command (which should also work on Linux or anywhere else with Bash): ls -R | grep :$ | sed 's/\.\/\(.*\):$/\1/' | \ while read thisFolder; do mkdir -p "destination"/"$thisFolder"; done It will copy the folder structure of all folders in the current directory into a folder called "destination"; you can of course change this to any path you wish, e.g. ~/Desktop/"Folder Copies" or whatever. Take care to first "cd" into whatever directory contains the folder tree you want to duplicate, because if you run it as soon as you open the terminal, you'll wind up with a replication of your entire home folder directory structure, including the many contained within Library. A: I found this to be a tad clearer: find 'Photo Dump' -type d | sed -e 's:^Photo Dump:destination:g' | sort | xargs mkdir find 'Photo Dump' -type d -> List all folders in "Photo Dump" sed -e 's:^Photo Dump:destination:g' - Since all folders listed in the above step will start with Photo Dump/..., we can just replace the beginning with the folder we want to copy the structure to (in this case I called it destination) sort - Sorts results. This is required so that the parent folders are created before the children xargs mkdir - Passes all the results from above into mkdir so it can create the folders
recreate folder structure Osx
I currently have a set of folders that I need to duplicate the structure into another folder. I currently work with photos that are dumped into a Dump folder in groups. Eg. Photo Dump Group1 Group2 etc I would like to have a script to recreate these folders without the files to the good folder so that I don't have to recreate them manually Any ideas? Nathan
[ "If I understand correctly, you want to copy the parent folder and all of its subfolders, but none of the files contained therein. There might be a simpler way, but I just threw together this Terminal command (which should also work on Linux or anywhere else with Bash):\nls -R | grep :$ | sed 's/\\.\\/\\(.*\\):$/\\1/' | \\\nwhile read thisFolder; do mkdir -p \"destination\"/\"$thisFolder\"; done\n\nIt will copy the folder structure of all folders in the current directory into a folder called \"destination\"; you can of course change this to any path you wish, e.g. ~/Desktop/\"Folder Copies\" or whatever.\nTake care to first \"cd\" into whatever directory contains the folder tree you want to duplicate, because if you run it as soon as you open the terminal, you'll wind up with a replication of your entire home folder directory structure, including the many contained within Library.\n", "I found this to be a tad clearer:\nfind 'Photo Dump' -type d | sed -e 's:^Photo Dump:destination:g' | sort | xargs mkdir\n\n\nfind 'Photo Dump' -type d -> List all folders in \"Photo Dump\"\n\nsed -e 's:^Photo Dump:destination:g' - Since all folders listed in the above step will start with Photo Dump/..., we can just replace the beginning with the folder we want to copy the structure to (in this case I called it destination)\n\nsort - Sorts results. This is required so that the parent folders are created before the children\n\nxargs mkdir - Passes all the results from above into mkdir so it can create the folders\n\n\n" ]
[ 2, 0 ]
[]
[]
[ "directory", "macos" ]
stackoverflow_0023588221_directory_macos.txt
Q: Ansible dynamically assigned variable Unable to print the output of a variable when assigned in playbook. it returns the value of variable. - name: get contents of local file set_fact: "{{ item.filename }}": "{{ lookup('file', 'files/conf/{{ item.name }}.zip') }}" loop: "{{ genericconf }}" - name: encode to base64 set_fact: "{{ item.basename }}": "{{ '{{ item.filename }}' |b64encode }}" loop: "{{ genericconf }}" - name: add lines blockinfile: block: |4 "{{ {{ item.basename')} }}" path: "/opt/try/{{ item.name }}.name" insertafter: "configuration: >-" marker: "" loop: "{{ genericconf }}" assign a variable to output of dictionary and try to call the value results in jinja template error A: - name: get contents of local file set_fact: filename: "{{ lookup('file', 'files/conf/{{ item.name }}.zip') }}" loop: "{{ genericconf }}" - name: encode to base64 set_fact: basename: "{{ filename | b64encode }}" loop: "{{ genericconf }}" - name: add lines blockinfile: block: |4 "{{ basename }}" path: "/opt/try/{{ item.name }}.name" insertafter: "configuration: >-" marker: "" loop: "{{ genericconf }}"
Ansible dynamically assigned variable
Unable to print the output of a variable when assigned in playbook. it returns the value of variable. - name: get contents of local file set_fact: "{{ item.filename }}": "{{ lookup('file', 'files/conf/{{ item.name }}.zip') }}" loop: "{{ genericconf }}" - name: encode to base64 set_fact: "{{ item.basename }}": "{{ '{{ item.filename }}' |b64encode }}" loop: "{{ genericconf }}" - name: add lines blockinfile: block: |4 "{{ {{ item.basename')} }}" path: "/opt/try/{{ item.name }}.name" insertafter: "configuration: >-" marker: "" loop: "{{ genericconf }}" assign a variable to output of dictionary and try to call the value results in jinja template error
[ "- name: get contents of local file\n set_fact:\n filename: \"{{ lookup('file', 'files/conf/{{ item.name }}.zip') }}\"\n loop: \"{{ genericconf }}\"\n\n- name: encode to base64\n set_fact:\n basename: \"{{ filename | b64encode }}\"\n loop: \"{{ genericconf }}\"\n\n- name: add lines\n blockinfile:\n block: |4\n \"{{ basename }}\"\n path: \"/opt/try/{{ item.name }}.name\"\n insertafter: \"configuration: >-\"\n marker: \"\"\n loop: \"{{ genericconf }}\"\n\n" ]
[ 0 ]
[]
[]
[ "ansible", "ansible_2.x" ]
stackoverflow_0074678348_ansible_ansible_2.x.txt
Q: when I debugging code I found "Exception has occurred." in object_patch.dart file NoSuchMethodError (NoSuchMethodError: The method '[]' was called on null. Receiver: null Tried calling: ) I cant solve this , because I can't understand this mean but its can't received null A: The error message "NoSuchMethodError (NoSuchMethodError: The method '[]' was called on null. Receiver: null Tried calling: )" indicates that you are trying to call the [] operator (also known as the index operator) on a null object. This means that the object you are trying to access is null, and therefore the [] operator cannot be called on it. To fix this error, you need to check if the object is null before trying to access it using the [] operator. You can do this using the ?? operator, which returns the left-hand operand if it is not null, and the right-hand operand otherwise. Here is an example of how you can use the ?? operator to fix this error: // Define the object var myObject = null; // Check if the object is null before accessing it var result = myObject?.[0] ?? 0; // Print the result print(result); // This will print 0 In this example, the myObject variable is defined as null, so the [] operator cannot be called on it. Therefore, the ?? operator is used to check if the object is null, and if it is, the value 0 is returned instead of trying to access the object using the [] operator.
when I debugging code I found "Exception has occurred." in object_patch.dart file
NoSuchMethodError (NoSuchMethodError: The method '[]' was called on null. Receiver: null Tried calling: ) I cant solve this , because I can't understand this mean but its can't received null
[ "The error message \"NoSuchMethodError (NoSuchMethodError: The method '[]' was called on null. Receiver: null Tried calling: )\" indicates that you are trying to call the [] operator (also known as the index operator) on a null object. This means that the object you are trying to access is null, and therefore the [] operator cannot be called on it.\nTo fix this error, you need to check if the object is null before trying to access it using the [] operator. You can do this using the ?? operator, which returns the left-hand operand if it is not null, and the right-hand operand otherwise.\nHere is an example of how you can use the ?? operator to fix this error:\n// Define the object\nvar myObject = null;\n\n// Check if the object is null before accessing it\nvar result = myObject?.[0] ?? 0;\n\n// Print the result\nprint(result); // This will print 0\n\nIn this example, the myObject variable is defined as null, so the [] operator cannot be called on it. Therefore, the ?? operator is used to check if the object is null, and if it is, the value 0 is returned instead of trying to access the object using the [] operator.\n" ]
[ 0 ]
[]
[]
[ "api", "dart", "dart_async", "flutter" ]
stackoverflow_0074680972_api_dart_dart_async_flutter.txt
Q: Screen Record Full Resolution Chrome Mobile Responsive Screen I'm trying to record our software in Chrome's mobile device emulator. The resolution for the hypothetical device is 1080 x 1920 (vertical orientation). I would like to be able to just record the emulation screen at full resolution. I know I could record my entire screen and just crop for the window showing the software, but then it does not record at the proper 1080 x 1920 resolution and looks blurry. Does anyone have any methods for recording just the emulation screen at full size? Essentially, I want to record the red boxed area in full resolution (which should be 1080 x 1920) Chrome Screen Emulation Window Thank you in advance!! A: One option is to use a screen recording software that allows you to select a specific area to record. This will allow you to select the emulator screen and record just that area at the desired resolution. Another option is to use the built-in screen recording feature in Chrome. To do this, you will need to open the developer tools in Chrome, go to the "Emulation" tab, and select the "Capture Screenshot" option. This will capture a screenshot of the emulator screen, which you can then save as an image file. From there, you can use a video editing software to create a video from the screenshot and set the resolution to 1080 x 1920. Alternatively, you can also use a mobile device to record the emulator screen by connecting it to your computer and using a screen recording app on the device. This will allow you to record the emulator screen in full resolution from the perspective of the mobile device
Screen Record Full Resolution Chrome Mobile Responsive Screen
I'm trying to record our software in Chrome's mobile device emulator. The resolution for the hypothetical device is 1080 x 1920 (vertical orientation). I would like to be able to just record the emulation screen at full resolution. I know I could record my entire screen and just crop for the window showing the software, but then it does not record at the proper 1080 x 1920 resolution and looks blurry. Does anyone have any methods for recording just the emulation screen at full size? Essentially, I want to record the red boxed area in full resolution (which should be 1080 x 1920) Chrome Screen Emulation Window Thank you in advance!!
[ "One option is to use a screen recording software that allows you to select a specific area to record. This will allow you to select the emulator screen and record just that area at the desired resolution.\nAnother option is to use the built-in screen recording feature in Chrome. To do this, you will need to open the developer tools in Chrome, go to the \"Emulation\" tab, and select the \"Capture Screenshot\" option. This will capture a screenshot of the emulator screen, which you can then save as an image file. From there, you can use a video editing software to create a video from the screenshot and set the resolution to 1080 x 1920.\nAlternatively, you can also use a mobile device to record the emulator screen by connecting it to your computer and using a screen recording app on the device. This will allow you to record the emulator screen in full resolution from the perspective of the mobile device\n" ]
[ 0 ]
[]
[]
[ "google_chrome", "responsive_design", "screen_capture", "screen_recording" ]
stackoverflow_0073280654_google_chrome_responsive_design_screen_capture_screen_recording.txt
Q: Typescript: Get runtime value of argument for return type evaluation I am having a problem with typescript return types. I have written the following function as a wrapper around process.env: function env< RequiredType = undefined | boolean, FallbackType = undefined | string, >( name: string, required?: RequiredType, fallback?: FallbackType, ): FallbackType extends string ? string : string | undefined { //do some magic to return env variable } This function, as declared above, is a wrapper around process.env. It takes a name as the first argument which is the name of an env var. The second and third argument are optional. When I set required to true, the functions return type should be infered as a string because if the env var is not defined and can not use a fallback the function runs process.exit(1). So in every scenario where required is set to true, it will return a string. The same is with fallback, if a fallback is set, the functions return type should be string because if an env var is not defined it will be replaced by a fallback value so it will retun a string anyways. Setting the return type to string if an fallback value is given works just fine, but i can not get my head around an implementation for the required argument. An example would be: const a = env("name") //infered type of a should be "string | undefined" (working) const b = env("name", false) //infered type of "b" should be "string | undefined" (working) const c = env("name", true) //infered type of "c" should be "string" (not working, should work because required is "true") <------- const d = env("name", false, "This is my name") //infered type of "d" should be "string" (working because of fallback) const e = env("name", true, "This is my name") //infered type of "e" should be "string" (working because of fallback, but should also work because required is "true") A: Recall that Typescript types don't exist at runtime. 
Thus, if you want to narrow the function signature for specific data passed, you have to teach Typescript about what subsets of data passed to your function will return in specific more narrowed return values. You can accomplish this with function overloading: function env(name: string, required: true, fallback?: string): string; function env(name: string, required: boolean|undefined, fallback: string): string; function env(name: string, required?: boolean, fallback?: string): string | undefined; function env( name: string, required?: boolean, fallback?: string, ) { const value = process.env[name] ?? fallback; if(required && value === undefined) { process.exit(1); } return value; } By providing multiple type signatures which are more narrow than the implementation signature, you can more directly control the return types Typescript is able to infer from a given usage of the function. A: For anyone interessted, here is the complete code: import { logger } from "../Components/logger"; //defined overloads export function env(name: string, required: true, fallback?: string): string; export function env( name: string, required: boolean | undefined, fallback: string, ): string; export function env( name: string, required?: boolean, fallback?: string, ): string | undefined; //env wrapper function export function env(name: string, required?: boolean, fallback?: string) { //check if env is set const isSet = process.env[name] !== undefined; //check if env is not set and required if (!isSet && required) { //check if fallback is not set (everything that is a string and not set) if (typeof fallback !== "string") { //log error logger.error( `ENV-Var "${name}" is not set, required and has no fallback!`, ); //exit process with error code 1 process.exit(1); } //log warning logger.warn( `ENV-Var "${name}" is not set, required but uses fallback value of "${fallback}"!`, ); } //return env when set else return fallback return isSet ? process.env[name] : fallback; }
Typescript: Get runtime value of argument for return type evaluation
I am having a problem with typescript return types. I have written the following function as a wrapper around process.env: function env< RequiredType = undefined | boolean, FallbackType = undefined | string, >( name: string, required?: RequiredType, fallback?: FallbackType, ): FallbackType extends string ? string : string | undefined { //do some magic to return env variable } This function, as declared above, is a wrapper around process.env. It takes a name as the first argument which is the name of an env var. The second and third argument are optional. When I set required to true, the functions return type should be infered as a string because if the env var is not defined and can not use a fallback the function runs process.exit(1). So in every scenario where required is set to true, it will return a string. The same is with fallback, if a fallback is set, the functions return type should be string because if an env var is not defined it will be replaced by a fallback value so it will retun a string anyways. Setting the return type to string if an fallback value is given works just fine, but i can not get my head around an implementation for the required argument. An example would be: const a = env("name") //infered type of a should be "string | undefined" (working) const b = env("name", false) //infered type of "b" should be "string | undefined" (working) const c = env("name", true) //infered type of "c" should be "string" (not working, should work because required is "true") <------- const d = env("name", false, "This is my name") //infered type of "d" should be "string" (working because of fallback) const e = env("name", true, "This is my name") //infered type of "e" should be "string" (working because of fallback, but should also work because required is "true")
[ "Recall that Typescript types don't exist at runtime. Thus, if you want to narrow the function signature for specific data passed, you have to teach Typescript about what subsets of data passed to your function will return in specific more narrowed return values. You can accomplish this with function overloading:\nfunction env(name: string, required: true, fallback?: string): string;\nfunction env(name: string, required: boolean|undefined, fallback: string): string;\nfunction env(name: string, required?: boolean, fallback?: string): string | undefined;\n\nfunction env(\n name: string,\n required?: boolean,\n fallback?: string,\n) {\n const value = process.env[name] ?? fallback;\n if(required && value === undefined) { process.exit(1); }\n return value;\n}\n\nBy providing multiple type signatures which are more narrow than the implementation signature, you can more directly control the return types Typescript is able to infer from a given usage of the function.\n", "For anyone interessted, here is the complete code:\nimport { logger } from \"../Components/logger\";\n\n//defined overloads\nexport function env(name: string, required: true, fallback?: string): string;\nexport function env(\n name: string,\n required: boolean | undefined,\n fallback: string,\n): string;\nexport function env(\n name: string,\n required?: boolean,\n fallback?: string,\n): string | undefined;\n\n//env wrapper function\nexport function env(name: string, required?: boolean, fallback?: string) {\n //check if env is set\n const isSet = process.env[name] !== undefined;\n\n //check if env is not set and required\n if (!isSet && required) {\n //check if fallback is not set (everything that is a string and not set)\n if (typeof fallback !== \"string\") {\n //log error\n logger.error(\n `ENV-Var \"${name}\" is not set, required and has no fallback!`,\n );\n\n //exit process with error code 1\n process.exit(1);\n }\n\n //log warning\n logger.warn(\n `ENV-Var \"${name}\" is not set, required but 
uses fallback value of \"${fallback}\"!`,\n );\n }\n\n //return env when set else return fallback\n return isSet ? process.env[name] : fallback;\n}\n\n\n" ]
[ 0, 0 ]
[]
[]
[ "types", "typescript" ]
stackoverflow_0074680380_types_typescript.txt
Q: Python - Shutil - Skip File Already exists I have many pdfs on my desktop. I want to run a python script to move all these pdfs to a folder I am testing a script and I found that a file already exists in the destination folder. The script when run says the file already exists. In this scenario, I would like to overwrite the file if it exists. How do I tell shutil to overwrite. import os import shutil import glob src = '/Users/myusername/Desktop' dest = '/Users/myusername/Desktop/PDF' os.chdir(src) for i in glob.glob("*.pdf"): print(i) shutil.move(i,dest) shutil.Error: Destination path '/Users/myusername/Desktop/PDF/test.pdf' already exists A: To tell the shutil.move() function to overwrite the destination file if it already exists, you can use the shutil.move() function's copy_function argument and set it to the shutil.copy2() function. This will cause the shutil.move() function to use the shutil.copy2() function to copy the file to the destination, which has the ability to overwrite an existing file. Here is an example of how you could modify your code to use the shutil.copy2() function to overwrite the destination file if it already exists: import os import shutil import glob src = '/Users/myusername/Desktop' dest = '/Users/myusername/Desktop/PDF' os.chdir(src) for i in glob.glob("*.pdf"): print(i) shutil.move(i, dest, copy_function=shutil.copy2) Alternatively, you can use the os.replace() function to move the file and overwrite the destination file if it already exists. This function is available in Python 3.3 and later. 
Here is an example of how you could use the os.replace() function to move the file and overwrite the destination file if it already exists: import os import glob src = '/Users/myusername/Desktop' dest = '/Users/myusername/Desktop/PDF' os.chdir(src) for i in glob.glob("*.pdf"): print(i) os.replace(i, os.path.join(dest, i)) Note that the os.replace() function is not available in Python 2.x, so if you are using Python 2.x, you will need to use the shutil.move() function with the copy_function argument set to the shutil.copy2() function.
Python - Shutil - Skip File Already exists
I have many pdfs on my desktop. I want to run a python script to move all these pdfs to a folder I am testing a script and I found that a file already exists in the destination folder. The script when run says the file already exists. In this scenario, I would like to overwrite the file if it exists. How do I tell shutil to overwrite. import os import shutil import glob src = '/Users/myusername/Desktop' dest = '/Users/myusername/Desktop/PDF' os.chdir(src) for i in glob.glob("*.pdf"): print(i) shutil.move(i,dest) shutil.Error: Destination path '/Users/myusername/Desktop/PDF/test.pdf' already exists
[ "To tell the shutil.move() function to overwrite the destination file if it already exists, you can use the shutil.move() function's copy_function argument and set it to the shutil.copy2() function. This will cause the shutil.move() function to use the shutil.copy2() function to copy the file to the destination, which has the ability to overwrite an existing file.\nHere is an example of how you could modify your code to use the shutil.copy2() function to overwrite the destination file if it already exists:\nimport os\nimport shutil\nimport glob\n\nsrc = '/Users/myusername/Desktop'\ndest = '/Users/myusername/Desktop/PDF'\n\nos.chdir(src)\nfor i in glob.glob(\"*.pdf\"):\n print(i)\n shutil.move(i, dest, copy_function=shutil.copy2)\n\nAlternatively, you can use the os.replace() function to move the file and overwrite the destination file if it already exists. This function is available in Python 3.3 and later. Here is an example of how you could use the os.replace() function to move the file and overwrite the destination file if it already exists:\nimport os\nimport glob\n\nsrc = '/Users/myusername/Desktop'\ndest = '/Users/myusername/Desktop/PDF'\n\nos.chdir(src)\nfor i in glob.glob(\"*.pdf\"):\n print(i)\n os.replace(i, os.path.join(dest, i))\n\nNote that the os.replace() function is not available in Python 2.x, so if you are using Python 2.x, you will need to use the shutil.move() function with the copy_function argument set to the shutil.copy2() function.\n" ]
[ 0 ]
[]
[]
[ "python", "shutil" ]
stackoverflow_0074681196_python_shutil.txt
Q: Cucumber 6 + JUnit 5 + Spring Parallel Scenarios Execution I've been reading a lot of documentations, posts, articles and it's said that out-of-box solution to run scenarios in a single feature file in parallel is impossible. We can use maven-surefire-plugin to run in parallel different feature files, but not scenarios. For example there is a feature file with scenarios: Feature: Parallel Scenarios Scenario: First ... Scenario: Second ... Scenario: Third ... And I'd like to run all there scenarios concurrently in separated threads. How can I achieve this? A: I am using testNG with courgette-jvm to run parallel tests at scenario level . Here is runner file import courgette.api.CourgetteOptions; import courgette.api.CourgetteRunLevel; import courgette.api.CucumberOptions; import courgette.api.testng.TestNGCourgette; import org.testng.annotations.Test; @Test @CourgetteOptions( threads = 10, runLevel = CourgetteRunLevel.SCENARIO, rerunFailedScenarios = true, rerunAttempts = 1, showTestOutput = true, reportTitle = "Courgette-JVM Example", reportTargetDir = "build", environmentInfo = "browser=chrome; git_branch=master", cucumberOptions = @CucumberOptions( features = "src/test/resources/com/test/", glue = "com.test.stepdefs", publish = true, plugin = { "pretty", "json:target/cucumber-report/cucumber.json", "html:target/cucumber-report/cucumber.html"} )) class AcceptanceIT extends TestNGCourgette { } and then use regular webdriver config, I use RemoteWebDriver protected RemoteWebDriver createDriver() throws MalformedURLException { //wherever grid hub is pointing. 
it should work without grid too String hubURL = "http://localhost:xxxx/wd/hub"; ChromeOptions options = new ChromeOptions(); DesiredCapabilities capabilities = DesiredCapabilities.chrome(); capabilities.setCapability(ChromeOptions.CAPABILITY, options); return (RemoteWebDriver) (driver = new RemoteWebDriver(new URL(hubURL), capabilities)); } public RemoteWebDriver getDriver() throws MalformedURLException { if (driver == null) { this.createDriver(); } return (RemoteWebDriver) driver; } you may have to utilize these dependencies <dependency> <groupId>io.github.prashant-ramcharan</groupId> <artifactId>courgette-jvm</artifactId> <version>5.11.0</version> </dependency> <dependency> <!-- httpclient dpendendecy is to resolve courgette-jvm error - NoClassDefFoundError: org/apache/http/conn/ssl/TrustAllStrategy --> <groupId>org.apache.httpcomponents</groupId> <artifactId>httpclient</artifactId> <version>4.5.10</version> </dependency> <dependency> <groupId>org.testng</groupId> <artifactId>testng</artifactId> <version>6.14.3</version> <scope>test</scope> </dependency> <dependency> <groupId>io.cucumber</groupId> <artifactId>cucumber-testng</artifactId> <version>6.9.1</version> </dependency>
Cucumber 6 + JUnit 5 + Spring Parallel Scenarios Execution
I've been reading a lot of documentations, posts, articles and it's said that out-of-box solution to run scenarios in a single feature file in parallel is impossible. We can use maven-surefire-plugin to run in parallel different feature files, but not scenarios. For example there is a feature file with scenarios: Feature: Parallel Scenarios Scenario: First ... Scenario: Second ... Scenario: Third ... And I'd like to run all there scenarios concurrently in separated threads. How can I achieve this?
[ "I am using testNG with courgette-jvm to run parallel tests at scenario level\n. Here is runner file\nimport courgette.api.CourgetteOptions;\nimport courgette.api.CourgetteRunLevel;\nimport courgette.api.CucumberOptions;\nimport courgette.api.testng.TestNGCourgette;\nimport org.testng.annotations.Test;\n\n@Test\n@CourgetteOptions(\n threads = 10,\n runLevel = CourgetteRunLevel.SCENARIO,\n rerunFailedScenarios = true,\n rerunAttempts = 1,\n showTestOutput = true,\n reportTitle = \"Courgette-JVM Example\",\n reportTargetDir = \"build\",\n environmentInfo = \"browser=chrome; git_branch=master\",\n cucumberOptions = @CucumberOptions(\n features = \"src/test/resources/com/test/\",\n glue = \"com.test.stepdefs\",\n publish = true,\n plugin = {\n \"pretty\",\n \"json:target/cucumber-report/cucumber.json\",\n \"html:target/cucumber-report/cucumber.html\"}\n ))\nclass AcceptanceIT extends TestNGCourgette {\n}\n\nand then use regular webdriver config, I use RemoteWebDriver\n protected RemoteWebDriver createDriver() throws MalformedURLException {\n //wherever grid hub is pointing. 
it should work without grid too\n String hubURL = \"http://localhost:xxxx/wd/hub\";\n\n ChromeOptions options = new ChromeOptions();\n DesiredCapabilities capabilities = DesiredCapabilities.chrome();\n capabilities.setCapability(ChromeOptions.CAPABILITY, options);\n return (RemoteWebDriver) (driver = new RemoteWebDriver(new URL(hubURL), capabilities));\n \n }\n\n public RemoteWebDriver getDriver() throws MalformedURLException {\n if (driver == null) {\n this.createDriver();\n }\n return (RemoteWebDriver) driver;\n }\n\nyou may have to utilize these dependencies\n <dependency>\n <groupId>io.github.prashant-ramcharan</groupId>\n <artifactId>courgette-jvm</artifactId>\n <version>5.11.0</version>\n </dependency>\n <dependency>\n <!-- httpclient dpendendecy is to resolve courgette-jvm error - NoClassDefFoundError: org/apache/http/conn/ssl/TrustAllStrategy --> \n <groupId>org.apache.httpcomponents</groupId>\n <artifactId>httpclient</artifactId>\n <version>4.5.10</version>\n </dependency>\n <dependency>\n <groupId>org.testng</groupId>\n <artifactId>testng</artifactId>\n <version>6.14.3</version>\n <scope>test</scope>\n</dependency>\n <dependency>\n <groupId>io.cucumber</groupId>\n <artifactId>cucumber-testng</artifactId>\n <version>6.9.1</version>\n</dependency>\n\n" ]
[ 1 ]
[ "For example two test fraemworks with parallel execution and 'Allure' screenshots(when step fail).\nAll detailed information in readme on github:\nhttps://github.com/simileyskiy/cucumber7-selenium3.selenide5-junit5-Allure-parallelExecution\nhttps://github.com/simileyskiy/cucumber7-selenium4.selenide6-junit5-Allure-parallelExecution\n" ]
[ -7 ]
[ "cucumber", "java", "maven", "parallel_processing", "spring" ]
stackoverflow_0071020032_cucumber_java_maven_parallel_processing_spring.txt
Q: gcd of all pair numbers in 2d array It is given a two-dimensional array with N rows and M columns(N and M are entered by the user). Calculate the GCDs(greatest common divisors) of each possible pairs of elements in the two-dimensional array. And those GCDs have to be strored in one-dimensional array, sorted from smallest to largest(including duplicated numbers). I think I have mistake(s) in for loops and in storing values in the array. By the way, I haven't sorted the array here, I'll do it later #include <stdio.h> int N; int M; //Function, that returns the gcd of two numbers int gcd(int a, int b){ return b = 0 ? a : gcd(b, a % b); } //Function that takes the requared 2d array and stores all GCDs in one dimensional array(all[]) int * dimArr(int arr[N][M]){ int gcds; static int all[1000]; for(int i=0; i<N; i++){ for(int j=i; j<M; j++){ gcds = gcd(arr[i][j], arr[i][j]+1); all[j] = gcds; } } return all; } int main(){ //And I don't know why I can't declare this 2d array with N and M variables int Arr[3][3] = { 3, 5, 8, 4, 2, 7, 9, 5, 6 }; //This pointer p accept the array int * p = dimArr(Arr); //Loop through the array and print the values for(int i = 0; i<sizeof(p)/sizeof(p[0]); i++){ printf("%d", *(p + i)); } return 0; } The result : 00 A: Here is some sample code that calculates the GCDs of all possible pairs of elements in a two-dimensional array and stores them in a one-dimensional array sorted from smallest to largest. 
#include <stdio.h> #include <stdlib.h> int gcd(int a, int b) { if (b == 0) return a; return gcd(b, a % b); } int main() { int n, m; printf("Enter the number of rows and columns in the array: "); scanf("%d%d", &n, &m); // Create the two-dimensional array and fill it with values int** arr = (int**) malloc(n * sizeof(int*)); for (int i = 0; i < n; i++) { arr[i] = (int*) malloc(m * sizeof(int)); for (int j = 0; j < m; j++) { printf("Enter the value at arr[%d][%d]: ", i, j); scanf("%d", &arr[i][j]); } } // Calculate the GCDs of all possible pairs of elements in the array // and store them in a one-dimensional array int* gcds = (int*) malloc((n * m * (n * m - 1) / 2) * sizeof(int)); int k = 0; for (int i = 0; i < n; i++) { for (int j = 0; j < m; j++) { for (int p = i; p < n; p++) { for (int q = j; q < m; q++) { if (p == i && q == j) continue; gcds[k++] = gcd(arr[i][j], arr[p][q]); } } } } // Sort the GCDs array in ascending order for (int i = 0; i < k; i++) { for (int j = i + 1; j < k; j++) { if (gcds[i] > gcds[j]) { int temp = gcds[i]; gcds[i] = gcds[j]; gcds[j] = temp; } } } // Print the sorted GCDs array printf("Sorted GCDs: "); for (int i = 0; i < k; i++) { printf("%d ", gcds[i]); } printf("\n"); // Free the memory free(gcds); for (int i = 0; i < n; i++) { free(arr[i]); } free(arr); return 0; } This code defines a function gcd that calculates the greatest common divisor (GCD) of two integers, a and b. The GCD is calculated using the Euclidean algorithm, which repeatedly applies the following steps until the remainder is zero: Divide the larger number by the smaller number. Set the smaller number to the remainder of the division. Repeat the process with the new pair of numbers until the remainder is zero. For example, to find the GCD of 15 and 10: Divide 15 by 10 to get a quotient of 1 and a remainder of 5. Set the smaller number to 5 and repeat the process: divide 5 by 10 to get a quotient of 0 and a remainder of 5. 
Since the remainder is not zero, repeat the process again: divide 5 by 5 to get a quotient of 1 and a remainder of 0. Since the remainder is zero, the GCD is the smaller number from the previous step, which is 5. The gcd function in this code implements this algorithm using recursion. It takes two integers, a and b, as arguments and returns their GCD. The main function in this code uses the gcd function to calculate the GCDs of all possible pairs of elements in a two-dimensional array of integers, and then sorts the GCDs in ascending order and prints them. First, the main function prompts the user to enter the number of rows and columns in the array, and then dynamically allocates memory for the array. It then fills the array with values by prompting the user to enter each value. Next, the main function calculates the GCDs of all possible pairs of elements in the array and stores them in a one-dimensional array. It then sorts the GCDs array in ascending order using a nested loop. Finally, the main function prints the sorted GCDs array and frees the memory that was dynamically allocated for the two-dimensional array and the GCDs array. You can modify this code to suit your specific needs. For example, you can change the way the two-dimensional array is filled with values, or the way the GCDs array is printed.
gcd of all pair numbers in 2d array
It is given a two-dimensional array with N rows and M columns(N and M are entered by the user). Calculate the GCDs(greatest common divisors) of each possible pairs of elements in the two-dimensional array. And those GCDs have to be strored in one-dimensional array, sorted from smallest to largest(including duplicated numbers). I think I have mistake(s) in for loops and in storing values in the array. By the way, I haven't sorted the array here, I'll do it later #include <stdio.h> int N; int M; //Function, that returns the gcd of two numbers int gcd(int a, int b){ return b = 0 ? a : gcd(b, a % b); } //Function that takes the requared 2d array and stores all GCDs in one dimensional array(all[]) int * dimArr(int arr[N][M]){ int gcds; static int all[1000]; for(int i=0; i<N; i++){ for(int j=i; j<M; j++){ gcds = gcd(arr[i][j], arr[i][j]+1); all[j] = gcds; } } return all; } int main(){ //And I don't know why I can't declare this 2d array with N and M variables int Arr[3][3] = { 3, 5, 8, 4, 2, 7, 9, 5, 6 }; //This pointer p accept the array int * p = dimArr(Arr); //Loop through the array and print the values for(int i = 0; i<sizeof(p)/sizeof(p[0]); i++){ printf("%d", *(p + i)); } return 0; } The result : 00
[ "Here is some sample code that calculates the GCDs of all possible pairs of elements in a two-dimensional array and stores them in a one-dimensional array sorted from smallest to largest.\n#include <stdio.h>\n#include <stdlib.h>\n\nint gcd(int a, int b) {\n if (b == 0) return a;\n return gcd(b, a % b);\n}\n\nint main() {\n int n, m;\n printf(\"Enter the number of rows and columns in the array: \");\n scanf(\"%d%d\", &n, &m);\n\n // Create the two-dimensional array and fill it with values\n int** arr = (int**) malloc(n * sizeof(int*));\n for (int i = 0; i < n; i++) {\n arr[i] = (int*) malloc(m * sizeof(int));\n for (int j = 0; j < m; j++) {\n printf(\"Enter the value at arr[%d][%d]: \", i, j);\n scanf(\"%d\", &arr[i][j]);\n }\n }\n\n // Calculate the GCDs of all possible pairs of elements in the array\n // and store them in a one-dimensional array\n int* gcds = (int*) malloc((n * m * (n * m - 1) / 2) * sizeof(int));\n int k = 0;\n for (int i = 0; i < n; i++) {\n for (int j = 0; j < m; j++) {\n for (int p = i; p < n; p++) {\n for (int q = j; q < m; q++) {\n if (p == i && q == j) continue;\n gcds[k++] = gcd(arr[i][j], arr[p][q]);\n }\n }\n }\n }\n\n // Sort the GCDs array in ascending order\n for (int i = 0; i < k; i++) {\n for (int j = i + 1; j < k; j++) {\n if (gcds[i] > gcds[j]) {\n int temp = gcds[i];\n gcds[i] = gcds[j];\n gcds[j] = temp;\n }\n }\n }\n\n // Print the sorted GCDs array\n printf(\"Sorted GCDs: \");\n for (int i = 0; i < k; i++) {\n printf(\"%d \", gcds[i]);\n }\n printf(\"\\n\");\n\n // Free the memory\n free(gcds);\n for (int i = 0; i < n; i++) {\n free(arr[i]);\n }\n free(arr);\n\n return 0;\n}\n\nThis code defines a function gcd that calculates the greatest common divisor (GCD) of two integers, a and b. 
The GCD is calculated using the Euclidean algorithm, which repeatedly applies the following steps until the remainder is zero:\n\nDivide the larger number by the smaller number.\nSet the smaller number to the remainder of the division.\nRepeat the process with the new pair of numbers until the remainder is zero.\n\nFor example, to find the GCD of 15 and 10:\n\nDivide 15 by 10 to get a quotient of 1 and a remainder of 5.\nSet the smaller number to 5 and repeat the process: divide 5 by 10 to get a quotient of 0 and a remainder of 5.\nSince the remainder is not zero, repeat the process again: divide 5 by 5 to get a quotient of 1 and a remainder of 0.\nSince the remainder is zero, the GCD is the smaller number from the previous step, which is 5.\n\nThe gcd function in this code implements this algorithm using recursion. It takes two integers, a and b, as arguments and returns their GCD.\nThe main function in this code uses the gcd function to calculate the GCDs of all possible pairs of elements in a two-dimensional array of integers, and then sorts the GCDs in ascending order and prints them.\nFirst, the main function prompts the user to enter the number of rows and columns in the array, and then dynamically allocates memory for the array. It then fills the array with values by prompting the user to enter each value.\nNext, the main function calculates the GCDs of all possible pairs of elements in the array and stores them in a one-dimensional array. It then sorts the GCDs array in ascending order using a nested loop.\nFinally, the main function prints the sorted GCDs array and frees the memory that was dynamically allocated for the two-dimensional array and the GCDs array.\nYou can modify this code to suit your specific needs. For example, you can change the way the two-dimensional array is filled with values, or the way the GCDs array is printed.\n" ]
[ -3 ]
[]
[]
[ "c" ]
stackoverflow_0074681124_c.txt
Q: Tricky typing problems with a generic union type doing an intersection type I have stumbled upon a peculiar problem, and I have no idea how to fix this. I have a class with a generic type. This class contains a method with only one union parameters asking for the generic type or an object. Since this class is instantiated multiple times with different types, I wanted to create a generic method that retrieves one of those instances (generically typed), and make a call of the method within. Here's a code for reproduction: export interface A { a: any; } export interface B { b: any; } export interface MyClasses { a: MyClass<A>; b: MyClass<B>; } export interface C { c: any; } export declare class MyClass<T = { [prop: string]: any }> { myMethod(data: T | C): T; } export type MyClassesKeys = keyof MyClasses; export type MyClassInferGenericType<T> = T extends MyClass<infer G> ? G : T; export class MyService { private myClasses: MyClasses = { a: new MyClass<A>(), b: new MyClass<B>() }; public getMyClass<T extends MyClassesKeys>(keyName: T): MyClasses[T] { return this.myClasses[keyName]; } public callerMethod<T extends MyClassesKeys, U extends MyClassInferGenericType<MyClasses[T]>>(key: T, myData: U) { // This part is usually retrieved gener const myClassInstance = this.getMyClass(key); // My call myClassInstance.myMethod(myData); } } This returns, somehow, a compilation error: Argument of type 'A | B' is not assignable to parameter of type '(A | C) & (B | C)'. Type 'A' is not assignable to type '(A | C) & (B | C)'. Type 'A' is not assignable to type 'A & C'. Property 'c' is missing in type 'A' but required in type 'C'. With "myClassInstance" being of type MyClasses[T] and myData taking the generic type associated with MyClasses[T], everything should be correctly inferred. So, why is typescript trying to do an intersection of my types? 
A: The reason you get an intersection is because the compiler loses track of the correlation between the type of key and the type of myData, and so it only accepts something it knows is safe. If myData were both A and B (like {a: 1, b: 2}), then it would be safe to call myClassInstance.myMethod(myData) no matter which key were passed in. That's what happens because myClassInstance.myMethod is seen having a union type: const x = myClassInstance.myMethod; // const x: ((data: A | C) => A) | ((data: B | C) => B) And if you call a union of functions, you have to pass in an intersection of its parameters for the compiler to accept it. Of course, it's impossible for someone to pass in myData and key that are not properly matched. (Actually that's not true, since key could be of a union type. Without ms/TS#27808 it is possible. Let's say that it's possible but unlikely. Or at least that we won't worry about it here.) But the compiler can't see this. There isn't really support for the compiler to see arbitrarily complex correlations between different types. The basic issue is described in microsoft/TypeScript#30581 which frames it in terms of correlated unions. Luckily, there is often a way to refactor the types in a way that the compiler is able to follow the logic. This is described in microsoft/TypeScript#47109. The idea is to make a "basic" type that represents your input/output relationship as simply as possible: interface MyClassGenerics { a: A; b: B } And then we write MyClasses explicitly as a mapped type over that basic type: type MyClasses = { [K in keyof MyClassGenerics]: MyClass<MyClassGenerics[K]> } So now, when you write MyClasses[K], the compiler will automatically see its relationship to MyClass<MyClassGenerics[K]>. 
And you can refer to MyClassGenerics[K] without needing to use MyClassInferGenericType: public callerMethod<K extends MyClassesKeys>(key: K, myData: MyClassGenerics[K]) { const myClassInstance = this.getMyClass(key); const x = myClassInstance.myMethod // const x: (data: C | MyClassGenerics[K]) => MyClassGenerics[K] myClassInstance.myMethod(myData); // okay } Now the type of myClassInstance.myMethod is seen to be a single function type whose input and output types depend on the generic type parameter K. It's no longer a union, so it can be called more easily. Playground link to code
Tricky typing problems with a generic union type doing an intersection type
I have stumbled upon a peculiar problem, and I have no idea how to fix this. I have a class with a generic type. This class contains a method with only one union parameters asking for the generic type or an object. Since this class is instantiated multiple times with different types, I wanted to create a generic method that retrieves one of those instances (generically typed), and make a call of the method within. Here's a code for reproduction: export interface A { a: any; } export interface B { b: any; } export interface MyClasses { a: MyClass<A>; b: MyClass<B>; } export interface C { c: any; } export declare class MyClass<T = { [prop: string]: any }> { myMethod(data: T | C): T; } export type MyClassesKeys = keyof MyClasses; export type MyClassInferGenericType<T> = T extends MyClass<infer G> ? G : T; export class MyService { private myClasses: MyClasses = { a: new MyClass<A>(), b: new MyClass<B>() }; public getMyClass<T extends MyClassesKeys>(keyName: T): MyClasses[T] { return this.myClasses[keyName]; } public callerMethod<T extends MyClassesKeys, U extends MyClassInferGenericType<MyClasses[T]>>(key: T, myData: U) { // This part is usually retrieved gener const myClassInstance = this.getMyClass(key); // My call myClassInstance.myMethod(myData); } } This returns, somehow, a compilation error: Argument of type 'A | B' is not assignable to parameter of type '(A | C) & (B | C)'. Type 'A' is not assignable to type '(A | C) & (B | C)'. Type 'A' is not assignable to type 'A & C'. Property 'c' is missing in type 'A' but required in type 'C'. With "myClassInstance" being of type MyClasses[T] and myData taking the generic type associated with MyClasses[T], everything should be correctly inferred. So, why is typescript trying to do an intersection of my types?
[ "The reason you get an intersection is because the compiler loses track of the correlation between the type of key and the type of myData, and so it only accepts something it knows is safe. If myData were both A and B (like {a: 1, b: 2}), then it would be safe to call myClassInstance.myMethod(myData) no matter which key were passed in.\nThat's what happens because myClassInstance.myMethod is seen having a union type:\nconst x = myClassInstance.myMethod;\n// const x: ((data: A | C) => A) | ((data: B | C) => B)\n\nAnd if you call a union of functions, you have to pass in an intersection of its parameters for the compiler to accept it.\nOf course, it's impossible for someone to pass in myData and key that are not properly matched. (Actually that's not true, since key could be of a union type. Without ms/TS#27808 it is possible. Let's say that it's possible but unlikely. Or at least that we won't worry about it here.) But the compiler can't see this.\nThere isn't really support for the compiler to see arbitrarily complex correlations between different types. The basic issue is described in microsoft/TypeScript#30581 which frames it in terms of correlated unions.\n\nLuckily, there is often a way to refactor the types in a way that the compiler is able to follow the logic. This is described in microsoft/TypeScript#47109. The idea is to make a \"basic\" type that represents your input/output relationship as simply as possible:\ninterface MyClassGenerics {\n a: A;\n b: B\n}\n\nAnd then we write MyClasses explicitly as a mapped type over that basic type:\ntype MyClasses = {\n [K in keyof MyClassGenerics]: MyClass<MyClassGenerics[K]>\n}\n\nSo now, when you write MyClasses[K], the compiler will automatically see its relationship to MyClass<MyClassGenerics[K]>. 
And you can refer to MyClassGenerics[K] without needing to use MyClassInferGenericType:\npublic callerMethod<K extends MyClassesKeys>(key: K, myData: MyClassGenerics[K]) {\n const myClassInstance = this.getMyClass(key);\n\n const x = myClassInstance.myMethod\n // const x: (data: C | MyClassGenerics[K]) => MyClassGenerics[K]\n\n myClassInstance.myMethod(myData); // okay\n}\n\nNow the type of myClassInstance.myMethod is seen to be a single function type whose input and output types depend on the generic type parameter K. It's no longer a union, so it can be called more easily.\nPlayground link to code\n" ]
[ 2 ]
[]
[]
[ "typescript", "typescript_generics" ]
stackoverflow_0074680936_typescript_typescript_generics.txt
Q: How to convert a MYSQL database to an Oracle database I am editing a MYSQL database using phpMyAdmin. I want to turn this into an Oracle database. How can this be done? A: Use mysqldump to export your data from MySQL. shell> mysqldump [options] db_name [tbl_name ...] In the [options] you'll probably have to tell MySQL to export your database in a format that is recognizable by Oracle. You can to this with the --compatible=name option, where name can be oracle. shell> mysqldump --compatible=oracle [options] db_name [tbl_name ...] After this you import the data by executing the script (in the dump) in Oracle and hope there won't be any errors. Or use something like Oracle's Sql*Loader. (I don't have experience with that, however I've found an article that describes your scenario.) (I've found a tutorial on using phpMyAdmin to do something similar. Maybe you're interested in it.) Update The --compatible option might not be supported for your particular version of MySQL. For example the documentation for MySQL 5.5 lists oracle as a supported value for this parameter, but the documentation for MySQL 8.0 does not. A: This is old topic, but for those who are still seeking answers on how to convert MySQL database to Oracle, use SQLines (SQL Developer failed on migration). How to do it: Skip if you have .sql script of your database. Use mysqldump to extract MySQL database: mysqldump -u _user_ -R _your_database_ > path_to_extracting_file.sql Use then SQLines to convert MySQL to Oracle: https://www.sqlines.com/online You will have to change some data manually, but in most cases it will do the job. You can also try: https://www.oracle.com/database/technologies/migrating-mysql-oracle-database.html, but you will need Third-party driver for MySQL. Personally, I had to do SQL Developer Migration + SQLines + manual editing to do successful migration.
How to convert a MYSQL database to an Oracle database
I am editing a MYSQL database using phpMyAdmin. I want to turn this into an Oracle database. How can this be done?
[ "Use mysqldump to export your data from MySQL.\nshell> mysqldump [options] db_name [tbl_name ...]\n\nIn the [options] you'll probably have to tell MySQL to export your database in a format that is recognizable by Oracle. You can to this with the --compatible=name option, where name can be oracle.\nshell> mysqldump --compatible=oracle [options] db_name [tbl_name ...]\n\nAfter this you import the data by executing the script (in the dump) in Oracle and hope there won't be any errors. Or use something like Oracle's Sql*Loader. (I don't have experience with that, however I've found an article that describes your scenario.)\n(I've found a tutorial on using phpMyAdmin to do something similar. Maybe you're interested in it.)\nUpdate\nThe --compatible option might not be supported for your particular version of MySQL. For example the documentation for MySQL 5.5 lists oracle as a supported value for this parameter, but the documentation for MySQL 8.0 does not.\n", "This is old topic, but for those who are still seeking answers on how to convert MySQL database to Oracle, use SQLines (SQL Developer failed on migration). How to do it:\nSkip if you have .sql script of your database.\nUse mysqldump to extract MySQL database:\nmysqldump -u _user_ -R _your_database_ > path_to_extracting_file.sql\nUse then SQLines to convert MySQL to Oracle: https://www.sqlines.com/online\nYou will have to change some data manually, but in most cases it will do the job. You can also try: https://www.oracle.com/database/technologies/migrating-mysql-oracle-database.html, but you will need Third-party driver for MySQL. Personally, I had to do SQL Developer Migration + SQLines + manual editing to do successful migration.\n" ]
[ 4, 0 ]
[]
[]
[ "database", "mysql", "oracle10g", "sqlplus" ]
stackoverflow_0008395612_database_mysql_oracle10g_sqlplus.txt
Q: Images take -1 second to load in Flutter I'm using precacheImage in my app to load faster my assets. Despite this, my first screen images take less than a second to show, causing a bad user experience. I think that this happens because I'm calling precacheImage in the same screen where I have to show some of the images, but this i the very first screen of the app. How can i avoid this behaviour? Is there a way to cache the images for the next app opens in order not to wait for them each time the user open the app? A: Yes, there is a way to cache the images for the next time the app is opened in Flutter. This can be done using the flutter_cache_manager package, which provides caching functionality for images and other files. To use this package, first add it to your pubspec.yaml file: dependencies: flutter_cache_manager: ^1.3.1 Then, import the package and use the CachedNetworkImage widget to display the images in your app. This widget automatically uses the cache manager to cache the images, so they will be loaded faster the next time the app is opened. Here is an example of how you can use the CachedNetworkImage widget to display images in your app: import 'package:flutter_cache_manager/flutter_cache_manager.dart'; import 'package:flutter/material.dart'; void main() => runApp(MyApp()); class MyApp extends StatelessWidget { @override Widget build(BuildContext context) { return MaterialApp( home: Scaffold( body: Center( child: CachedNetworkImage( imageUrl: 'https://example.com/my_image.png', placeholder: (context, url) => CircularProgressIndicator(), errorWidget: (context, url, error) => Icon(Icons.error), ), ), ), ); } } In the example code above, the CachedNetworkImage widget is used to display the image at the URL https://example.com/my_image.png. This widget automatically uses the cache manager to cache the image, so it will be loaded faster the next time the app is opened. 
The placeholder and errorWidget properties of the CachedNetworkImage widget are used to specify what should be displayed while the image is being loaded, and in case of an error, respectively. In this example, a circular progress indicator is shown while the image is being loaded, and an error icon is shown if there is an error loading the image. You can use the CachedNetworkImage widget in the same way to display any image that you want to cache for faster loading the next time the app is opened.
Images take -1 second to load in Flutter
I'm using precacheImage in my app to load faster my assets. Despite this, my first screen images take less than a second to show, causing a bad user experience. I think that this happens because I'm calling precacheImage in the same screen where I have to show some of the images, but this i the very first screen of the app. How can i avoid this behaviour? Is there a way to cache the images for the next app opens in order not to wait for them each time the user open the app?
[ "Yes, there is a way to cache the images for the next time the app is opened in Flutter. This can be done using the flutter_cache_manager package, which provides caching functionality for images and other files.\nTo use this package, first add it to your pubspec.yaml file:\ndependencies:\n flutter_cache_manager: ^1.3.1\n\nThen, import the package and use the CachedNetworkImage widget to display the images in your app. This widget automatically uses the cache manager to cache the images, so they will be loaded faster the next time the app is opened.\nHere is an example of how you can use the CachedNetworkImage widget to display images in your app:\nimport 'package:flutter_cache_manager/flutter_cache_manager.dart';\nimport 'package:flutter/material.dart';\n\nvoid main() => runApp(MyApp());\n\nclass MyApp extends StatelessWidget {\n @override\n Widget build(BuildContext context) {\n return MaterialApp(\n home: Scaffold(\n body: Center(\n child: CachedNetworkImage(\n imageUrl: 'https://example.com/my_image.png',\n placeholder: (context, url) => CircularProgressIndicator(),\n errorWidget: (context, url, error) => Icon(Icons.error),\n ),\n ),\n ),\n );\n }\n}\n\nIn the example code above, the CachedNetworkImage widget is used to display the image at the URL https://example.com/my_image.png. This widget automatically uses the cache manager to cache the image, so it will be loaded faster the next time the app is opened.\nThe placeholder and errorWidget properties of the CachedNetworkImage widget are used to specify what should be displayed while the image is being loaded, and in case of an error, respectively. In this example, a circular progress indicator is shown while the image is being loaded, and an error icon is shown if there is an error loading the image.\nYou can use the CachedNetworkImage widget in the same way to display any image that you want to cache for faster loading the next time the app is opened.\n" ]
[ 0 ]
[]
[]
[ "flutter" ]
stackoverflow_0074680890_flutter.txt
Q: I can't deploy my react nodejs application on the internet I have developed an application on React and NodeJs, everything works locally (localhost), but I do not understand how to deploy my application on the internet. Either on Cpanel (namecheap) or on netlify for example. I tried different ways, I think my problem is either the organization of my files or the scripts in my package.json file. Can someone tell me the best way to deploy my application on the internet? Thanks in advance Three of my file I have tried different ways of organizing my files, server part and client part in the same folder, server part and client part in separate folders, to modify the scripts calls in .json package I tried to deploy in a private hosting on CPanel All put in a zip, upload my files on mywebsite.com/myApp extract files, setup a nodejs app, give the path, and start the npm and js script but it doesn't work ... the only things i have is this is the result of my try to upload on CPanel that's my package.json code : ` { "name": "react-node-app", "version": "1.0.0", "description": "", "main": "index.js", "scripts": { "start": "node server/index.js", "build": "cd client && npm install && npm run build" }, "engines": { "node": "16.14.2" }, "keywords": [], "author": "", "license": "ISC", "dependencies": { "body-parser": "^1.20.1", "express": "^4.18.2", "n-readlines": "^1.0.1" } } ` A: Does your server run on a port? If so you need to add some extra configuration code in a file .htaccess DirectoryIndex disabled RewriteEngine On RewriteRule ^$ http://127.0.0.1:30000/ [P,L] RewriteCond %{REQUEST_FILENAME} !-f RewriteCond %{REQUEST_FILENAME} !-d RewriteRule ^(.*)$ http://127.0.0.1:30000/$1 [P,L] Please see this video for steps https://youtu.be/sIcy3q3Ib_s *If you are using a private VPN then you just need to skip the Cron Job section.
I can't deploy my react nodejs application on the internet
I have developed an application on React and NodeJs, everything works locally (localhost), but I do not understand how to deploy my application on the internet. Either on Cpanel (namecheap) or on netlify for example. I tried different ways, I think my problem is either the organization of my files or the scripts in my package.json file. Can someone tell me the best way to deploy my application on the internet? Thanks in advance Three of my file I have tried different ways of organizing my files, server part and client part in the same folder, server part and client part in separate folders, to modify the scripts calls in .json package I tried to deploy in a private hosting on CPanel All put in a zip, upload my files on mywebsite.com/myApp extract files, setup a nodejs app, give the path, and start the npm and js script but it doesn't work ... the only things i have is this is the result of my try to upload on CPanel that's my package.json code : ` { "name": "react-node-app", "version": "1.0.0", "description": "", "main": "index.js", "scripts": { "start": "node server/index.js", "build": "cd client && npm install && npm run build" }, "engines": { "node": "16.14.2" }, "keywords": [], "author": "", "license": "ISC", "dependencies": { "body-parser": "^1.20.1", "express": "^4.18.2", "n-readlines": "^1.0.1" } } `
[ "Does your server run on a port?\nIf so you need to add some extra configuration code in a file .htaccess\n DirectoryIndex disabled\n RewriteEngine On\n RewriteRule ^$ http://127.0.0.1:30000/ [P,L]\n RewriteCond %{REQUEST_FILENAME} !-f\n RewriteCond %{REQUEST_FILENAME} !-d\n RewriteRule ^(.*)$ http://127.0.0.1:30000/$1 [P,L]\n\nPlease see this video for steps https://youtu.be/sIcy3q3Ib_s\n*If you are using a private VPN then you just need to skip the Cron Job section.\n" ]
[ 0 ]
[]
[]
[ "cpanel", "node.js", "reactjs", "web", "web_deployment" ]
stackoverflow_0074665417_cpanel_node.js_reactjs_web_web_deployment.txt
Q: How to change iPython error highlighting color I'm using iPython with iterm2 in macOS. I had never had issues before with the color scheme, but this time when an exception occurs, it highlights certain parts in a color combination that I find very hard to read. I've tried with different color setups in iterm and also adjusting highlighting_style and colors in the ipython_config.py file, without much luck. I've seen there is an option to set specific colors highlighting_style_overrides but I haven't been lucky finding the right pygments option for this. See Position below. This is the best contrast setup I've achieved, I still find hard it to read without focusing. A: There is an open issue regarding this: https://github.com/ipython/ipython/issues/13446 Here is the commit which introduced this change: https://github.com/ipython/ipython/commit/3026c205487897f6874b2ff580fe0be33e36e033 To get the file path on your system, run the following: import IPython, os os.path.join(os.path.dirname(IPython.__file__), 'core/ultratb.py') Finally, open the file and look for: style = stack_data.style_with_executing_node(style, "bg:ansiyellow") For the time being, you can manually patch it by changing bg:ansiyellow to something that works best given your color scheme, e.g. bg:ansired or bg:#ff0000. A: Here's an option you can drop in your ipython_config.py to duck punch in a better background color: try: from IPython.core import ultratb ultratb.VerboseTB._tb_highlight = "bg:ansired" except Exception: print("Error patching background color for tracebacks, they'll be the ugly default instead") Verified to work with IPython version 8.7.0
How to change iPython error highlighting color
I'm using iPython with iterm2 in macOS. I had never had issues before with the color scheme, but this time when an exception occurs, it highlights certain parts in a color combination that I find very hard to read. I've tried with different color setups in iterm and also adjusting highlighting_style and colors in the ipython_config.py file, without much luck. I've seen there is an option to set specific colors highlighting_style_overrides but I haven't been lucky finding the right pygments option for this. See Position below. This is the best contrast setup I've achieved, I still find hard it to read without focusing.
[ "There is an open issue regarding this: https://github.com/ipython/ipython/issues/13446\n\nHere is the commit which introduced this change:\nhttps://github.com/ipython/ipython/commit/3026c205487897f6874b2ff580fe0be33e36e033\nTo get the file path on your system, run the following:\nimport IPython, os\nos.path.join(os.path.dirname(IPython.__file__), 'core/ultratb.py')\n\nFinally, open the file and look for:\nstyle = stack_data.style_with_executing_node(style, \"bg:ansiyellow\")\n\nFor the time being, you can manually patch it by changing bg:ansiyellow to something that works best given your color scheme, e.g. bg:ansired or bg:#ff0000.\n", "Here's an option you can drop in your ipython_config.py to duck punch in a better background color:\ntry:\n from IPython.core import ultratb\n ultratb.VerboseTB._tb_highlight = \"bg:ansired\"\nexcept Exception:\n print(\"Error patching background color for tracebacks, they'll be the ugly default instead\")\n\nVerified to work with IPython version 8.7.0\n" ]
[ 13, 0 ]
[]
[]
[ "ipython" ]
stackoverflow_0070766518_ipython.txt
Q: Replace word in package names and variables in code In my android project I got very intericting task My company wants to hide all mintions about her in code (variables names, packages and etc) But only for one flavour, so I cannot do it once for all project. My first idea, was writing a simple gradle task, that will replace all strings that fit in code, but in this case package names will remain unchanged. Secondly, since we have ci on Jenkins, I thought about jenkins script, that will rename all files and its content, if it has keyword. But this solution looks very bulky for me. Maybe there is another, elegant way? A: Replacing the package name/variable names blindly seems a bit risky, as it could replace other overlapping strings as well, which may lead to various issues. Assuming your package name is unique and you don't have any overlapping names and it doesn't result in any directory name changes, you can use different methods to replace the package name. Option 1 Using shell to achieve this. There are plenty of different ways to do this, following is one option with grep and sed sh ''' grep -rl ${PACKAGE_NAME_TO_REPLACE} ${DESTINATION_DIR} | xargs sed -i "s&${PACKAGE_NAME_TO_REPLACE}&${PACKAGE_NAME_NEW}&g" ''' You can take a look at this and this to understand different methods you can use. Option 2 If you want a more controlled approach, you can achieve this with some groovy code as well. Simply run the following within your Pipeline. def dirToSearchIn = "/where/to/replace" // Change the content on specific files. You can improve the regex pattern below to fine-tune it. With the following pattern only files with extensions .java and .md will be changed. 
def filterFilePattern = ~/.*\.java|.*\.md$/ def oldString = "replaceme" def newString = "newme" new File(dirToSearchIn).traverse(type: groovy.io.FileType.FILES, nameFilter: filterFilePattern) { file -> println "Processing file: " + file.getPath() def fileContent = file.text; if (fileContent.contains(oldString)) { println "Replacing the content of the file: " + file.getPath() file.write(fileContent.replaceAll(oldString, newString)); } else { println "Skipping file: " + file.getPath() } } A: #!/bin/bash # Replace all instances of "old_string" with "new_string" in the current directory and subdirectories find . -type f -exec sed -i "s/old_string/new_string/g" {} + This script uses the find command to search for all files in the current directory and its subdirectories, and the sed command to perform the string replacement on each file. Note that this script will only work for plain text files, and may not work for binary files. You may need to adjust the script to exclude certain file types or directories from the search. Additionally, you may want to add more functionality to the script, such as prompting the user for the old and new strings, or providing options to customize the search and replace behavior. You can refer to the find and sed documentation for more information on how to do this. 
A: Adding something like the following to your top-level build.gradle file should do the trick (assuming your company is called β€œACME” and you rather want it to be called β€œfoobar”): def duplicateProjDirName = 'duplicateProj' def duplicateProjDir = project.layout.buildDirectory.dir(duplicateProjDirName) def duplicateProj = tasks.register('createDuplicateProject', Copy) { enabled = (projectDir.name != duplicateProjDirName) from(project.layout.projectDirectory) into(duplicateProjDir) exclude('build', '.gradle') def acmePattern = /(?i)acme/ def newCompanyName = 'foobar' eachFile { it.path = it.sourcePath.replaceAll(acmePattern, newCompanyName) } filter { it.replaceAll(acmePattern, newCompanyName) } includeEmptyDirs = false } def duplicateBuild = tasks.register('buildDuplicateProject', GradleBuild) { enabled = (projectDir.name != duplicateProjDirName) dependsOn(duplicateProj) dir = duplicateProjDir tasks = ['build'] } tasks.named('build').configure { dependsOn(duplicateBuild) } This essentially adds two tasks to the project: createDuplicateProject duplicates the project under build/duplicateProj/ with all mentions of β€œACME” replaced with β€œfoobar”. It also takes care of renaming files/directories (in contrast to the solutions in other answers so far). buildDuplicateProject builds the duplicate project. While this may work in basic scenarios (I’ve successfully tested it with a small dummy Java project and Gradle 7.6), there are some edge cases to think about: There may be dependencies (libraries, services, etc.) which contain the company name and which won’t work anymore after they’ve been renamed. This way of replacing may not work well for binary files. does not catch occurrences in code such as "AC" + "ME". case-insensitively may lead to weird-looking names that don’t follow common conventions. In the worst case, this could lead to different behavior, too. There may be downstream projects which depend on package names or the like that are renamed here. 
Your company may not only be identifiable by name, e.g., there may also be logos in image files, etc. and probably others
Replace word in package names and variables in code
In my android project I got very intericting task My company wants to hide all mintions about her in code (variables names, packages and etc) But only for one flavour, so I cannot do it once for all project. My first idea, was writing a simple gradle task, that will replace all strings that fit in code, but in this case package names will remain unchanged. Secondly, since we have ci on Jenkins, I thought about jenkins script, that will rename all files and its content, if it has keyword. But this solution looks very bulky for me. Maybe there is another, elegant way?
[ "Replacing the package name/variable names blindly seems a bit risky, as it could replace other overlapping strings as well, which may lead to various issues. Assuming your package name is unique and you don't have any overlapping names and it doesn't result in any directory name changes, you can use different methods to replace the package name.\nOption 1\nUsing shell to achieve this. There are plenty of different ways to do this, following is one option with grep and sed\nsh '''\ngrep -rl ${PACKAGE_NAME_TO_REPLACE} ${DESTINATION_DIR} | xargs sed -i \"s&${PACKAGE_NAME_TO_REPLACE}&${PACKAGE_NAME_NEW}&g\"\n'''\n\nYou can take a look at this and this to understand different methods you can use.\nOption 2\nIf you want a more controlled approach, you can achieve this with some groovy code as well. Simply run the following within your Pipeline.\ndef dirToSearchIn = \"/where/to/replace\"\n\n// Change the content on specific files. You can improve the regex pattern below to fine-tune it. With the following pattern only files with extensions .java and .md will be changed. \ndef filterFilePattern = ~/.*\\.java|.*\\.md$/\n\ndef oldString = \"replaceme\"\ndef newString = \"newme\"\n\nnew File(dirToSearchIn).traverse(type: groovy.io.FileType.FILES, nameFilter: filterFilePattern) { file ->\n println \"Processing file: \" + file.getPath()\n def fileContent = file.text;\n if (fileContent.contains(oldString)) {\n println \"Replacing the content of the file: \" + file.getPath()\n file.write(fileContent.replaceAll(oldString, newString));\n } else {\n println \"Skipping file: \" + file.getPath()\n }\n}\n\n", "#!/bin/bash\n\n# Replace all instances of \"old_string\" with \"new_string\" in the current directory and subdirectories\nfind . 
-type f -exec sed -i \"s/old_string/new_string/g\" {} +\n\nThis script uses the find command to search for all files in the current directory and its subdirectories, and the sed command to perform the string replacement on each file.\nNote that this script will only work for plain text files, and may not work for binary files. You may need to adjust the script to exclude certain file types or directories from the search.\nAdditionally, you may want to add more functionality to the script, such as prompting the user for the old and new strings, or providing options to customize the search and replace behavior. You can refer to the find and sed documentation for more information on how to do this.\n", "Adding something like the following to your top-level build.gradle file should do the trick (assuming your company is called β€œACME” and you rather want it to be called β€œfoobar”):\ndef duplicateProjDirName = 'duplicateProj'\ndef duplicateProjDir = project.layout.buildDirectory.dir(duplicateProjDirName)\n\ndef duplicateProj = tasks.register('createDuplicateProject', Copy) {\n enabled = (projectDir.name != duplicateProjDirName)\n\n from(project.layout.projectDirectory)\n into(duplicateProjDir)\n exclude('build', '.gradle')\n\n def acmePattern = /(?i)acme/\n def newCompanyName = 'foobar'\n eachFile { it.path = it.sourcePath.replaceAll(acmePattern, newCompanyName) }\n filter { it.replaceAll(acmePattern, newCompanyName) }\n includeEmptyDirs = false\n}\n\ndef duplicateBuild = tasks.register('buildDuplicateProject', GradleBuild) {\n enabled = (projectDir.name != duplicateProjDirName)\n\n dependsOn(duplicateProj)\n\n dir = duplicateProjDir\n tasks = ['build']\n}\n\ntasks.named('build').configure {\n dependsOn(duplicateBuild)\n}\n\nThis essentially adds two tasks to the project:\n\ncreateDuplicateProject duplicates the project under build/duplicateProj/ with all mentions of β€œACME” replaced with β€œfoobar”. 
It also takes care of renaming files/directories (in contrast to the solutions in other answers so far).\nbuildDuplicateProject builds the duplicate project.\n\n\nWhile this may work in basic scenarios (I’ve successfully tested it with a small dummy Java project and Gradle 7.6), there are some edge cases to think about:\n\nThere may be dependencies (libraries, services, etc.) which contain the company name and which won’t work anymore after they’ve been renamed.\nThis way of replacing\n\nmay not work well for binary files.\ndoes not catch occurrences in code such as \"AC\" + \"ME\".\ncase-insensitively may lead to weird-looking names that don’t follow common conventions. In the worst case, this could lead to different behavior, too.\n\n\nThere may be downstream projects which depend on package names or the like that are renamed here.\nYour company may not only be identifiable by name, e.g., there may also be logos in image files, etc.\nand probably others\n\n" ]
[ 1, 0, 0 ]
[]
[]
[ "android", "gradle", "jenkins", "jenkins_groovy" ]
stackoverflow_0074571088_android_gradle_jenkins_jenkins_groovy.txt
Q: Firebase emulator token revoked I have been using the Firebase Emulator for quite some time now and everthing have been working fine. Recenttly, I set up some seed data with a user in the seed as well. My problem is that each time I start the emulator (with the seed) and I try to execute an action, the auth.verifyIdToken(token, true) throws an error: The Firebase ID token has been revoked. Do I need to reconnect each time I start the emulator or is it a bug ? A: If the auth.verifyIdToken(token, true) method is throwing an error saying that the Firebase ID token has been revoked, it means that the ID token is no longer valid and can no longer be used to authenticate the user. There are a few possible reasons why this could happen: The user's account may have been deleted or disabled, in which case the ID token will no longer be valid. The ID token may have expired. ID tokens have a limited lifespan and will no longer be valid after a certain amount of time. The ID token may have been manually revoked by an administrator. If you are using the Firebase Emulator, it is possible that the seed data you have set up is causing the issue. If you are using an ID token that was generated by the seed data, it is possible that the token has been revoked as part of the seed data setup. To fix this issue, you will need to obtain a new ID token for the user and use that to authenticate the user. You can do this by using the signInWithEmailAndPassword method to sign the user in, and then using the getIdToken method to get a new ID token for the user. For example: // Sign the user in with their email and password. const user = await firebase.auth().signInWithEmailAndPassword(email, password); // Get a new ID token for the user. const idToken = await user.getIdToken(true); // Use the ID token to authenticate the user. 
const decodedToken = await firebase.auth().verifyIdToken(idToken); Once you have obtained a new ID token, you should be able to use it to authenticate the user without any issues.
Firebase emulator token revoked
I have been using the Firebase Emulator for quite some time now and everthing have been working fine. Recenttly, I set up some seed data with a user in the seed as well. My problem is that each time I start the emulator (with the seed) and I try to execute an action, the auth.verifyIdToken(token, true) throws an error: The Firebase ID token has been revoked. Do I need to reconnect each time I start the emulator or is it a bug ?
[ "If the auth.verifyIdToken(token, true) method is throwing an error saying that the Firebase ID token has been revoked, it means that the ID token is no longer valid and can no longer be used to authenticate the user.\nThere are a few possible reasons why this could happen:\n\nThe user's account may have been deleted or disabled, in which case\nthe ID token will no longer be valid.\nThe ID token may have expired.\nID tokens have a limited lifespan and will no longer be valid after\na certain amount of time.\nThe ID token may have been manually\nrevoked by an administrator.\n\nIf you are using the Firebase Emulator, it is possible that the seed data you have set up is causing the\nissue. If you are using an ID token that was generated by the seed\ndata, it is possible that the token has been revoked as part of the\nseed data setup.\nTo fix this issue, you will need to obtain a new ID token for the user and use that to authenticate the user. You can do this by using the signInWithEmailAndPassword method to sign the user in, and then using the getIdToken method to get a new ID token for the user.\nFor example:\n// Sign the user in with their email and password.\nconst user = await firebase.auth().signInWithEmailAndPassword(email, password);\n\n// Get a new ID token for the user.\nconst idToken = await user.getIdToken(true);\n\n// Use the ID token to authenticate the user.\nconst decodedToken = await firebase.auth().verifyIdToken(idToken);\n\nOnce you have obtained a new ID token, you should be able to use it to authenticate the user without any issues.\n" ]
[ 0 ]
[]
[]
[ "firebase", "firebase_authentication", "firebase_tools", "javascript" ]
stackoverflow_0074649632_firebase_firebase_authentication_firebase_tools_javascript.txt
Q: Difference between (function designator) and {function pointer}? In clang, a pointer to a function and the designation/designator have distinct object grouping symbols, parenthesis compared to curly braces. Why does the initial have parenthesis enclosing the symbol whereas the second symbol uses curly braces? For example, the following code fragment when compiled, . (*function)(1); mentions the symbol in the comments after compiling is noted as: (*type *(*)(type)) with parenthesis in the compilation log of the console. . whereas: . function(1); mentions the symbol in the comments after compiling is noted as: Note: {type *(type)} with curly braces in the compilation log of the console. . What is the distinction between the two for, and under what circumstances is it practically applied in any usual/useful contexts? My general observations of the syntax and semantics of the C language suggest to me the first appears to be a basic ordered-group expression which may be used in a comma-separated list or as a function parameter whereas the second appears to be a struct, union or block statement. I was exploring function pointers and so I tried both and those two notes were clang's comments of the compilation. None of the other 5 posts I read seemed to clarify the nature of the grouping symbols in any context but did provided additional insight to function pointers and their applications. Thank you! A: function designator is an expression with function type. for example in a = foo(x); foo is a function designator. Function pointer is a reference to the function. Function designators decay to function pointers when used as values. int (*fptr)(int) = foo;
Difference between (function designator) and {function pointer}?
In clang, a pointer to a function and the designation/designator have distinct object grouping symbols, parenthesis compared to curly braces. Why does the initial have parenthesis enclosing the symbol whereas the second symbol uses curly braces? For example, the following code fragment when compiled, . (*function)(1); mentions the symbol in the comments after compiling is noted as: (*type *(*)(type)) with parenthesis in the compilation log of the console. . whereas: . function(1); mentions the symbol in the comments after compiling is noted as: Note: {type *(type)} with curly braces in the compilation log of the console. . What is the distinction between the two for, and under what circumstances is it practically applied in any usual/useful contexts? My general observations of the syntax and semantics of the C language suggest to me the first appears to be a basic ordered-group expression which may be used in a comma-separated list or as a function parameter whereas the second appears to be a struct, union or block statement. I was exploring function pointers and so I tried both and those two notes were clang's comments of the compilation. None of the other 5 posts I read seemed to clarify the nature of the grouping symbols in any context but did provided additional insight to function pointers and their applications. Thank you!
[ "function designator is an expression with function type.\nfor example in\na = foo(x);\n\nfoo is a function designator.\nFunction pointer is a reference to the function. Function designators decay to function pointers when used as values.\nint (*fptr)(int) = foo;\n\n" ]
[ 0 ]
[]
[]
[ "c", "function_pointers", "grouping", "struct" ]
stackoverflow_0074681176_c_function_pointers_grouping_struct.txt
Q: Adding type class to the function defenition doesn't help in Haskell In the line eval :: (Enum Instruction) => [Instruction] -> [Value] -> [Value] eval inst mem = evalSecond (evalStep ((fromList Halt inst), (fromList 0 mem), (0,0,0,0), (ZeroFlag, EvenFlag))) where fromList is fromList ::(Enum a) => a -> [a] -> Tape a fromList a xs = (Tape 0 inf (xs ++ inf)) where inf = [a, a..] I'm getting this error when calling the function eval: β€’ No instance for (Enum Instruction) arising from a use of β€˜eval’ β€’ In the expression: eval [Test M, Halt] [32, 123, 0] In an equation for β€˜it’: it = eval [Test M, Halt] [32, 123, 0] Can someone explain me please? I can't understand why it asks for Enum Instruction when it is already there A: In eval you are requiring that the Instruction type is an instance of the Enum type class, but no such instance definition can be found. Thus, eval cannot be called. In order to make Instruction an instance of Enum, you have to provide definitions for toEnum :: Int -> Instruction and fromEnum :: Instruction -> Int: instance Enum Instruction where toEnum _ = _ -- Replace with your actual code fromEnum _ = _ Of course, you want toEnum . fromEnum to be equal to id (but this is not necessarily true for fromEnum . toEnum, as that function might not be total.) If your data type is actually a sum of nullary constructors, you can make Haskell write those functions for you by either writing deriving Enum directly after the data type definition or by writing deriving instance Enum Instruction somewhere else. For other data types, there might be different mechanisms.
Adding type class to the function defenition doesn't help in Haskell
In the line eval :: (Enum Instruction) => [Instruction] -> [Value] -> [Value] eval inst mem = evalSecond (evalStep ((fromList Halt inst), (fromList 0 mem), (0,0,0,0), (ZeroFlag, EvenFlag))) where fromList is fromList ::(Enum a) => a -> [a] -> Tape a fromList a xs = (Tape 0 inf (xs ++ inf)) where inf = [a, a..] I'm getting this error when calling the function eval: β€’ No instance for (Enum Instruction) arising from a use of β€˜eval’ β€’ In the expression: eval [Test M, Halt] [32, 123, 0] In an equation for β€˜it’: it = eval [Test M, Halt] [32, 123, 0] Can someone explain me please? I can't understand why it asks for Enum Instruction when it is already there
[ "In eval you are requiring that the Instruction type is an instance of the Enum type class, but no such instance definition can be found. Thus, eval cannot be called.\nIn order to make Instruction an instance of Enum, you have to provide definitions for toEnum :: Int -> Instruction and fromEnum :: Instruction -> Int:\ninstance Enum Instruction where\n toEnum _ = _ -- Replace with your actual code\n fromEnum _ = _\n\nOf course, you want toEnum . fromEnum to be equal to id (but this is not necessarily true for fromEnum . toEnum, as that function might not be total.)\nIf your data type is actually a sum of nullary constructors, you can make Haskell write those functions for you by either writing\nderiving Enum\n\ndirectly after the data type definition or by writing\nderiving instance Enum Instruction\n\nsomewhere else. For other data types, there might be different mechanisms.\n" ]
[ 1 ]
[]
[]
[ "enums", "haskell", "typeclass" ]
stackoverflow_0074680532_enums_haskell_typeclass.txt
Q: Push Notifications not working when Android inactive I developed a PWA that subscribes to a push notification, and when a notification is received the Service Worker (SW) does a showNotification and a postMessage to each client, triggering them to update a display field in the browser. The client I'm testing on is Android 11 on a Pixel 5a, Chrome 99.0.4844.58, and the PSA is installed as an app. Everything works fine while the phone screen is on (i.e. the notification pops up and the PWA display field is updated on the client), and it still works for several minutes after the screen times out and turns off/locks, but at some point (within about 5 minutes) the notifications no longer goes through. In the SW push event listener, I added a timestamp to the message that gets sent/displayed on the client browser, and is shows that the SW push event listener is not getting triggered until the phone is unlocked. I've turned Settings>Battery>Adaptive Battery Off, and Battery Optimization to Not Optimized in the app settings for Chrome and the PSA app. Is this the expected behavior? The intent of my PSA is for home monitoring, so it is essential that a notification occur even if the phone is "asleep". A: It is possible that this behavior is expected, as Android devices are designed to conserve battery power when the screen is turned off. This can involve suspending certain background processes, including push notifications, until the device is unlocked again. You can try a few things to see if they help resolve the issue: 1.Make sure that your PWA has been added to the list of "protected apps" in the device's battery settings. This will ensure that the app is not suspended when the screen is turned off. 2.Consider using a "background sync" in your PWA, which allows the PWA to periodically check for new notifications and display them when they are received, even if the device is in a low-power state. 
3.If your PWA is installed as an "installed web app" (versus being launched from the browser), you can try enabling "Push notifications" in the app's settings. This will allow the app to receive push notifications even when the device is in a low-power state. Overall, it is important to keep in mind that the behavior of push notifications on Android devices can vary depending on the device and Android version, so it may be necessary to experiment with different settings and approaches to find a solution that works for your particular use case.
Push Notifications not working when Android inactive
I developed a PWA that subscribes to a push notification, and when a notification is received the Service Worker (SW) does a showNotification and a postMessage to each client, triggering them to update a display field in the browser. The client I'm testing on is Android 11 on a Pixel 5a, Chrome 99.0.4844.58, and the PSA is installed as an app. Everything works fine while the phone screen is on (i.e. the notification pops up and the PWA display field is updated on the client), and it still works for several minutes after the screen times out and turns off/locks, but at some point (within about 5 minutes) the notifications no longer goes through. In the SW push event listener, I added a timestamp to the message that gets sent/displayed on the client browser, and is shows that the SW push event listener is not getting triggered until the phone is unlocked. I've turned Settings>Battery>Adaptive Battery Off, and Battery Optimization to Not Optimized in the app settings for Chrome and the PSA app. Is this the expected behavior? The intent of my PSA is for home monitoring, so it is essential that a notification occur even if the phone is "asleep".
[ "It is possible that this behavior is expected, as Android devices are designed to conserve battery power when the screen is turned off. This can involve suspending certain background processes, including push notifications, until the device is unlocked again.\nYou can try a few things to see if they help resolve the issue:\n1.Make sure that your PWA has been added to the list of \"protected apps\" in the device's battery settings. This will ensure that the app is not suspended when the screen is turned off.\n2.Consider using a \"background sync\" in your PWA, which allows the PWA to periodically check for new notifications and display them when they are received, even if the device is in a low-power state.\n3.If your PWA is installed as an \"installed web app\" (versus being launched from the browser), you can try enabling \"Push notifications\" in the app's settings. This will allow the app to receive push notifications even when the device is in a low-power state.\nOverall, it is important to keep in mind that the behavior of push notifications on Android devices can vary depending on the device and Android version, so it may be necessary to experiment with different settings and approaches to find a solution that works for your particular use case.\n" ]
[ 0 ]
[]
[]
[ "progressive_web_apps", "push_notification" ]
stackoverflow_0071405598_progressive_web_apps_push_notification.txt
Q: Passing variable arguments to another function that accepts a variable argument list So I have 2 functions that both have similar arguments void example(int a, int b, ...); void exampleB(int b, ...); Now example calls exampleB, but how can I pass along the variables in the variable argument list without modifying exampleB (as this is already used elsewhere too). A: You can't do it directly; you have to create a function that takes a va_list: #include <stdarg.h> static void exampleV(int b, va_list args); void exampleA(int a, int b, ...) // Renamed for consistency { va_list args; do_something(a); // Use argument a somehow va_start(args, b); exampleV(b, args); va_end(args); } void exampleB(int b, ...) { va_list args; va_start(args, b); exampleV(b, args); va_end(args); } static void exampleV(int b, va_list args) { ...whatever you planned to have exampleB do... ...except it calls neither va_start nor va_end... } A: Maybe throwin a rock in a pond here, but it seems to work pretty OK with C++11 variadic templates: #include <stdio.h> template<typename... Args> void test(const char * f, Args... args) { printf(f, args...); } int main() { int a = 2; test("%s\n", "test"); test("%s %d %d %p\n", "second test", 2, a, &a); } At the very least, it works with g++. A: you should create versions of these functions which take a va_list, and pass those. Look at vprintf as an example: int vprintf ( const char * format, va_list arg ); A: I also wanted to wrap printf and found a helpful answer here: How to pass variable number of arguments to printf/sprintf I was not at all interested in performance (I'm sure this piece of code can be improved in a number of ways, feel free to do so :) ), this is for general debugprinting only so I did this: //Helper function std::string osprintf(const char *fmt, ...) 
{ va_list args; char buf[1000]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args ); va_end(args); return buf; } which I then can use like this Point2d p; cout << osprintf("Point2d: (%3i, %3i)", p.x, p.y); instead of for example: cout << "Point2d: ( " << setw(3) << p.x << ", " << p.y << " )"; The c++ ostreams are beautiful in some aspects, but practically the become horrific if you want to print something like this with some small strings such as parenthesis, colons and commas inserted between the numbers. A: A possible way is to use #define: #define exampleB(int b, ...) example(0, b, __VA_ARGS__) A: It might not be exactly the same situation as described here, but if you were to define a wrapper for a string format function (e.g. logger): void logger(const char *name, const char *format, ...); void wrapper(const char *format, ...); when you implement a wrapper that calls logger, we can just create a string first with vasprintf and then pass it to logger. #include <stdio.h> #include <stdlib.h> #include <stdarg.h> static void wrapper(const char *format, ...) { char *string; va_list args; va_start(args, format); // variadic printf with allocated string. must free() vasprintf(&string, format, args); logger("wrapper", "%s", string); free(string); va_end(args); } Not the cleanest, but works. Try this when you must avoid using macro functions. A: Incidentally, many C implementations have an internal v?printf variation which IMHO should have been part of the C standard. The exact details vary, but a typical implementation will accept a struct containing a character-output function pointer and information saying what's supposed to happen. This allows printf, sprintf, and fprintf to all use the same 'core' mechanism. 
For example, vsprintf might be something like: void s_out(PRINTF_INFO *p_inf, char ch) { (*(p_inf->destptr)++) = ch; p_inf->result++; } int vsprintf(char *dest, const char *fmt, va_list args) { PRINTF_INFO p_inf; p_inf.destptr = dest; p_inf.result = 0; p_inf.func = s_out; core_printf(&p_inf,fmt,args); } The core_printf function then calls p_inf->func for each character to be output; the output function can then send the characters to the console, a file, a string, or something else. If one's implementation exposes the core_printf function (and whatever setup mechanism it uses) one can extend it with all sorts of variations. A: Based on the comment that you're wrapping vsprintf, and that this is tagged as C++ I'd suggest not trying to do this, but change up your interface to use C++ iostreams instead. They have advantages over the print line of functions, such as type safety and being able to print items that printf wouldn't be able to handle. Some rework now could save a significant amount of pain in the future. A: Using the new C++0x standard, you may be able to get this done using variadic templates or even convert that old code to the new template syntax without breaking anything. A: This is the only way to do it.. and the best way to do it too.. static BOOL(__cdecl *OriginalVarArgsFunction)(BYTE variable1, char* format, ...)(0x12345678); //TODO: change address lolz BOOL __cdecl HookedVarArgsFunction(BYTE variable1, char* format, ...) { BOOL res; va_list vl; va_start(vl, format); // Get variable arguments count from disasm. 
-2 because of existing 'format', 'variable1' uint32_t argCount = *((uint8_t*)_ReturnAddress() + 2) / sizeof(void*) - 2; printf("arg count = %d\n", argCount); // ((int( __cdecl* )(const char*, ...))&oldCode)(fmt, ...); __asm { mov eax, argCount test eax, eax je noLoop mov edx, vl loop1 : push dword ptr[edx + eax * 4 - 4] sub eax, 1 jnz loop1 noLoop : push format push variable1 //lea eax, [oldCode] // oldCode - original function pointer mov eax, OriginalVarArgsFunction call eax mov res, eax mov eax, argCount lea eax, [eax * 4 + 8] //+8 because 2 parameters (format and variable1) add esp, eax } return res; } A: Using GNU C extensions: int FIRST_FUNC(...){ __builtin_return( __builtin_apply( (void(*)())SECOND_FUNC, __builtin_apply_args(), 100)); } Also clones return value with _builtin_return.
Passing variable arguments to another function that accepts a variable argument list
So I have 2 functions that both have similar arguments void example(int a, int b, ...); void exampleB(int b, ...); Now example calls exampleB, but how can I pass along the variables in the variable argument list without modifying exampleB (as this is already used elsewhere too).
[ "You can't do it directly; you have to create a function that takes a va_list:\n#include <stdarg.h>\n\nstatic void exampleV(int b, va_list args);\n\nvoid exampleA(int a, int b, ...) // Renamed for consistency\n{\n va_list args;\n do_something(a); // Use argument a somehow\n va_start(args, b);\n exampleV(b, args);\n va_end(args);\n}\n\nvoid exampleB(int b, ...)\n{\n va_list args;\n va_start(args, b);\n exampleV(b, args);\n va_end(args);\n}\n\nstatic void exampleV(int b, va_list args)\n{\n ...whatever you planned to have exampleB do...\n ...except it calls neither va_start nor va_end...\n}\n\n", "Maybe throwin a rock in a pond here, but it seems to work pretty OK with C++11 variadic templates:\n#include <stdio.h>\n\ntemplate<typename... Args> void test(const char * f, Args... args) {\n printf(f, args...);\n}\n\nint main()\n{\n int a = 2;\n test(\"%s\\n\", \"test\");\n test(\"%s %d %d %p\\n\", \"second test\", 2, a, &a);\n}\n\nAt the very least, it works with g++.\n", "you should create versions of these functions which take a va_list, and pass those. 
Look at vprintf as an example:\nint vprintf ( const char * format, va_list arg );\n\n", "I also wanted to wrap printf and found a helpful answer here:\nHow to pass variable number of arguments to printf/sprintf\nI was not at all interested in performance (I'm sure this piece of code can be improved in a number of ways, feel free to do so :) ), this is for general debugprinting only so I did this:\n//Helper function\nstd::string osprintf(const char *fmt, ...)\n{\n va_list args;\n char buf[1000];\n va_start(args, fmt);\n vsnprintf(buf, sizeof(buf), fmt, args );\n va_end(args);\n return buf;\n}\n\nwhich I then can use like this\nPoint2d p;\n\ncout << osprintf(\"Point2d: (%3i, %3i)\", p.x, p.y);\ninstead of for example:\ncout << \"Point2d: ( \" << setw(3) << p.x << \", \" << p.y << \" )\";\n\nThe c++ ostreams are beautiful in some aspects, but practically the become horrific if you want to print something like this with some small strings such as parenthesis, colons and commas inserted between the numbers. \n", "A possible way is to use #define:\n#define exampleB(int b, ...) example(0, b, __VA_ARGS__)\n\n", "It might not be exactly the same situation as described here, but if you were to define a wrapper for a string format function (e.g. logger):\nvoid logger(const char *name, const char *format, ...);\nvoid wrapper(const char *format, ...);\n\nwhen you implement a wrapper that calls logger, we can just create a string first with vasprintf and then pass it to logger.\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdarg.h>\n\nstatic void wrapper(const char *format, ...)\n{\n char *string;\n va_list args;\n va_start(args, format);\n\n // variadic printf with allocated string. must free()\n vasprintf(&string, format, args);\n logger(\"wrapper\", \"%s\", string);\n\n free(string);\n va_end(args);\n}\n\nNot the cleanest, but works. 
Try this when you must avoid using macro functions.\n", "Incidentally, many C implementations have an internal v?printf variation which IMHO should have been part of the C standard. The exact details vary, but a typical implementation will accept a struct containing a character-output function pointer and information saying what's supposed to happen. This allows printf, sprintf, and fprintf to all use the same 'core' mechanism. For example, vsprintf might be something like:\nvoid s_out(PRINTF_INFO *p_inf, char ch)\n{\n (*(p_inf->destptr)++) = ch;\n p_inf->result++;\n}\n\nint vsprintf(char *dest, const char *fmt, va_list args)\n{\n PRINTF_INFO p_inf;\n p_inf.destptr = dest;\n p_inf.result = 0;\n p_inf.func = s_out;\n core_printf(&p_inf,fmt,args);\n}\n\nThe core_printf function then calls p_inf->func for each character to be output; the output function can then send the characters to the console, a file, a string, or something else. If one's implementation exposes the core_printf function (and whatever setup mechanism it uses) one can extend it with all sorts of variations.\n", "Based on the comment that you're wrapping vsprintf, and that this is tagged as C++ I'd suggest not trying to do this, but change up your interface to use C++ iostreams instead. They have advantages over the print line of functions, such as type safety and being able to print items that printf wouldn't be able to handle. Some rework now could save a significant amount of pain in the future.\n", "Using the new C++0x standard, you may be able to get this done using variadic templates or even convert that old code to the new template syntax without breaking anything.\n", "This is the only way to do it.. 
and the best way to do it too..\nstatic BOOL(__cdecl *OriginalVarArgsFunction)(BYTE variable1, char* format, ...)(0x12345678); //TODO: change address lolz\n\nBOOL __cdecl HookedVarArgsFunction(BYTE variable1, char* format, ...)\n{\n BOOL res;\n\n va_list vl;\n va_start(vl, format);\n\n // Get variable arguments count from disasm. -2 because of existing 'format', 'variable1'\n uint32_t argCount = *((uint8_t*)_ReturnAddress() + 2) / sizeof(void*) - 2;\n printf(\"arg count = %d\\n\", argCount);\n\n // ((int( __cdecl* )(const char*, ...))&oldCode)(fmt, ...);\n __asm\n {\n mov eax, argCount\n test eax, eax\n je noLoop\n mov edx, vl\n loop1 :\n push dword ptr[edx + eax * 4 - 4]\n sub eax, 1\n jnz loop1\n noLoop :\n push format\n push variable1\n //lea eax, [oldCode] // oldCode - original function pointer\n mov eax, OriginalVarArgsFunction\n call eax\n mov res, eax\n mov eax, argCount\n lea eax, [eax * 4 + 8] //+8 because 2 parameters (format and variable1)\n add esp, eax\n }\n return res;\n}\n\n", "Using GNU C extensions:\nint FIRST_FUNC(...){\n __builtin_return(\n __builtin_apply(\n (void(*)())SECOND_FUNC, __builtin_apply_args(), 100));\n}\n\nAlso clones return value with _builtin_return.\n" ]
[ 153, 89, 16, 8, 5, 4, 3, 1, 0, 0, 0 ]
[]
[]
[ "c++", "variadic_functions" ]
stackoverflow_0003530771_c++_variadic_functions.txt
Q: Data record becomes double in database when updated I have 2 features that I'm working on, say Clock in and Clock out. The thing is that when I clock out(updated the Time Clock out and Location Clock out), suddenly there's an extra record as seen on this pic, https://paste.pics/586336ef0bf2517da832678425125655 . Although the data is updated but there's an extra row with an updated info in 'time_checkOut' and 'location_checkOut'. I am wondering what is wrong here ? I tried inserting and updating the data by injecting time manually by using the code below before and it's working just fine as seen on the screenshot picture above but when I am using Carbon::now() and update the info using this. The data table in the database will double when I clock out. $date = date('Y-m-d'); $date = $r->date_checkIn; $time = date('H:i:s'); $time = $r->time_checkIn; Below are my 2 controllers : public function userClockIn(Request $r) { $result = []; $result['status'] = false; $result['message'] = "something error"; $users = User::where('staff_id', $r->staff_id)->select(['staff_id', 'date_checkIn', 'time_checkIn', 'location_checkIn'])->first(); $mytime = Carbon::now(); $date = $mytime->format('Y-m-d'); $time = $mytime->format('H:i:s'); $users->date_checkIn = $date; $users->time_checkIn = $time; $users->location_checkIn = $r->location_checkIn; $users->save(); // Retrieve current data $currentData = $users->toArray(); // Store current data into attendace record table $attendanceRecord = new AttendanceRecord(); $attendanceRecord->fill($currentData); $attendanceRecord->save(); $result['data'] = $users; $result['status'] = true; $result['message'] = "suksess add data"; return response()->json($result); } public function userClockOut(Request $r) { $result = []; $result['status'] = false; $result['message'] = "something error"; $users = User::where('staff_id', $r->staff_id)->select(['staff_id', 'time_checkOut', 'location_checkOut'])->first(); $mytime = Carbon::now(); $date = 
$mytime->format('Y-m-d'); $time = $mytime->format('H:i:s'); $users->date_checkIn = $date; $users->time_checkOut = $time; $users->location_checkOut = $r->location_checkOut; // Save the updated data to the database AttendanceRecord::updateOrCreate( ['staff_id' => $users->staff_id, 'date_checkIn' => $date], $users->toArray() ); // Retrieve current data $currentData = $users->toArray(); // Store current data into attendace record table $attendanceRecord = new AttendanceRecord(); $attendanceRecord->fill($currentData); $attendanceRecord->save(); $result['data'] = $users; $result['status'] = true; $result['message'] = "suksess add data"; return response()->json($result); } A: in function userClockOut: AttendanceRecord::updateOrCreate( ['staff_id' => $users->staff_id, 'date_checkIn' => $date], $users->toArray() ); and // I suggest you comment these lines $attendanceRecord = new AttendanceRecord(); $attendanceRecord->fill($currentData); $attendanceRecord->save(); are duplicate codes.
Data record becomes duplicated in database when updated
I have 2 features that I'm working on, say Clock in and Clock out. The thing is that when I clock out(updated the Time Clock out and Location Clock out), suddenly there's an extra record as seen on this pic, https://paste.pics/586336ef0bf2517da832678425125655 . Although the data is updated but there's an extra row with an updated info in 'time_checkOut' and 'location_checkOut'. I am wondering what is wrong here ? I tried inserting and updating the data by injecting time manually by using the code below before and it's working just fine as seen on the screenshot picture above but when I am using Carbon::now() and update the info using this. The data table in the database will double when I clock out. $date = date('Y-m-d'); $date = $r->date_checkIn; $time = date('H:i:s'); $time = $r->time_checkIn; Below are my 2 controllers : public function userClockIn(Request $r) { $result = []; $result['status'] = false; $result['message'] = "something error"; $users = User::where('staff_id', $r->staff_id)->select(['staff_id', 'date_checkIn', 'time_checkIn', 'location_checkIn'])->first(); $mytime = Carbon::now(); $date = $mytime->format('Y-m-d'); $time = $mytime->format('H:i:s'); $users->date_checkIn = $date; $users->time_checkIn = $time; $users->location_checkIn = $r->location_checkIn; $users->save(); // Retrieve current data $currentData = $users->toArray(); // Store current data into attendace record table $attendanceRecord = new AttendanceRecord(); $attendanceRecord->fill($currentData); $attendanceRecord->save(); $result['data'] = $users; $result['status'] = true; $result['message'] = "suksess add data"; return response()->json($result); } public function userClockOut(Request $r) { $result = []; $result['status'] = false; $result['message'] = "something error"; $users = User::where('staff_id', $r->staff_id)->select(['staff_id', 'time_checkOut', 'location_checkOut'])->first(); $mytime = Carbon::now(); $date = $mytime->format('Y-m-d'); $time = $mytime->format('H:i:s'); 
$users->date_checkIn = $date; $users->time_checkOut = $time; $users->location_checkOut = $r->location_checkOut; // Save the updated data to the database AttendanceRecord::updateOrCreate( ['staff_id' => $users->staff_id, 'date_checkIn' => $date], $users->toArray() ); // Retrieve current data $currentData = $users->toArray(); // Store current data into attendace record table $attendanceRecord = new AttendanceRecord(); $attendanceRecord->fill($currentData); $attendanceRecord->save(); $result['data'] = $users; $result['status'] = true; $result['message'] = "suksess add data"; return response()->json($result); }
[ "in function userClockOut:\nAttendanceRecord::updateOrCreate(\n ['staff_id' => $users->staff_id, 'date_checkIn' => $date],\n $users->toArray()\n);\n\nand\n// I suggest you comment these lines\n$attendanceRecord = new AttendanceRecord();\n$attendanceRecord->fill($currentData);\n$attendanceRecord->save();\n\nare duplicate codes.\n" ]
[ 0 ]
[]
[]
[ "api", "laravel", "php" ]
stackoverflow_0074675407_api_laravel_php.txt
Q: Using HTTPS with Azure functions running on Azure iot Edge Background I have a system running on Azure iot edge. The system is composed of multiple modules that expose REST interfaces. To make everything look tidy from the client's perspective (a browser on another machine in the same network) we use an Azure Function and its reverse proxy capabilities. So, basically, the client makes a request to an endpoint of the function, if the route matches one in the "proxies" config, it is routed to the correct module using the docker network provided by the iot edge product. Problem Now, what I would like to accomplish is that the client would use an https connection to make the request to the function. So the browser would make a request to https://:8000/Somemodule/Resource and this request would be routed by the af proxy to http://Somemodule:80/Resource . So my question is, how do I enable https in a function running locally in a docker container, and can the reverse proxy work as described above? Thanks for any help! A: For HTTPS, you primarily need a SSL certificate and reverse proxy like nginx that can do SSL Termination since I believe Azure Functions doesn't support it as part of the runtime itself (which is what the docker container has). Nginx is a popular and fairly common choice to use for SSL Termination. You would have to configure it for SSL with your domain information and setup your Azure Function as its upstream. That being said, you could actually just use nginx as your proxy directly too, completely removing the need for Azure Functions, unless you are using it for Functions and Proxies. Your current proxy entries would just become an upstream definition for each module and separate locations (basically path) that would route the requests.
Using HTTPS with Azure Functions running on Azure IoT Edge
Background I have a system running on Azure iot edge. The system is composed of multiple modules that expose REST interfaces. To make everything look tidy from the client's perspective (a browser on another machine in the same network) we use an Azure Function and its reverse proxy capabilities. So, basically, the client makes a request to an endpoint of the function, if the route matches one in the "proxies" config, it is routed to the correct module using the docker network provided by the iot edge product. Problem Now, what I would like to accomplish is that the client would use an https connection to make the request to the function. So the browser would make a request to https://:8000/Somemodule/Resource and this request would be routed by the af proxy to http://Somemodule:80/Resource . So my question is, how do I enable https in a function running locally in a docker container, and can the reverse proxy work as described above? Thanks for any help!
[ "For HTTPS, you primarily need a SSL certificate and reverse proxy like nginx that can do SSL Termination since I believe Azure Functions doesn't support it as part of the runtime itself (which is what the docker container has).\nNginx is a popular and fairly common choice to use for SSL Termination. You would have to configure it for SSL with your domain information and setup your Azure Function as its upstream.\nThat being said, you could actually just use nginx as your proxy directly too, completely removing the need for Azure Functions, unless you are using it for Functions and Proxies. Your current proxy entries would just become an upstream definition for each module and separate locations (basically path) that would route the requests.\n" ]
[ 0 ]
[]
[]
[ "azure", "azure_functions", "azure_functions_proxies", "azure_iot_edge", "https" ]
stackoverflow_0072227639_azure_azure_functions_azure_functions_proxies_azure_iot_edge_https.txt
Q: Restarting game upon player death is deleting game objects I am in the process of creating a call of duty zombies style game. The issue I'm currently running into is when my player's health = 0 the game restarts but for some reason throws out the following errors: MissingReferenceException: The object of type 'Transform' has been destroyed but you are still trying to access it. Your script should either check if it is null or you should not destroy the object. UnityEngine.Transform.get_position () (at <bae255e3e08e46f7bc2fbd23dde96338>:0) Gun.Shoot () (at Assets/Scripts/Gun.cs:55) PlayerShoot.Update () (at Assets/Scripts/PlayerShoot.cs:16) I am using the unity scene management function and when my player's health reaches 0 I run the code: Debug.Log("Dead"); SceneManager.LoadScene("Zombie Game"); Nowhere in the Gun script or the Player Shoot script is there anything about destroying any game objects or transforms on death. PlayerShoot Script: using System; using System.Collections; using System.Collections.Generic; using UnityEngine; public class PlayerShoot : MonoBehaviour { public static Action shootInput; public static Action reloadInput; [SerializeField] private KeyCode reloadKey = KeyCode.R; private void Update() { if (Input.GetMouseButton(0)) shootInput?.Invoke(); if (Input.GetKeyDown(reloadKey)) reloadInput?.Invoke(); } } Gun Script: using System; using System.Collections; using System.Collections.Generic; using UnityEngine; using System.Linq; public class Gun : MonoBehaviour { Ray ray; // the ray with origin amd direction for the cast float distance; // if needed a maximum distance LayerMask layers; // the general raycast layer - not different ones for each player ;) GameObject Player; // your player object [Header("References")] [SerializeField] private GunData gunData; [SerializeField] private Transform cam; float timeSinceLastShot; public void Start() { PlayerShoot.shootInput += Shoot; PlayerShoot.reloadInput += StartReload; } public void OnDisable() 
=> gunData.reloading = false; public void StartReload() { if (!gunData.reloading && this.gameObject.activeSelf) StartCoroutine(Reload()); } private IEnumerator Reload() { gunData.reloading = true; yield return new WaitForSeconds(gunData.reloadTime); gunData.currentAmmo = gunData.magSize; gunData.reloading = false; } public bool CanShoot() => !gunData.reloading && timeSinceLastShot > 1f / (gunData.fireRate / 60f); public void Shoot() { if (gunData.currentAmmo > 0) { if (CanShoot()) { if (Physics.Raycast(cam.position, cam.forward, out RaycastHit hitInfo, gunData.maxDistance)) { IDamageable damageable = hitInfo.transform.GetComponent<IDamageable>(); damageable?.TakeDamage(gunData.damage); } gunData.currentAmmo--; timeSinceLastShot = 0; OnGunShot(); } } } public void Update() { timeSinceLastShot += Time.deltaTime; Debug.DrawRay(cam.position, cam.forward * gunData.maxDistance); } public void OnGunShot() {} } I am New to coding, so this as been a tough one for me to try and work out. I feel confident that the issue has something to do with the player transform or the ray cast, but I have no idea what I can do to prevent this error from occurring when the game is reset. Any help would be HUGELY appreciated. A: When you're calling SceneManager.LoadScene("Zombie Game") to reload your scene. This effectively destroys the objects in the scene which do not have a DontDestroyOnLoad attribute. It is generally good practice to unsubscribe from events when an object is destroyed. This is because events are a type of delegate, a reference to a method that can be called later. If you do not unsubscribe from an event, the delegate will continue to reference the method, even if the object registered for the event is destroyed. This can cause problems if the event is triggered after the object is destroyed, as the delegate will still try to call the method on the destroyed object. 
This can result in errors or exceptions being thrown, depending on the event's implementation and the method it calls. To avoid these problems, it is recommended to unsubscribe from events in the OnDestroy() method of the registered object for the event. This ensures that the delegate is no longer referencing the method on the object, and the event can be safely triggered without causing any errors: public class Gun : MonoBehaviour { public void Start() { PlayerShoot.shootInput += Shoot; PlayerShoot.reloadInput += StartReload; } private void OnDestroy() { PlayerShoot.shootInput -= Shoot; PlayerShoot.reloadInput -= StartReload; } } You should also ensure that any references assigned through the inspector remain valid once your scene reloads.
Restarting game upon player death is deleting game objects
I am in the process of creating a call of duty zombies style game. The issue I'm currently running into is when my player's health = 0 the game restarts but for some reason throws out the following errors: MissingReferenceException: The object of type 'Transform' has been destroyed but you are still trying to access it. Your script should either check if it is null or you should not destroy the object. UnityEngine.Transform.get_position () (at <bae255e3e08e46f7bc2fbd23dde96338>:0) Gun.Shoot () (at Assets/Scripts/Gun.cs:55) PlayerShoot.Update () (at Assets/Scripts/PlayerShoot.cs:16) I am using the unity scene management function and when my player's health reaches 0 I run the code: Debug.Log("Dead"); SceneManager.LoadScene("Zombie Game"); Nowhere in the Gun script or the Player Shoot script is there anything about destroying any game objects or transforms on death. PlayerShoot Script: using System; using System.Collections; using System.Collections.Generic; using UnityEngine; public class PlayerShoot : MonoBehaviour { public static Action shootInput; public static Action reloadInput; [SerializeField] private KeyCode reloadKey = KeyCode.R; private void Update() { if (Input.GetMouseButton(0)) shootInput?.Invoke(); if (Input.GetKeyDown(reloadKey)) reloadInput?.Invoke(); } } Gun Script: using System; using System.Collections; using System.Collections.Generic; using UnityEngine; using System.Linq; public class Gun : MonoBehaviour { Ray ray; // the ray with origin amd direction for the cast float distance; // if needed a maximum distance LayerMask layers; // the general raycast layer - not different ones for each player ;) GameObject Player; // your player object [Header("References")] [SerializeField] private GunData gunData; [SerializeField] private Transform cam; float timeSinceLastShot; public void Start() { PlayerShoot.shootInput += Shoot; PlayerShoot.reloadInput += StartReload; } public void OnDisable() => gunData.reloading = false; public void StartReload() { if 
(!gunData.reloading && this.gameObject.activeSelf) StartCoroutine(Reload()); } private IEnumerator Reload() { gunData.reloading = true; yield return new WaitForSeconds(gunData.reloadTime); gunData.currentAmmo = gunData.magSize; gunData.reloading = false; } public bool CanShoot() => !gunData.reloading && timeSinceLastShot > 1f / (gunData.fireRate / 60f); public void Shoot() { if (gunData.currentAmmo > 0) { if (CanShoot()) { if (Physics.Raycast(cam.position, cam.forward, out RaycastHit hitInfo, gunData.maxDistance)) { IDamageable damageable = hitInfo.transform.GetComponent<IDamageable>(); damageable?.TakeDamage(gunData.damage); } gunData.currentAmmo--; timeSinceLastShot = 0; OnGunShot(); } } } public void Update() { timeSinceLastShot += Time.deltaTime; Debug.DrawRay(cam.position, cam.forward * gunData.maxDistance); } public void OnGunShot() {} } I am New to coding, so this as been a tough one for me to try and work out. I feel confident that the issue has something to do with the player transform or the ray cast, but I have no idea what I can do to prevent this error from occurring when the game is reset. Any help would be HUGELY appreciated.
[ "When you're calling SceneManager.LoadScene(\"Zombie Game\") to reload your scene. This effectively destroys the objects in the scene which do not have a DontDestroyOnLoad attribute.\nIt is generally good practice to unsubscribe from events when an object is destroyed. This is because events are a type of delegate, a reference to a method that can be called later. If you do not unsubscribe from an event, the delegate will continue to reference the method, even if the object registered for the event is destroyed.\nThis can cause problems if the event is triggered after the object is destroyed, as the delegate will still try to call the method on the destroyed object. This can result in errors or exceptions being thrown, depending on the event's implementation and the method it calls.\nTo avoid these problems, it is recommended to unsubscribe from events in the OnDestroy() method of the registered object for the event. This ensures that the delegate is no longer referencing the method on the object, and the event can be safely triggered without causing any errors:\npublic class Gun : MonoBehaviour {\n \n public void Start() {\n PlayerShoot.shootInput += Shoot;\n PlayerShoot.reloadInput += StartReload;\n }\n\n private void OnDestroy() {\n PlayerShoot.shootInput -= Shoot;\n PlayerShoot.reloadInput -= StartReload;\n }\n}\n\nYou should also ensure that any references assigned through the inspector remain valid once your scene reloads.\n" ]
[ 0 ]
[]
[]
[ "c#", "unity3d" ]
stackoverflow_0074681063_c#_unity3d.txt
Q: fftshift c++ implemetation for openCV I have already looked in this question fftshift/ifftshift C/C++ source code I'm trying to implement fftshift from matlab this is the code from the matlab function for 1D array numDims = ndims(x); idx = cell(1, numDims); for k = 1:numDims m = size(x, k); p = ceil(m/2); idx{k} = [p+1:m 1:p]; end y = x(idx{:}); my c++/openCV code is, what fftshift basically does is swap the values from a certain pivot place. since I can't seem to understand how is the matrix built in opencv for complex numbers. it says here http://docs.opencv.org/modules/core/doc/operations_on_arrays.html#dft CCS (complex-conjugate-symmetrical I thought it will be easier to split the complex numbers into real and imaginary and swap them. and then merge back to one matrix. cv::vector<float> distanceF (f.size()); //ff = fftshift(ff); cv::Mat ff; cv::dft(distanceF, ff, cv::DFT_COMPLEX_OUTPUT); //Make place for both the complex and the real values cv::Mat planes[] = {cv::Mat::zeros(distanceF.size(),1, CV_32F), cv::Mat::zeros(distanceF.size(),1, CV_32F)}; cv::split(ff, planes); // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I)) int numDims = ff.dims; for (int i = 0; i < numDims; i++) { int m = ff.rows; int p = ceil(m/2); } my problem is that because of my input to the DFT is a vector<float> I can't seem to be able to create planes mat in order to split the complex numbers? Can you think how a better way to make the swap of the values inside the cv::mat data struct? A: Ok, this thread is may be out of date in the meantime but maybe for other users.. 
Take a look at the samples: opencv/samples/cpp/dft.cpp (line 66 - 80) int cx = mag.cols/2; int cy = mag.rows/2; // rearrange the quadrants of Fourier image // so that the origin is at the image center Mat tmp; Mat q0(mag, Rect(0, 0, cx, cy)); Mat q1(mag, Rect(cx, 0, cx, cy)); Mat q2(mag, Rect(0, cy, cx, cy)); Mat q3(mag, Rect(cx, cy, cx, cy)); q0.copyTo(tmp); q3.copyTo(q0); tmp.copyTo(q3); q1.copyTo(tmp); q2.copyTo(q1); tmp.copyTo(q2); I think that's a short and clean way for different dimensions. A: I know, this is quite an old thread, but I found it today while looking for a solution to shift the fft-result. and maybe the little function I wrote with the help of this site and other sources, could be helpful for future readers searching the net and ending up here too. bool FftShift(const Mat& src, Mat& dst) { if(src.empty()) return true; const uint h=src.rows, w=src.cols; // height and width of src-image const uint qh=h>>1, qw=w>>1; // height and width of the quadrants Mat qTL(src, Rect( 0, 0, qw, qh)); // define the quadrants in respect to Mat qTR(src, Rect(w-qw, 0, qw, qh)); // the outer dimensions of the matrix. Mat qBL(src, Rect( 0, h-qh, qw, qh)); // thus, with odd sizes, the center Mat qBR(src, Rect(w-qw, h-qh, qw, qh)); // line(s) get(s) omitted. Mat tmp; hconcat(qBR, qBL, dst); // build destination matrix with switched hconcat(qTR, qTL, tmp); // quadrants 0 & 2 and 1 & 3 from source vconcat(dst, tmp, dst); return false; } A: How about using adjustROI and copyTo instead of .at()? It would certainly be more efficient: Something in the lines of (for your 1D case): Mat shifted(ff.size(),ff.type()); pivot = ff.cols / 2; ff(Range::all(),Range(pivot + 1, ff.cols)).copyTo(shifted(Range::all(),Range(0,pivot))); ff(Range::all(),Range(0,pivot+1)).copyTo(shifted(Range::all(),Range(pivot,ff.cols))); For the 2D case, two more lines should be added, and the rows ranges modified... 
A: I have been implementing it myself based on this post, I used Fabian implementation which is working fine. But there is a problem when there is an odd number of row or column, the shift is then not correct. You need then to padd your matrix and after to get rid of the extra row or column. {bool flag_row = false; bool flag_col = false; if( (inputMatrix.rows % 2)>0) { cv::Mat row = cv::Mat::zeros(1,inputMatrix.cols, CV_64F); inputMatrix.push_back(row); flag_row =true; } if( (inputMatrix.cols % 2)>0) { cv::Mat col = cv::Mat::zeros(1,inputMatrix.rows, CV_64F); cv::Mat tmp; inputMatrix.copyTo(tmp); tmp=tmp.t(); tmp.push_back(col); tmp=tmp.t(); tmp.copyTo(inputMatrix); flag_col = true; } int cx = inputMatrix.cols/2.0; int cy = inputMatrix.rows/2.0; cv::Mat outputMatrix; inputMatrix.copyTo(outputMatrix); // rearrange the quadrants of Fourier image // so that the origin is at the image center cv::Mat tmp; cv::Mat q0(outputMatrix, cv::Rect(0, 0, cx, cy)); cv::Mat q1(outputMatrix, cv::Rect(cx, 0, cx, cy)); cv::Mat q2(outputMatrix, cv::Rect(0, cy, cx, cy)); cv::Mat q3(outputMatrix, cv::Rect(cx, cy, cx, cy)); q0.copyTo(tmp); q3.copyTo(q0); tmp.copyTo(q3); q1.copyTo(tmp); q2.copyTo(q1); tmp.copyTo(q2); int row = inputMatrix.rows; int col = inputMatrix.cols; if(flag_row) { outputMatrix = Tools::removerow(outputMatrix,row/2-1); } if(flag_col) { outputMatrix = Tools::removecol(outputMatrix,col/2-1); } return outputMatrix; A: Here is what I do (quick and dirty, can be optimized): // taken from the opencv DFT example (see opencv/samples/cpp/dft.cpp within opencv v440 sourcecode package) cv::Mat fftshift(const cv::Mat& mat){ // create copy to not mess up the original matrix (ret is only a "window" over the provided matrix) cv::Mat cpy; mat.copyTo(cpy); // crop the spectrum, if it has an odd number of rows or columns cv::Mat ret = cpy(cv::Rect(0, 0, cpy.cols & -2, cpy.rows & -2)); // rearrange the quadrants of Fourier image so that the origin is at the image center int cx = 
ret.cols/2; int cy = ret.rows/2; cv::Mat q0(ret, cv::Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant cv::Mat q1(ret, cv::Rect(cx, 0, cx, cy)); // Top-Right cv::Mat q2(ret, cv::Rect(0, cy, cx, cy)); // Bottom-Left cv::Mat q3(ret, cv::Rect(cx, cy, cx, cy)); // Bottom-Right cv::Mat tmp; // swap quadrants (Top-Left with Bottom-Right) q0.copyTo(tmp); q3.copyTo(q0); tmp.copyTo(q3); q1.copyTo(tmp); // swap quadrant (Top-Right with Bottom-Left) q2.copyTo(q1); tmp.copyTo(q2); return ret; } // reverse the swapping of fftshift. (-> reverse the quadrant swapping) cv::Mat ifftshift(const cv::Mat& mat){ // create copy to not mess up the original matrix (ret is only a "window" over the provided matrix) cv::Mat cpy; mat.copyTo(cpy); // crop the spectrum, if it has an odd number of rows or columns cv::Mat ret = cpy(cv::Rect(0, 0, cpy.cols & -2, cpy.rows & -2)); // rearrange the quadrants of Fourier image so that the origin is at the image center int cx = ret.cols/2; int cy = ret.rows/2; cv::Mat q0(ret, cv::Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant cv::Mat q1(ret, cv::Rect(cx, 0, cx, cy)); // Top-Right cv::Mat q2(ret, cv::Rect(0, cy, cx, cy)); // Bottom-Left cv::Mat q3(ret, cv::Rect(cx, cy, cx, cy)); // Bottom-Right cv::Mat tmp; // swap quadrants (Bottom-Right with Top-Left) q3.copyTo(tmp); q0.copyTo(q3); tmp.copyTo(q0); q2.copyTo(tmp); // swap quadrant (Bottom-Left with Top-Right) q1.copyTo(q2); tmp.copyTo(q1); return ret; } A: this is for future reference: been tested and is bit accurate for 1D cv::Mat ff; cv::dft(distanceF, ff, cv::DFT_ROWS|cv::DFT_COMPLEX_OUTPUT); //Make place for both the complex and the real values cv::Mat planes[] = {cv::Mat::zeros(distanceF.size(),1, CV_32F), cv::Mat::zeros(distanceF.size(),1, CV_32F)}; cv::split(ff, planes); // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I)) int m = planes[0].cols; int pivot = ceil(m/2); //duplicate FFT results with Complex conjugate in order to get exact matlab results for (int i = pivot + 1, 
k = pivot; i < planes[1].cols; i++, k--) { planes[1].at<float>(i) = planes[1].at<float>(k) * -1; planes[0].at<float>(i) = planes[0].at<float>(k); } //TODO maybe we need to see what happens for pair and odd ?? float im = planes[1].at<float>(0); float re = planes[0].at<float>(0); for (int i = 0; i < pivot; i++) { //IM planes[1].at<float>(i) = planes[1].at<float>(pivot + i +1); planes[1].at<float>(pivot +i +1) = planes[1].at<float>(i +1); //Real planes[0].at<float>(i) = planes[0].at<float>(pivot + i +1); planes[0].at<float>(pivot +i +1) = planes[0].at<float>(i +1); } planes[1].at<float>(pivot) = im; planes[0].at<float>(pivot) = re; A: There are no implementations in earlier answers that work correctly for odd-sized images. fftshift moves the origin from the top-left to the center (at size/2). ifftshift moves the origin from the center to the top-left. These two actions are identical for even sizes, but differ for odd-sizes. For an odd size, fftshift swaps the first (size+1)/2 pixels with the remaining size/2 pixels, which moves the pixel at index 0 to size/2. ifftshift does the reverse, swapping the first size/2 pixels with the remaining (size+1)/2 pixels. This code is the most simple implementation of both these actions that I can come up with. (Note that (size+1)/2 == size/2 if size is even.) bool forward = true; // true for fftshift, false for ifftshift cv::Mat img = ...; // the image to process // input sizes int sx = img.cols; int sy = img.rows; // size of top-left quadrant int cx = forward ? (sx + 1) / 2 : sx / 2; int cy = forward ? 
(sy + 1) / 2 : sy / 2; // split the quadrants cv::Mat top_left(img, cv::Rect(0, 0, cx, cy)); cv::Mat top_right(img, cv::Rect(cx, 0, sx - cx, cy)); cv::Mat bottom_left(img, cv::Rect(0, cy, cx, sy - cy)); cv::Mat bottom_right(img, cv::Rect(cx, cy, sx - cx, sy - cy)); // merge the quadrants in right order cv::Mat tmp1, tmp2; cv::hconcat(bottom_right, bottom_left, tmp1); cv::hconcat(top_right, top_left, tmp2); cv::vconcat(tmp1, tmp2, img); This code makes a copy of the full image twice, but it is easy and quick to implement. A more performant implementation would swap values in-place. This answer has correct code to do so on a single line, it would have to be applied to each column and each row of the image.
fftshift c++ implemetation for openCV
I have already looked in this question fftshift/ifftshift C/C++ source code I'm trying to implement fftshift from matlab this is the code from the matlab function for 1D array numDims = ndims(x); idx = cell(1, numDims); for k = 1:numDims m = size(x, k); p = ceil(m/2); idx{k} = [p+1:m 1:p]; end y = x(idx{:}); my c++/openCV code is, what fftshift basically does is swap the values from a certain pivot place. since I can't seem to understand how is the matrix built in opencv for complex numbers. it says here http://docs.opencv.org/modules/core/doc/operations_on_arrays.html#dft CCS (complex-conjugate-symmetrical I thought it will be easier to split the complex numbers into real and imaginary and swap them. and then merge back to one matrix. cv::vector<float> distanceF (f.size()); //ff = fftshift(ff); cv::Mat ff; cv::dft(distanceF, ff, cv::DFT_COMPLEX_OUTPUT); //Make place for both the complex and the real values cv::Mat planes[] = {cv::Mat::zeros(distanceF.size(),1, CV_32F), cv::Mat::zeros(distanceF.size(),1, CV_32F)}; cv::split(ff, planes); // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I)) int numDims = ff.dims; for (int i = 0; i < numDims; i++) { int m = ff.rows; int p = ceil(m/2); } my problem is that because of my input to the DFT is a vector<float> I can't seem to be able to create planes mat in order to split the complex numbers? Can you think how a better way to make the swap of the values inside the cv::mat data struct?
[ "Ok, this thread is may be out of date in the meantime but maybe for other users.. Take a look at the samples: \n\nopencv/samples/cpp/dft.cpp (line 66 - 80)\n\nint cx = mag.cols/2;\nint cy = mag.rows/2;\n\n// rearrange the quadrants of Fourier image\n// so that the origin is at the image center\nMat tmp;\nMat q0(mag, Rect(0, 0, cx, cy));\nMat q1(mag, Rect(cx, 0, cx, cy));\nMat q2(mag, Rect(0, cy, cx, cy));\nMat q3(mag, Rect(cx, cy, cx, cy));\n\nq0.copyTo(tmp);\nq3.copyTo(q0);\ntmp.copyTo(q3);\n\nq1.copyTo(tmp);\nq2.copyTo(q1);\ntmp.copyTo(q2);\n\nI think that's a short and clean way for different dimensions. \n", "I know, this is quite an old thread, but I found it today while looking for a solution to shift the fft-result. and maybe the little function I wrote with the help of this site and other sources, could be helpful for future readers searching the net and ending up here too.\nbool FftShift(const Mat& src, Mat& dst)\n{\n if(src.empty()) return true;\n\n const uint h=src.rows, w=src.cols; // height and width of src-image\n const uint qh=h>>1, qw=w>>1; // height and width of the quadrants\n\n Mat qTL(src, Rect( 0, 0, qw, qh)); // define the quadrants in respect to\n Mat qTR(src, Rect(w-qw, 0, qw, qh)); // the outer dimensions of the matrix.\n Mat qBL(src, Rect( 0, h-qh, qw, qh)); // thus, with odd sizes, the center\n Mat qBR(src, Rect(w-qw, h-qh, qw, qh)); // line(s) get(s) omitted.\n\n Mat tmp;\n hconcat(qBR, qBL, dst); // build destination matrix with switched\n hconcat(qTR, qTL, tmp); // quadrants 0 & 2 and 1 & 3 from source\n vconcat(dst, tmp, dst);\n\n return false;\n}\n\n", "How about using adjustROI and copyTo instead of .at()? 
It would certainly be more efficient:\nSomething in the lines of (for your 1D case):\nMat shifted(ff.size(),ff.type());\npivot = ff.cols / 2;\nff(Range::all(),Range(pivot + 1, ff.cols)).copyTo(shifted(Range::all(),Range(0,pivot)));\nff(Range::all(),Range(0,pivot+1)).copyTo(shifted(Range::all(),Range(pivot,ff.cols)));\n\nFor the 2D case, two more lines should be added, and the rows ranges modified...\n", "I have been implementing it myself based on this post, I used Fabian implementation which is working fine. But there is a problem when there is an odd number of row or column, the shift is then not correct.\nYou need then to padd your matrix and after to get rid of the extra row or column.\n {bool flag_row = false;\n bool flag_col = false;\n\n if( (inputMatrix.rows % 2)>0)\n {\n cv::Mat row = cv::Mat::zeros(1,inputMatrix.cols, CV_64F); \n inputMatrix.push_back(row);\n flag_row =true;\n }\n\n if( (inputMatrix.cols % 2)>0)\n {\n cv::Mat col = cv::Mat::zeros(1,inputMatrix.rows, CV_64F); \n cv::Mat tmp;\n inputMatrix.copyTo(tmp);\n tmp=tmp.t();\n tmp.push_back(col);\n tmp=tmp.t();\n tmp.copyTo(inputMatrix);\n\n flag_col = true;\n }\n\n int cx = inputMatrix.cols/2.0;\n int cy = inputMatrix.rows/2.0;\n\n cv::Mat outputMatrix;\n inputMatrix.copyTo(outputMatrix);\n\n // rearrange the quadrants of Fourier image\n // so that the origin is at the image center\n cv::Mat tmp;\n cv::Mat q0(outputMatrix, cv::Rect(0, 0, cx, cy));\n cv::Mat q1(outputMatrix, cv::Rect(cx, 0, cx, cy));\n cv::Mat q2(outputMatrix, cv::Rect(0, cy, cx, cy));\n cv::Mat q3(outputMatrix, cv::Rect(cx, cy, cx, cy));\n\n q0.copyTo(tmp);\n q3.copyTo(q0);\n tmp.copyTo(q3);\n\n q1.copyTo(tmp);\n q2.copyTo(q1);\n tmp.copyTo(q2);\n\n int row = inputMatrix.rows;\n int col = inputMatrix.cols;\n if(flag_row)\n {\n outputMatrix = Tools::removerow(outputMatrix,row/2-1);\n }\n if(flag_col)\n {\n outputMatrix = Tools::removecol(outputMatrix,col/2-1);\n }\n\n return outputMatrix;\n\n", "Here is what I do (quick and dirty, 
can be optimized):\n// taken from the opencv DFT example (see opencv/samples/cpp/dft.cpp within opencv v440 sourcecode package)\ncv::Mat fftshift(const cv::Mat& mat){\n \n // create copy to not mess up the original matrix (ret is only a \"window\" over the provided matrix)\n cv::Mat cpy;\n mat.copyTo(cpy);\n\n // crop the spectrum, if it has an odd number of rows or columns\n cv::Mat ret = cpy(cv::Rect(0, 0, cpy.cols & -2, cpy.rows & -2));\n\n // rearrange the quadrants of Fourier image so that the origin is at the image center\n int cx = ret.cols/2;\n int cy = ret.rows/2;\n cv::Mat q0(ret, cv::Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant\n cv::Mat q1(ret, cv::Rect(cx, 0, cx, cy)); // Top-Right\n cv::Mat q2(ret, cv::Rect(0, cy, cx, cy)); // Bottom-Left\n cv::Mat q3(ret, cv::Rect(cx, cy, cx, cy)); // Bottom-Right\n\n cv::Mat tmp; // swap quadrants (Top-Left with Bottom-Right)\n q0.copyTo(tmp);\n q3.copyTo(q0);\n tmp.copyTo(q3);\n q1.copyTo(tmp); // swap quadrant (Top-Right with Bottom-Left)\n q2.copyTo(q1);\n tmp.copyTo(q2);\n\n return ret;\n}\n\n// reverse the swapping of fftshift. 
(-> reverse the quadrant swapping)\ncv::Mat ifftshift(const cv::Mat& mat){\n\n // create copy to not mess up the original matrix (ret is only a \"window\" over the provided matrix)\n cv::Mat cpy;\n mat.copyTo(cpy);\n\n // crop the spectrum, if it has an odd number of rows or columns\n cv::Mat ret = cpy(cv::Rect(0, 0, cpy.cols & -2, cpy.rows & -2));\n\n // rearrange the quadrants of Fourier image so that the origin is at the image center\n int cx = ret.cols/2;\n int cy = ret.rows/2;\n cv::Mat q0(ret, cv::Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant\n cv::Mat q1(ret, cv::Rect(cx, 0, cx, cy)); // Top-Right\n cv::Mat q2(ret, cv::Rect(0, cy, cx, cy)); // Bottom-Left\n cv::Mat q3(ret, cv::Rect(cx, cy, cx, cy)); // Bottom-Right\n\n cv::Mat tmp; // swap quadrants (Bottom-Right with Top-Left)\n q3.copyTo(tmp);\n q0.copyTo(q3);\n tmp.copyTo(q0);\n q2.copyTo(tmp); // swap quadrant (Bottom-Left with Top-Right)\n q1.copyTo(q2);\n tmp.copyTo(q1);\n\n return ret;\n}\n\n", "this is for future reference:\nbeen tested and is bit accurate for 1D\n cv::Mat ff;\n cv::dft(distanceF, ff, cv::DFT_ROWS|cv::DFT_COMPLEX_OUTPUT);\n\n //Make place for both the complex and the real values\n cv::Mat planes[] = {cv::Mat::zeros(distanceF.size(),1, CV_32F), cv::Mat::zeros(distanceF.size(),1, CV_32F)};\n cv::split(ff, planes); // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I))\n\n int m = planes[0].cols;\n int pivot = ceil(m/2);\n //duplicate FFT results with Complex conjugate in order to get exact matlab results\n\n for (int i = pivot + 1, k = pivot; i < planes[1].cols; i++, k--)\n {\n planes[1].at<float>(i) = planes[1].at<float>(k) * -1; \n planes[0].at<float>(i) = planes[0].at<float>(k);\n } \n\n //TODO maybe we need to see what happens for pair and odd ??\n float im = planes[1].at<float>(0);\n float re = planes[0].at<float>(0);\n\n for (int i = 0; i < pivot; i++)\n { \n //IM\n planes[1].at<float>(i) = planes[1].at<float>(pivot + i +1); \n planes[1].at<float>(pivot +i +1) = 
planes[1].at<float>(i +1);\n\n //Real\n planes[0].at<float>(i) = planes[0].at<float>(pivot + i +1); \n planes[0].at<float>(pivot +i +1) = planes[0].at<float>(i +1);\n }\n planes[1].at<float>(pivot) = im;\n planes[0].at<float>(pivot) = re;\n\n", "There are no implementations in earlier answers that work correctly for odd-sized images.\nfftshift moves the origin from the top-left to the center (at size/2).\nifftshift moves the origin from the center to the top-left.\nThese two actions are identical for even sizes, but differ for odd-sizes.\nFor an odd size, fftshift swaps the first (size+1)/2 pixels with the remaining size/2 pixels, which moves the pixel at index 0 to size/2. ifftshift does the reverse, swapping the first size/2 pixels with the remaining (size+1)/2 pixels. This code is the most simple implementation of both these actions that I can come up with. (Note that (size+1)/2 == size/2 if size is even.)\nbool forward = true; // true for fftshift, false for ifftshift\ncv::Mat img = ...; // the image to process\n\n// input sizes\nint sx = img.cols;\nint sy = img.rows;\n\n// size of top-left quadrant\nint cx = forward ? (sx + 1) / 2 : sx / 2;\nint cy = forward ? (sy + 1) / 2 : sy / 2;\n\n// split the quadrants\ncv::Mat top_left(img, cv::Rect(0, 0, cx, cy));\ncv::Mat top_right(img, cv::Rect(cx, 0, sx - cx, cy));\ncv::Mat bottom_left(img, cv::Rect(0, cy, cx, sy - cy));\ncv::Mat bottom_right(img, cv::Rect(cx, cy, sx - cx, sy - cy));\n\n// merge the quadrants in right order\ncv::Mat tmp1, tmp2;\ncv::hconcat(bottom_right, bottom_left, tmp1);\ncv::hconcat(top_right, top_left, tmp2);\ncv::vconcat(tmp1, tmp2, img);\n\nThis code makes a copy of the full image twice, but it is easy and quick to implement. A more performant implementation would swap values in-place. This answer has correct code to do so on a single line, it would have to be applied to each column and each row of the image.\n" ]
[ 5, 2, 1, 1, 1, 0, 0 ]
[ "In Matlab's implementation, the main code are the two lines:\nidx{k} = [p+1:m 1:p];\ny = x(idx{:});\n\nThe first one obtains the correct index order against the original one; then the second one assigns the output array according to the index order. Therefore, if you want to re-write Matlab's implementation without data swapping, you need to allocate a new array and assign the array. \n" ]
[ -1 ]
[ "c++", "image_processing", "matlab", "opencv" ]
stackoverflow_0029226465_c++_image_processing_matlab_opencv.txt
Q: Angular Node.js CPanel Deployment I am trying to deploy my Angular Universal using Node.js option in CPanel. My configuration is as follows: Node.js Version: 12.9.0 Application Mode: Development Application Root: public_html/ssr Application URL: ssr Application Startup File: main.js It has managed to run npm install and node_modules are now installed. But when I press 'RUN JS SCRIPT' it gives me an error. Following is the log file 0 info it worked if it ends with ok 1 verbose cli [ '/opt/alt/alt-nodejs11/root/usr/bin/node', 1 verbose cli '/opt/alt/alt-nodejs11/root/usr/bin/npm', 1 verbose cli 'run-script', 1 verbose cli 'build', 1 verbose cli '--' ] 2 info using npm@6.7.0 3 info using node@v11.15.0 4 verbose run-script [ 'prebuild', 'build', 'postbuild' ] 5 info lifecycle sunnyssr@0.0.0~prebuild: sunnyssr@0.0.0 6 info lifecycle sunnyssr@0.0.0~build: sunnyssr@0.0.0 7 warn lifecycle The node binary used for scripts is /home/sunnytex/nodevenv/public_html/ssr/11/bin/node but npm is using /opt/alt/alt-nodejs11/root/usr/bin/node itself. Use the `--scripts-prepend-node-path` option to include the path for the node binary npm was executed with. 
8 verbose lifecycle sunnyssr@0.0.0~build: unsafe-perm in lifecycle true 9 verbose lifecycle sunnyssr@0.0.0~build: PATH: /opt/alt/alt-nodejs11/root/usr/lib/node_modules/npm/node_modules.bundled/npm-lifecycle/node-gyp-bin:/home/sunnytex/public_html/ssr/node_modules/.bin:/home/sunnytex/nodevenv/public_html/ssr/11/bin:/opt/alt/alt-nodejs11/root/usr/bin:/home/sunnytex/nodevenv/public_html/ssr/11/lib/bin/:/usr/local/jdk/bin:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/X11R6/bin:/usr/local/bin:/usr/X11R6/bin:/root/bin:/opt/bin 10 verbose lifecycle sunnyssr@0.0.0~build: CWD: /home/sunnytex/public_html/ssr 11 silly lifecycle sunnyssr@0.0.0~build: Args: [ '-c', 'ng build' ] 12 info lifecycle sunnyssr@0.0.0~build: Failed to exec build script 13 verbose stack Error: sunnyssr@0.0.0 build: `ng build` 13 verbose stack spawn ENOENT 13 verbose stack at ChildProcess.<anonymous> (/opt/alt/alt-nodejs11/root/usr/lib/node_modules/npm/node_modules.bundled/npm-lifecycle/lib/spawn.js:48:18) 13 verbose stack at ChildProcess.emit (events.js:193:13) 13 verbose stack at maybeClose (internal/child_process.js:999:16) 13 verbose stack at Process.ChildProcess._handle.onexit (internal/child_process.js:266:5) 14 verbose pkgid sunnyssr@0.0.0 15 verbose cwd /home/sunnytex/public_html/ssr 16 verbose Linux 2.6.32-954.3.5.lve1.4.76.el6.x86_64 17 verbose argv "/opt/alt/alt-nodejs11/root/usr/bin/node" "/opt/alt/alt-nodejs11/root/usr/bin/npm" "run-script" "build" "--" 18 verbose node v11.15.0 19 verbose npm v6.7.0 20 error file sh 21 error code ELIFECYCLE 22 error errno ENOENT 23 error syscall spawn 24 error sunnyssr@0.0.0 build: `ng build` 24 error spawn ENOENT 25 error Failed at the sunnyssr@0.0.0 build script. 25 error This is probably not a problem with npm. There is likely additional logging output above. 
26 verbose exit [ 1, true ] My package.json is as follows { "name": "sunnyssr", "version": "0.0.0", "scripts": { "ng": "ng", "start": "ng serve", "build": "ng build", "test": "ng test", "lint": "ng lint", "e2e": "ng e2e", "dev:ssr": "ng run sunnyssr:serve-ssr", "serve:ssr": "node dist/sunnyssr/server/main.js", "build:ssr": "ng build --prod && ng run sunnyssr:server:production", "prerender": "ng run sunnyssr:prerender" }, "private": true, "dependencies": { "@angular/animations": "~10.0.4", "@angular/common": "~10.0.4", "@angular/compiler": "~10.0.4", "@angular/core": "~10.0.4", "@angular/forms": "~10.0.4", "@angular/localize": "^10.0.4", "@angular/platform-browser": "~10.0.4", "@angular/platform-browser-dynamic": "~10.0.4", "@angular/platform-server": "~10.0.4", "@angular/router": "~10.0.4", "@ng-bootstrap/ng-bootstrap": "^7.0.0", "@nguniversal/express-engine": "^10.0.1", "animate.css": "^4.1.0", "bootstrap": "^4.5.0", "express": "^4.15.2", "ngx-image-zoom": "^0.6.0", "ngx-infinite-scroll": "^9.0.0", "ngx-owl-carousel-o": "^3.0.1", "ngx-page-scroll": "^7.0.1", "ngx-page-scroll-core": "^7.0.1", "node": "^10.21.0", "rxjs": "~6.5.5", "tslib": "^2.0.0", "zone.js": "~0.10.3" }, "devDependencies": { "@angular-devkit/build-angular": "~0.1000.3", "@angular/cli": "^10.0.3", "@angular/compiler-cli": "~10.0.4", "@nguniversal/builders": "^10.0.1", "@types/express": "^4.17.0", "@types/jasmine": "~3.5.0", "@types/jasminewd2": "~2.0.3", "@types/node": "^12.11.1", "codelyzer": "^6.0.0", "jasmine-core": "~3.5.0", "jasmine-spec-reporter": "~5.0.0", "karma": "~5.0.0", "karma-chrome-launcher": "~3.1.0", "karma-coverage-istanbul-reporter": "~3.0.2", "karma-jasmine": "~3.3.0", "karma-jasmine-html-reporter": "^1.5.0", "protractor": "~7.0.0", "ts-node": "~8.3.0", "tslint": "~6.1.0", "typescript": "~3.9.5" } } I would be grateful if someone could kindly guide me how to fix this issue. Thank in advance. A: This post is old, but if anyone else is interested... 
Please see this video for steps on how to deploy node.js via cPanel "setup nodejs app" module https://youtu.be/sIcy3q3Ib_s
Angular Node.js CPanel Deployment
I am trying to deploy my Angular Universal using Node.js option in CPanel. My configuration is as follows: Node.js Version: 12.9.0 Application Mode: Development Application Root: public_html/ssr Application URL: ssr Application Startup File: main.js It has managed to run npm install and node_modules are now installed. But when I press 'RUN JS SCRIPT' it gives me an error. Following is the log file 0 info it worked if it ends with ok 1 verbose cli [ '/opt/alt/alt-nodejs11/root/usr/bin/node', 1 verbose cli '/opt/alt/alt-nodejs11/root/usr/bin/npm', 1 verbose cli 'run-script', 1 verbose cli 'build', 1 verbose cli '--' ] 2 info using npm@6.7.0 3 info using node@v11.15.0 4 verbose run-script [ 'prebuild', 'build', 'postbuild' ] 5 info lifecycle sunnyssr@0.0.0~prebuild: sunnyssr@0.0.0 6 info lifecycle sunnyssr@0.0.0~build: sunnyssr@0.0.0 7 warn lifecycle The node binary used for scripts is /home/sunnytex/nodevenv/public_html/ssr/11/bin/node but npm is using /opt/alt/alt-nodejs11/root/usr/bin/node itself. Use the `--scripts-prepend-node-path` option to include the path for the node binary npm was executed with. 
8 verbose lifecycle sunnyssr@0.0.0~build: unsafe-perm in lifecycle true 9 verbose lifecycle sunnyssr@0.0.0~build: PATH: /opt/alt/alt-nodejs11/root/usr/lib/node_modules/npm/node_modules.bundled/npm-lifecycle/node-gyp-bin:/home/sunnytex/public_html/ssr/node_modules/.bin:/home/sunnytex/nodevenv/public_html/ssr/11/bin:/opt/alt/alt-nodejs11/root/usr/bin:/home/sunnytex/nodevenv/public_html/ssr/11/lib/bin/:/usr/local/jdk/bin:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/X11R6/bin:/usr/local/bin:/usr/X11R6/bin:/root/bin:/opt/bin 10 verbose lifecycle sunnyssr@0.0.0~build: CWD: /home/sunnytex/public_html/ssr 11 silly lifecycle sunnyssr@0.0.0~build: Args: [ '-c', 'ng build' ] 12 info lifecycle sunnyssr@0.0.0~build: Failed to exec build script 13 verbose stack Error: sunnyssr@0.0.0 build: `ng build` 13 verbose stack spawn ENOENT 13 verbose stack at ChildProcess.<anonymous> (/opt/alt/alt-nodejs11/root/usr/lib/node_modules/npm/node_modules.bundled/npm-lifecycle/lib/spawn.js:48:18) 13 verbose stack at ChildProcess.emit (events.js:193:13) 13 verbose stack at maybeClose (internal/child_process.js:999:16) 13 verbose stack at Process.ChildProcess._handle.onexit (internal/child_process.js:266:5) 14 verbose pkgid sunnyssr@0.0.0 15 verbose cwd /home/sunnytex/public_html/ssr 16 verbose Linux 2.6.32-954.3.5.lve1.4.76.el6.x86_64 17 verbose argv "/opt/alt/alt-nodejs11/root/usr/bin/node" "/opt/alt/alt-nodejs11/root/usr/bin/npm" "run-script" "build" "--" 18 verbose node v11.15.0 19 verbose npm v6.7.0 20 error file sh 21 error code ELIFECYCLE 22 error errno ENOENT 23 error syscall spawn 24 error sunnyssr@0.0.0 build: `ng build` 24 error spawn ENOENT 25 error Failed at the sunnyssr@0.0.0 build script. 25 error This is probably not a problem with npm. There is likely additional logging output above. 
26 verbose exit [ 1, true ] My package.json is as follows { "name": "sunnyssr", "version": "0.0.0", "scripts": { "ng": "ng", "start": "ng serve", "build": "ng build", "test": "ng test", "lint": "ng lint", "e2e": "ng e2e", "dev:ssr": "ng run sunnyssr:serve-ssr", "serve:ssr": "node dist/sunnyssr/server/main.js", "build:ssr": "ng build --prod && ng run sunnyssr:server:production", "prerender": "ng run sunnyssr:prerender" }, "private": true, "dependencies": { "@angular/animations": "~10.0.4", "@angular/common": "~10.0.4", "@angular/compiler": "~10.0.4", "@angular/core": "~10.0.4", "@angular/forms": "~10.0.4", "@angular/localize": "^10.0.4", "@angular/platform-browser": "~10.0.4", "@angular/platform-browser-dynamic": "~10.0.4", "@angular/platform-server": "~10.0.4", "@angular/router": "~10.0.4", "@ng-bootstrap/ng-bootstrap": "^7.0.0", "@nguniversal/express-engine": "^10.0.1", "animate.css": "^4.1.0", "bootstrap": "^4.5.0", "express": "^4.15.2", "ngx-image-zoom": "^0.6.0", "ngx-infinite-scroll": "^9.0.0", "ngx-owl-carousel-o": "^3.0.1", "ngx-page-scroll": "^7.0.1", "ngx-page-scroll-core": "^7.0.1", "node": "^10.21.0", "rxjs": "~6.5.5", "tslib": "^2.0.0", "zone.js": "~0.10.3" }, "devDependencies": { "@angular-devkit/build-angular": "~0.1000.3", "@angular/cli": "^10.0.3", "@angular/compiler-cli": "~10.0.4", "@nguniversal/builders": "^10.0.1", "@types/express": "^4.17.0", "@types/jasmine": "~3.5.0", "@types/jasminewd2": "~2.0.3", "@types/node": "^12.11.1", "codelyzer": "^6.0.0", "jasmine-core": "~3.5.0", "jasmine-spec-reporter": "~5.0.0", "karma": "~5.0.0", "karma-chrome-launcher": "~3.1.0", "karma-coverage-istanbul-reporter": "~3.0.2", "karma-jasmine": "~3.3.0", "karma-jasmine-html-reporter": "^1.5.0", "protractor": "~7.0.0", "ts-node": "~8.3.0", "tslint": "~6.1.0", "typescript": "~3.9.5" } } I would be grateful if someone could kindly guide me how to fix this issue. Thank in advance.
[ "This post is old, but if anyone else is interested...\nPlease see this video for steps on how to deploy node.js via cPanel \"setup nodejs app\" module https://youtu.be/sIcy3q3Ib_s\n" ]
[ 0 ]
[]
[]
[ "angular", "angular_universal", "cpanel", "node.js", "npm" ]
stackoverflow_0062949543_angular_angular_universal_cpanel_node.js_npm.txt
Q: Error using SQS with multiple Laravel queue readers I am using Laravel Jobs to read messages from an SQS queue (Laravel version 5.7) Following Laravel indications I am using supervisor to run multiple queue:work processes at the same time. All goes well until I get this SQS error related to the message availability: InvalidParameterValue (client): Value ... for parameter ReceiptHandle is invalid. Reason: Message does not exist or is not available for visibility timeout change. - <?xml version="1.0"?> <ErrorResponse xmlns="http://queue.amazonaws.com/doc/2012-11-05/"><Error> <Type>Sender</Type><Code>InvalidParameterValue</Code><Message>Value ... for parameter ReceiptHandle is invalid. Reason: Message does not exist or is not available for visibility timeout change.</Message><Detail/></Error> <RequestId>8c1d28b7-a02c-5059-8b65-7c6292a0e56e</RequestId></ErrorResponse> {"exception":"[object] (Aws\\Sqs\\Exception\\SqsException(code: 0): Error executing \"ChangeMessageVisibility\" on \"https://sqs.eu-central- 1.amazonaws.com/123123123123/myQueue\"; AWS HTTP error: Client error: `POST https://sqs.eu-central-1.amazonaws.com/123123123123/myQueue` resulted in a `400 Bad Request` response: In particular, the strange thing is Message does not exist or is not available for visibility timeout change. Each supervisor process calls command=php /home/application/artisan queue:work without a --sleep=3 (I'd like the process to be reactive and not waiting for 3 seconds in case nothing was in the queue) nor a --tries=3 (I need all the tasks to be completed, so I don't put a limit to the tries parameter) In case the message is not existing (and I can't exclude this possibility) why does the process fetches it from the queue ? Is there anything I can do to prevent it ? A: I've seen this error intermittently in production too, where we run a good number of consumers for a single SQS queue. In our case, I'm pretty convinced that the error is due to SQS's at-least-once delivery semantics. 
Essentially, a message can be delivered twice or more on rare occasions. Laravel's queue worker command isn't strictly idempotent because it will throw an exception when trying to release or delete an SQS message that is no longer available (i.e., because it has been deleted by another queue worker process, which received a duplicate of the message from SQS). Our workaround is to try to detect when a duplicate message has been received, and then attempt to safely release the message back onto the queue. If the other queue worker that is currently working on the message succeeds, it will delete the message, and it won't be received again. If the other queue worker fails, then the message will be released and received again later. Something like this: <?php use Aws\Sqs\Exception\SqsException; use Illuminate\Bus\Queueable; use Illuminate\Contracts\Queue\ShouldQueue; use Illuminate\Foundation\Bus\Dispatchable; use Illuminate\Queue\InteractsWithQueue; use Illuminate\Queue\SerializesModels; use Illuminate\Support\Facades\Cache; use Illuminate\Support\Str; class ProcessPodcast implements ShouldQueue { use Dispatchable, InteractsWithQueue, Queueable, SerializesModels; private $jobId; public function __construct($jobId) { $this->jobId = $jobId; } public function handle() { $acquired = Cache::lock("process-podcast-$this->jobId")->get(function () { // Process the podcast (NB: this should be idempotent) }); if (!$acquired) { $this->releaseDuplicateMessage($delay = 60); } } private function releaseDuplicateMessage($delay) { try { $this->release($delay); } catch (Exception $ex) { if (!$this->causedByMessageNoLongerAvailable($ex)) { throw $ex; } } } private function causedByMessageNoLongerAvailable(Exception $ex): bool { return $ex instanceof SqsException && Str::contains( $ex->getAwsErrorMessage(), "Message does not exist or is not available for visibility timeout change" ); } } A: Another potential for these duplicate messages is that SQS has a default Visibility Timeout of 
30secs. Visibility timeout sets the length of time that a message received from a queue (by one consumer) will not be visible to the other message consumers. So if one worker reads a message from the queue and it takes longer than 30secs to process, the message will become visible again and another worker will start processing. When the first worker finishes, it will delete it from the queue. Then when the second worker finishes processing the same message and tries to delete it, it can't because the first worker already deleted it. We're having the same issue at the moment and are implementing a fix/workaround similar to Louis. Will post our version when done and confirmed working. Note: You can increase the Visibility Timeout on SQS.
Error using SQS with multiple Laravel queue readers
I am using Laravel Jobs to read messages from an SQS queue (Laravel version 5.7) Following Laravel indications I am using supervisor to run multiple queue:work processes at the same time. All goes well until I get this SQS error related to the message availability: InvalidParameterValue (client): Value ... for parameter ReceiptHandle is invalid. Reason: Message does not exist or is not available for visibility timeout change. - <?xml version="1.0"?> <ErrorResponse xmlns="http://queue.amazonaws.com/doc/2012-11-05/"><Error> <Type>Sender</Type><Code>InvalidParameterValue</Code><Message>Value ... for parameter ReceiptHandle is invalid. Reason: Message does not exist or is not available for visibility timeout change.</Message><Detail/></Error> <RequestId>8c1d28b7-a02c-5059-8b65-7c6292a0e56e</RequestId></ErrorResponse> {"exception":"[object] (Aws\\Sqs\\Exception\\SqsException(code: 0): Error executing \"ChangeMessageVisibility\" on \"https://sqs.eu-central- 1.amazonaws.com/123123123123/myQueue\"; AWS HTTP error: Client error: `POST https://sqs.eu-central-1.amazonaws.com/123123123123/myQueue` resulted in a `400 Bad Request` response: In particular, the strange thing is Message does not exist or is not available for visibility timeout change. Each supervisor process calls command=php /home/application/artisan queue:work without a --sleep=3 (I'd like the process to be reactive and not waiting for 3 seconds in case nothing was in the queue) nor a --tries=3 (I need all the tasks to be completed, so I don't put a limit to the tries parameter) In case the message is not existing (and I can't exclude this possibility) why does the process fetches it from the queue ? Is there anything I can do to prevent it ?
[ "I've seen this error intermittently in production too, where we run a good number of consumers for a single SQS queue. In our case, I'm pretty convinced that the error is due to SQS's at-least-once delivery semantics. Essentially, a message can be delivered twice or more on rare occasions.\nLaravel's queue worker command isn't strictly idempotent because it will throw an exception when trying to release or delete an SQS message that is no longer available (i.e., because it has been deleted by another queue worker process, which received a duplicate of the message from SQS).\nOur workaround is to try to detect when a duplicate message has been received, and then attempt to safely release the message back onto the queue. If the other queue worker that is currently working on the message succeeds, it will delete the message, and it won't be received again. If the other queue worker fails, then the message will be released and received again later. Something like this:\n<?php\n\nuse Aws\\Sqs\\Exception\\SqsException;\nuse Illuminate\\Bus\\Queueable;\nuse Illuminate\\Contracts\\Queue\\ShouldQueue;\nuse Illuminate\\Foundation\\Bus\\Dispatchable;\nuse Illuminate\\Queue\\InteractsWithQueue;\nuse Illuminate\\Queue\\SerializesModels;\nuse Illuminate\\Support\\Facades\\Cache;\nuse Illuminate\\Support\\Str;\n\nclass ProcessPodcast implements ShouldQueue\n{\n use Dispatchable, InteractsWithQueue, Queueable, SerializesModels;\n\n private $jobId;\n\n public function __construct($jobId)\n {\n $this->jobId = $jobId;\n }\n\n public function handle()\n {\n $acquired = Cache::lock(\"process-podcast-$this->jobId\")->get(function () {\n // Process the podcast (NB: this should be idempotent)\n });\n\n if (!$acquired) {\n $this->releaseDuplicateMessage($delay = 60);\n }\n }\n\n private function releaseDuplicateMessage($delay)\n {\n try {\n $this->release($delay);\n } catch (Exception $ex) {\n if (!$this->causedByMessageNoLongerAvailable($ex)) {\n throw $ex;\n }\n }\n }\n\n private 
function causedByMessageNoLongerAvailable(Exception $ex): bool\n {\n return $ex instanceof SqsException &&\n Str::contains(\n $ex->getAwsErrorMessage(),\n \"Message does not exist or is not available for visibility timeout change\"\n );\n }\n}\n\n", "Another potential for these duplicate messages is that SQS has a default Visibility Timeout of 30secs.\n\nVisibility timeout sets the length of time that a message received from a queue (by one consumer) will not be visible to the other message consumers.\n\nSo if one worker reads a message from the queue and it takes longer than 30secs to process, the message will become visible again and another worker will start processing.\nWhen the first worker finishes, it will delete it from the queue. Then when the second worker finishes processing the same message and tries to delete it, it can't because the first worker already deleted it.\nWe're having the same issue at the moment and are implementing a fix/workaround similar to Louis. Will post our version when done and confirmed working.\nNote: You can increase the Visibility Timeout on SQS.\n" ]
[ 0, 0 ]
[]
[]
[ "amazon_sqs", "laravel", "php" ]
stackoverflow_0054569337_amazon_sqs_laravel_php.txt
Q: PDF to txt python3 I'm trying to convert pdf file to txt. ` import re import PyPDF2 with open('123.pdf', 'rb') as pdfFileObj: pdfreader = PyPDF2.PdfFileReader(pdfFileObj) x = pdfreader.numPages pageObj = pdfreader.getPage(x + 1) text = pageObj.extractText() file1 = open(f"C:\\Users\\honorr\\Desktop\\ssssssss\{re.sub('pdf$','txt',pdfFileObj)}", "a") file1.writelines(text) file1.close() Errors: Traceback (most recent call last): File "C:\Users\honorr\Desktop\ssssssss\main.py", line 5, in <module> pageobj = pdfreader.getPage(x + 1) File "C:\Users\honorr\Desktop\ssssssss\venv\lib\site-packages\PyPDF2\_reader.py", line 477, in getPage return self._get_page(pageNumber) File "C:\Users\honorr\Desktop\ssssssss\venv\lib\site-packages\PyPDF2\_reader.py", line 492, in _get_page return self.flattened_pages[page_number] IndexError: list index out of range ` How to fix it? So i don't know why i have this errors. Maybe somebody tell me another way to convert from PDF to TXT? A: You're setting x to the number of pages, but then trying to get page x + 1, which doesn't exist. Depending on how the library is implemented (I'm not familiar with PyPDF2), you may need to try pdfreader.getPage(x) or pdfreader.getPage(x - 1) to get it to work. This will only get the last page in the document though.
PDF to txt python3
I'm trying to convert pdf file to txt. ` import re import PyPDF2 with open('123.pdf', 'rb') as pdfFileObj: pdfreader = PyPDF2.PdfFileReader(pdfFileObj) x = pdfreader.numPages pageObj = pdfreader.getPage(x + 1) text = pageObj.extractText() file1 = open(f"C:\\Users\\honorr\\Desktop\\ssssssss\{re.sub('pdf$','txt',pdfFileObj)}", "a") file1.writelines(text) file1.close() Errors: Traceback (most recent call last): File "C:\Users\honorr\Desktop\ssssssss\main.py", line 5, in <module> pageobj = pdfreader.getPage(x + 1) File "C:\Users\honorr\Desktop\ssssssss\venv\lib\site-packages\PyPDF2\_reader.py", line 477, in getPage return self._get_page(pageNumber) File "C:\Users\honorr\Desktop\ssssssss\venv\lib\site-packages\PyPDF2\_reader.py", line 492, in _get_page return self.flattened_pages[page_number] IndexError: list index out of range ` How to fix it? So i don't know why i have this errors. Maybe somebody tell me another way to convert from PDF to TXT?
[ "You're setting x to the number of pages, but then trying to get page x + 1, which doesn't exist. Depending on how the library is implemented (I'm not familiar with PyPDF2), you may need to try pdfreader.getPage(x) or pdfreader.getPage(x - 1) to get it to work. This will only get the last page in the document though.\n" ]
[ 0 ]
[]
[]
[ "pdf", "pypdf2", "txt" ]
stackoverflow_0074681138_pdf_pypdf2_txt.txt
Q: Understanding a code snippet in pine script I'm new to pine-script. I'm trying to understand an indicator 'HalfTrend' by Alex Orekhov (everget) at TradingView. I'm having hard time in understanding following snippet, may you please explain this: if not na(trend[1]) and trend[1] != 1 Does the above line mean the following: na(trend[1]) //check if trend[1] exists trend[1] != 1 //if trend[1] exists, check if it is not equal to 1 And not of the whole expression In other words, are we checking if trend[1] exists and if it is equal to 1, am i right??? A: Yes, your understanding is correct. na() will check if it is NaN. [1] refers to the previous value of the series. Since it refers to a historical value with [1], for the very first bar of the chart it will return na. Because there is no previous value yet. That's why that check is there for. //@version=5 indicator("My script") trend = 0 plot(trend[1]) A: Stuck in the same problem. trend is declared an int variable. So how come we check its historical value as it can store a single value? var int trend = 0
Understanding a code snippet in pine script
I'm new to pine-script. I'm trying to understand an indicator 'HalfTrend' by Alex Orekhov (everget) at TradingView. I'm having hard time in understanding following snippet, may you please explain this: if not na(trend[1]) and trend[1] != 1 Does the above line mean the following: na(trend[1]) //check if trend[1] exists trend[1] != 1 //if trend[1] exists, check if it is not equal to 1 And not of the whole expression In other words, are we checking if trend[1] exists and if it is equal to 1, am i right???
[ "Yes, your understanding is correct. na() will check if it is NaN. [1] refers to the previous value of the series.\nSince it refers to a historical value with [1], for the very first bar of the chart it will return na. Because there is no previous value yet. That's why that check is there for.\n//@version=5\nindicator(\"My script\")\ntrend = 0\nplot(trend[1])\n\n\n", "Stuck in the same problem. trend is declared an int variable. So how come we check its historical value as it can store a single value?\nvar int trend = 0\n" ]
[ 0, 0 ]
[]
[]
[ "pine_script", "pine_script_v4" ]
stackoverflow_0071231338_pine_script_pine_script_v4.txt
Q: How do I multiply tensors like this? I am working on a project where I need to multiply 2 tensors which look like this. The first tensor has 5 matrices and the second one has 5 column vectors. I need to multiply these two to get the resultant tensor such that each element of that tensor is the column vector I get after multiplying the corresponding matrix by the corresponding column vector. ` tensor([[[8.1776, 0.6560], [0.6560, 2.3653]], [[8.1776, 0.6560], [0.6560, 2.3104]], [[8.9871, 0.6560], [0.6560, 2.2535]], [[1.3231, 0.6560], [0.6560, 2.3331]], [[4.8677, 0.6560], [0.6560, 2.2935]]], grad_fn=<AddBackward0>) tensor([[-0.1836, -0.9153], [-0.1836, -0.8057], [-0.2288, -0.6442], [ 0.1017, -0.8555], [-0.0175, -0.7637]], grad_fn=<AddBackward0>) ` I simple @ or * does not work. What should I do? I need to call backward and hence cannot lose the gradients. I tried other functions like @ or * and looked up some docs like torch.split but none of them really worked. A: It seems like what you are trying to do is best done with torch.einsum which is a function allowing you to perform custom product-summation operations. Say first tensor is named t1 & second tensor is named t2, then to obtain a matrix-vector multiplication, resulting in a 5x5x2 shaped tensor, you should use the following command: torch.einsum('bij,ci->bcj', t1, t2) The first string argument defines the product-summation operation. I suggest you read more about it here (it's the NumPy's equivalent einsum operation but the format is similar): Understanding NumPy's einsum A: You should familiarize yourself with the tensor product if you want to multiply tensors (https://pytorch.org/docs/stable/generated/torch.tensordot.html). I hope I understand correctly of what it is you are trying to achieve here. Let's say your first array is called a with shape (5, 2, 2) and your second array is called b with shape (5, 2). 
There is a very short solution to your problem using einsum: result = einsum('ijk, ik -> ij', a, b) will give you the desired result. However, to make clear what is happening, I'll provide a lengthier version. To achieve a matrix vector multiplication of a and b, you first need to sum over the last axis of a and over the second axis of b. c = tensordot(a, b, dims = ([-1], [1])) c has shape (5, 2, 5) this is because you took all possible combinations of multiplying each of the (2, 2) matrices of a with the columns of b. This means that: c[0, :, 0] gives the matrix multiplication of a[0] with b[0] c[1, :, 0] gives the matrix multiplication of a[1] with b[0] c[1, :, 1] gives the matrix multiplication of a[1] with b[1] ... However, if you are not interested in the "mixed" terms, only in the diagonal entries, then: result = diagonal(c, offset=0, dim1=0, dim2=2) Just note that the diagonal entries are put to the last dimension, you will still need to transpose the result to get shape (5, 2). I hope this helps!
How do I multiply tensors like this?
I am working on a project where I need to multiply 2 tensors which look like this. The first tensor has 5 matrices and the second one has 5 column vectors. I need to multiply these two to get the resultant tensor such that each element of that tensor is the column vector I get after multiplying the corresponding matrix by the corresponding column vector. ` tensor([[[8.1776, 0.6560], [0.6560, 2.3653]], [[8.1776, 0.6560], [0.6560, 2.3104]], [[8.9871, 0.6560], [0.6560, 2.2535]], [[1.3231, 0.6560], [0.6560, 2.3331]], [[4.8677, 0.6560], [0.6560, 2.2935]]], grad_fn=<AddBackward0>) tensor([[-0.1836, -0.9153], [-0.1836, -0.8057], [-0.2288, -0.6442], [ 0.1017, -0.8555], [-0.0175, -0.7637]], grad_fn=<AddBackward0>) ` I simple @ or * does not work. What should I do? I need to call backward and hence cannot lose the gradients. I tried other functions like @ or * and looked up some docs like torch.split but none of them really worked.
[ "It seems like what you are trying to do is best done with torch.einsum which is a function allowing you to perform custom product-summation operations.\nSay first tensor is named t1 & second tensor is named t2, then to obtain a matrix-vector multiplication, resulting in a 5x5x2 shaped tensor, you should use the following command:\ntorch.einsum('bij,ci->bcj', t1, t2)\nThe first string argument defines the product-summation operation. I suggest you read more about it here (it's the NumPy's equivalent einsum operation but the format is similar):\nUnderstanding NumPy's einsum\n", "You should familiarize yourself with the tensor product if you want to multiply tensors (https://pytorch.org/docs/stable/generated/torch.tensordot.html).\nI hope I understand correctly of what it is you are trying to achieve here.\nLet's say your first array is called a with shape (5, 2, 2) and your second array is called b with shape (5, 2).\nThere is a very short solution to your problem using einsum:\nresult = einsum('ijk, ik -> ij', a, b)\n\nwill give you the desired result.\nHowever, to make clear what is happening, I'll provide a lengthier version.\nTo achieve a matrix vector multiplication of a and b, you first need to sum over the last axis of a and over the second axis of b.\nc = tensordot(a, b, dims = ([-1], [1]))\n\nc has shape (5, 2, 5) this is because you took all possible combinations of multiplying each of the (2, 2) matrices of a with the columns of b.\nThis means that:\n\nc[0, :, 0] gives the matrix multiplication of a[0] with b[0]\nc[1, :, 0] gives the matrix multiplication of a[1] with b[0]\nc[1, :, 1] gives the matrix multiplication of a[1] with b[1]\n...\n\nHowever, if you are not interested in the \"mixed\" terms, only in the diagonal entries, then:\nresult = diagonal(c, offset=0, dim1=0, dim2=2)\n\nJust note that the diagonal entries are put to the last dimension, you will still need to transpose the result to get shape (5, 2).\nI hope this helps!\n" ]
[ 0, 0 ]
[]
[]
[ "autograd", "pytorch" ]
stackoverflow_0074680817_autograd_pytorch.txt
Q: Display a list with corresponding numbers for user to select //PASSWORD Y/N LOOP //Does not want to add password, will list stores websites and corresponding numbers while (yesNo == 'n') { //WEBSITE LIST MENU LOOP //FIXME website num is exponentially increasing everytime no is entered. for (i = 0; i < (websites.getwebList().size()); i++) { System.out.print(websiteNum + " - "); System.out.println(websites.getwebList().get(i)); websiteNum = websiteNum + 1; } //Asks for user input of number of website they want to access System.out.println("Enter the number of the website you want to access:"); userNum = scnr.nextInt(); j = userNum - 1; //j corresponds to index in array since userNum is i + 1 //PRINT WEBSITE LIST while (alwaysTrue == true) { //FIXME always results in cannot access if (userNum == websiteNum) { System.out.println(websites.getwebList().get(j)); System.out.println(usernames.getuserNameList().get(j)); System.out.println(passwords.getPassList().get(j)); break; } else { System.out.println("Cannot access, try again."); break; } } }}} I am currently trying to make a password manager, I am having trouble with the last part and creating the list for user selection. The plan is to have it look something like this 1 - website1.com 2 - website2.com and when the user enters the corresponding number it will print the username and password that goes with the website as well. I am currently trying to make a password manager, I am having trouble with the last part and creating the list for user selection. The plan is to have it look something like this 1 - website1.com 2 - website2.com and when the user enters the corresponding number it will print the username and password that goes with the website as well. 
currently what is happening is the list will print, but none of the selections can ever be accessed and when it loops they numbers will exponentially increase 1 - website1.com 2 - website2.com 2 - website1.come 4 - website2.com A: when it loops they numbers will exponentially increase You're incrementing websiteNum in the first for loop, but it never resets back to zero. Every time you enter that loop you're increasing it's value. It's not 'exponentially' increasing, but it always increasing without resetting back to zero.
Display a list with corresponding numbers for user to select
//PASSWORD Y/N LOOP //Does not want to add password, will list stores websites and corresponding numbers while (yesNo == 'n') { //WEBSITE LIST MENU LOOP //FIXME website num is exponentially increasing everytime no is entered. for (i = 0; i < (websites.getwebList().size()); i++) { System.out.print(websiteNum + " - "); System.out.println(websites.getwebList().get(i)); websiteNum = websiteNum + 1; } //Asks for user input of number of website they want to access System.out.println("Enter the number of the website you want to access:"); userNum = scnr.nextInt(); j = userNum - 1; //j corresponds to index in array since userNum is i + 1 //PRINT WEBSITE LIST while (alwaysTrue == true) { //FIXME always results in cannot access if (userNum == websiteNum) { System.out.println(websites.getwebList().get(j)); System.out.println(usernames.getuserNameList().get(j)); System.out.println(passwords.getPassList().get(j)); break; } else { System.out.println("Cannot access, try again."); break; } } }}} I am currently trying to make a password manager, I am having trouble with the last part and creating the list for user selection. The plan is to have it look something like this 1 - website1.com 2 - website2.com and when the user enters the corresponding number it will print the username and password that goes with the website as well. I am currently trying to make a password manager, I am having trouble with the last part and creating the list for user selection. The plan is to have it look something like this 1 - website1.com 2 - website2.com and when the user enters the corresponding number it will print the username and password that goes with the website as well. currently what is happening is the list will print, but none of the selections can ever be accessed and when it loops they numbers will exponentially increase 1 - website1.com 2 - website2.com 2 - website1.come 4 - website2.com
[ "\nwhen it loops they numbers will exponentially increase\n\nYou're incrementing websiteNum in the first for loop, but it never resets back to zero. Every time you enter that loop you're increasing it's value. It's not 'exponentially' increasing, but it always increasing without resetting back to zero.\n" ]
[ 0 ]
[]
[]
[ "java" ]
stackoverflow_0074679787_java.txt
Q: Creating a code verifier and challenge for PKCE auth on Spotify API in ReactJS I'm trying to add Spotify auth to my single page react application following the doc from their api. So far this is how I generate the codes based on solutions I found online: const generateVerifier = () => { return crypto.randomBytes(64).toString('hex'); } const getChallenge = verifier => { return crypto.createHash('sha256') .update(verifier) .digest('base64') .replace(/\+/g, '-') .replace(/\//g, '_') .replace(/=/g, '') } An example of a pair of codes I created using that technique: verifier: e8c3745e93a9c25ce5c2653ee36f5b4fa010b4f4df8dfbad7055f4d88551dd960fb5b7602cdfa61088951eac36429862946e86d20b15250a8f0159f1ad001605 challenge: CxF5ZvoXa6Cz6IcX3VyRHxMPRXYbv4PADxko3dwPF-I An example of an old pair of codes I created: verifier: 1jp6ku6-16xxjfi-1uteidc-9gjfso-1mcc0wn-tju0lh-tr2d8k-1auq4zk challenge: SRvuz5GW2HhXzHs6b3O_wzJq4sWN0W2ma96QBx_Z77s I then get a response from the API saying "code_verifier was incorrect." What am I doing wrong here? 
A: Try following this guide for generating code for generating code challenge and verifier Here are the important parts: Generate Code Verifier // GENERATING CODE VERIFIER function dec2hex(dec) { return ("0" + dec.toString(16)).substr(-2); } function generateCodeVerifier() { var array = new Uint32Array(56 / 2); window.crypto.getRandomValues(array); return Array.from(array, dec2hex).join(""); } Generate code challenge from code verifier function sha256(plain) { // returns promise ArrayBuffer const encoder = new TextEncoder(); const data = encoder.encode(plain); return window.crypto.subtle.digest("SHA-256", data); } function base64urlencode(a) { var str = ""; var bytes = new Uint8Array(a); var len = bytes.byteLength; for (var i = 0; i < len; i++) { str += String.fromCharCode(bytes[i]); } return btoa(str) .replace(/\+/g, "-") .replace(/\//g, "_") .replace(/=+$/, ""); } async function generateCodeChallengeFromVerifier(v) { var hashed = await sha256(v); var base64encoded = base64urlencode(hashed); return base64encoded; } Here's a working example You can also check the validity of the codes here A: I took this snippet from the passport oauth2 library to generate code verifier and code challenge. 
const code_verifier = base64url(crypto.pseudoRandomBytes(32)); const code_challenge = crypto .createHash("sha256") .update(code_verifier) .digest(); A: Fully working and verified example: const {randomBytes, createHash} = require("node:crypto"); // OR: import {randomBytes, createHash} from "crypto"; function createPKCEPair() { const NUM_OF_BYTES = 22; // Total of 44 characters (1 Bytes = 2 char) (standard states that: 43 chars <= verifier <= 128 chars) const HASH_ALG = "sha256"; const randomVerfier = randomBytes(NUM_OF_BYTES).toString('hex') const hash = createHash(HASH_ALG).update(randomVerfier).digest('base64'); const challange = hash.replace(/\+/g, '-').replace(/\//g, '_').replace(/=+$/, ''); // Clean base64 to make it URL safe return {verifier: randomVerfier, challange} } Run example: createPKCEPair(); // Result: { verifier: '3e2727957a1bd9f47b11ff347fca362b6060941decb4', challange: '1SF5UEwYplIjmAwHUwcitzp9qz8zv98uYflt-tBmwLc' }
Creating a code verifier and challenge for PKCE auth on Spotify API in ReactJS
I'm trying to add Spotify auth to my single page react application following the doc from their api. So far this is how I generate the codes based on solutions I found online: const generateVerifier = () => { return crypto.randomBytes(64).toString('hex'); } const getChallenge = verifier => { return crypto.createHash('sha256') .update(verifier) .digest('base64') .replace(/\+/g, '-') .replace(/\//g, '_') .replace(/=/g, '') } An example of a pair of codes I created using that technique: verifier: e8c3745e93a9c25ce5c2653ee36f5b4fa010b4f4df8dfbad7055f4d88551dd960fb5b7602cdfa61088951eac36429862946e86d20b15250a8f0159f1ad001605 challenge: CxF5ZvoXa6Cz6IcX3VyRHxMPRXYbv4PADxko3dwPF-I An example of an old pair of codes I created: verifier: 1jp6ku6-16xxjfi-1uteidc-9gjfso-1mcc0wn-tju0lh-tr2d8k-1auq4zk challenge: SRvuz5GW2HhXzHs6b3O_wzJq4sWN0W2ma96QBx_Z77s I then get a response from the API saying "code_verifier was incorrect." What am I doing wrong here?
[ "Try following this guide for generating code for generating code challenge and verifier\nHere are the important parts:\nGenerate Code Verifier\n// GENERATING CODE VERIFIER\nfunction dec2hex(dec) {\n return (\"0\" + dec.toString(16)).substr(-2);\n}\n\nfunction generateCodeVerifier() {\n var array = new Uint32Array(56 / 2);\n window.crypto.getRandomValues(array);\n return Array.from(array, dec2hex).join(\"\");\n}\n\nGenerate code challenge from code verifier\nfunction sha256(plain) {\n // returns promise ArrayBuffer\n const encoder = new TextEncoder();\n const data = encoder.encode(plain);\n return window.crypto.subtle.digest(\"SHA-256\", data);\n}\n\nfunction base64urlencode(a) {\n var str = \"\";\n var bytes = new Uint8Array(a);\n var len = bytes.byteLength;\n for (var i = 0; i < len; i++) {\n str += String.fromCharCode(bytes[i]);\n }\n return btoa(str)\n .replace(/\\+/g, \"-\")\n .replace(/\\//g, \"_\")\n .replace(/=+$/, \"\");\n}\n\nasync function generateCodeChallengeFromVerifier(v) {\n var hashed = await sha256(v);\n var base64encoded = base64urlencode(hashed);\n return base64encoded;\n}\n\nHere's a working example\nYou can also check the validity of the codes here\n", "I took this snippet from the passport oauth2 library to generate code verifier and code challenge.\nconst code_verifier = base64url(crypto.pseudoRandomBytes(32));\n\nconst code_challenge = crypto\n .createHash(\"sha256\")\n .update(code_verifier)\n .digest();\n\n", "Fully working and verified example:\nconst {randomBytes, createHash} = require(\"node:crypto\");\n// OR: import {randomBytes, createHash} from \"crypto\";\n\nfunction createPKCEPair() {\n const NUM_OF_BYTES = 22; // Total of 44 characters (1 Bytes = 2 char) (standard states that: 43 chars <= verifier <= 128 chars)\n const HASH_ALG = \"sha256\";\n const randomVerfier = randomBytes(NUM_OF_BYTES).toString('hex')\n const hash = createHash(HASH_ALG).update(randomVerfier).digest('base64');\n const challange = hash.replace(/\\+/g, 
'-').replace(/\\//g, '_').replace(/=+$/, ''); // Clean base64 to make it URL safe\n return {verifier: randomVerfier, challange}\n}\n\nRun example:\ncreatePKCEPair();\n// Result:\n{\n verifier: '3e2727957a1bd9f47b11ff347fca362b6060941decb4',\n challange: '1SF5UEwYplIjmAwHUwcitzp9qz8zv98uYflt-tBmwLc'\n}\n\n" ]
[ 15, 1, 0 ]
[]
[]
[ "javascript", "oauth", "pkce", "reactjs", "spotify" ]
stackoverflow_0063309409_javascript_oauth_pkce_reactjs_spotify.txt
Q: Search github file contents by filename I am trying to identify use cases of a specific python package on github. Is there a way you could search all requirements.txt files on repositories written in python for a string ? A: From the Web UI In https://github.com/search, type : django filename:requirements.txt language:python in:requirements.txt like this : From Github API https://api.github.com/search/code?q=django+in:requirements.txt+filename:requirements.txt+language:python+org:openmicroscopy For the Github API case, you have to give a user, an organization or a repository Check Search Code doc Note that filename filter & string data are no exact match A: It's 2022 and GitHub search has changed a bit. You can now just use the more flexible path: operator like this: path:/^requirements.txt$/ django Search results here. By default a search term now searches both the filename and the content, so to limit it to just filename/path we use path:. Also we use a regex to specify we want files named exactly requirements.txt and not things like dev-requirements.txt which path:requirements.txt would match. I couldn't find a way to just search for requirements files in Python repos though. You can specify a language with language:python but that applies at a per-file level, not at the repo level and those files aren't Python themselves. Fortunately requirements.txt files seem to almost entirely be used in Python projects. More info on GitHub's powerful new search can be found here.
Search github file contents by filename
I am trying to identify use cases of a specific python package on github. Is there a way you could search all requirements.txt files on repositories written in python for a string ?
[ "From the Web UI\nIn https://github.com/search, type :\ndjango filename:requirements.txt language:python in:requirements.txt\n\nlike this :\n\nFrom Github API\nhttps://api.github.com/search/code?q=django+in:requirements.txt+filename:requirements.txt+language:python+org:openmicroscopy\n\nFor the Github API case, you have to give a user, an organization or a repository\nCheck Search Code doc\nNote that filename filter & string data are no exact match\n", "It's 2022 and GitHub search has changed a bit.\nYou can now just use the more flexible path: operator like this:\npath:/^requirements.txt$/ django\n\nSearch results here. By default a search term now searches both the filename and the content, so to limit it to just filename/path we use path:. Also we use a regex to specify we want files named exactly requirements.txt and not things like dev-requirements.txt which path:requirements.txt would match.\nI couldn't find a way to just search for requirements files in Python repos though. You can specify a language with language:python but that applies at a per-file level, not at the repo level and those files aren't Python themselves. Fortunately requirements.txt files seem to almost entirely be used in Python projects.\nMore info on GitHub's powerful new search can be found here.\n" ]
[ 4, 0 ]
[]
[]
[ "github" ]
stackoverflow_0041858706_github.txt
Q: How/when to use expressions instead of returns in rust I'm doing this advent of code in rust to learn it (started today with the rust book too since the language is becoming more interesting to me) and I'm having some doubts as of how to comply with rust style. Just read in the book that in rust is more idiomatic to use an expression at the end of a function than a return statement, so I've been going through these past days challenges and refactoring them for this but I have some doubts. First the commits where I change it from returns to expressions: https://github.com/nerock/AdventOfCode2022/commit/db9649760b18b92bf56de6586791285522caf2b4 https://github.com/nerock/AdventOfCode2022/commit/b98b68c0fa8c7df0dcdba14eb642400468781084 If you look at day1.rs method get_top_three, I've modified it where I create a variable, and I assign it in an if, else if, else but my initial idea was to not have the else at all and have something like if current > first { (current, first, second); } else if current > second { top_three = (first, current, second); } else if current > third { top_three = (first, second, current); } (first, second, third) would this be possible in some way and maybe better? I've gotten used to avoid having an else expression and just returning the "default" result but maybe this is not the way in rust. Besides this I'm still not sure when to use match in place of if, so if any of you look at my code and has some comments about my uses (or anything else to be honest) it would be greatly appreciated. A: With a lot of missing context and guessing, I assume that your question is as follows. 
You had the code: fn get_top_three(current: i32, first: i32, second: i32, third: i32) -> (i32, i32, i32) { if current > first { return (current, first, second); } else if current > second { return (first, current, second); } else if current > third { return (first, second, current); } (first, second, third) } And you heard that it's better to not do return, so you refactored it to: fn get_top_three(current: i32, first: i32, second: i32, third: i32) -> (i32, i32, i32) { let top_three: (i32, i32, i32); if current > first { top_three = (current, first, second); } else if current > second { top_three = (first, current, second); } else if current > third { top_three = (first, second, current); } else { top_three = (first, second, third) } top_three } And now your question is, is this better? If yes, why? What could you do differently? The answer is: Yes and no. What you are missing is that everything can be used as a return expression. Including, and very important in this case, if statements. So you can rewrite the entire function like this: fn get_top_three(current: i32, first: i32, second: i32, third: i32) -> (i32, i32, i32) { if current > first { (current, first, second) } else if current > second { (first, current, second) } else if current > third { (first, second, current) } else { (first, second, third) } } And maybe with this in mind, you now realize how powerful those expressions really are :) So why does this work? Everything in Rust has a value. For example: Every block of code in a {} has a value: fn main() { let x = { let y = 10; y }; println!("{}", x); } 10 Every if-else has a value: fn main() { let condition = false; let y = if condition { 5 } else { 15 }; println!("{}", y); } 15 And so on. Basically every block of code that is surrounded by {} has a value, and you can set its value by a value expression in its last line. Just like at the end of a function. 
And you can assign its value directly to a variable, or forward it to its surrounding code block (like the way I refactored your code, where the value of the if block gets directly forwarded to the function). That is what makes this language feature so powerful.
How/when to use expressions instead of returns in rust
I'm doing this advent of code in rust to learn it (started today with the rust book too since the language is becoming more interesting to me) and I'm having some doubts as of how to comply with rust style. Just read in the book that in rust is more idiomatic to use an expression at the end of a function than a return statement, so I've been going through these past days challenges and refactoring them for this but I have some doubts. First the commits where I change it from returns to expressions: https://github.com/nerock/AdventOfCode2022/commit/db9649760b18b92bf56de6586791285522caf2b4 https://github.com/nerock/AdventOfCode2022/commit/b98b68c0fa8c7df0dcdba14eb642400468781084 If you look at day1.rs method get_top_three, I've modified it where I create a variable, and I assign it in an if, else if, else but my initial idea was to not have the else at all and have something like if current > first { (current, first, second); } else if current > second { top_three = (first, current, second); } else if current > third { top_three = (first, second, current); } (first, second, third) would this be possible in some way and maybe better? I've gotten used to avoid having an else expression and just returning the "default" result but maybe this is not the way in rust. Besides this I'm still not sure when to use match in place of if, so if any of you look at my code and has some comments about my uses (or anything else to be honest) it would be greatly appreciated.
[ "With a lot of missing context and guessing, I assume that your question is as follows.\nYou had the code:\nfn get_top_three(current: i32, first: i32, second: i32, third: i32) -> (i32, i32, i32) {\n if current > first {\n return (current, first, second);\n } else if current > second {\n return (first, current, second);\n } else if current > third {\n return (first, second, current);\n }\n\n (first, second, third)\n}\n\nAnd you heard that it's better to not do return, so you refactored it to:\nfn get_top_three(current: i32, first: i32, second: i32, third: i32) -> (i32, i32, i32) {\n let top_three: (i32, i32, i32);\n\n if current > first {\n top_three = (current, first, second);\n } else if current > second {\n top_three = (first, current, second);\n } else if current > third {\n top_three = (first, second, current);\n } else {\n top_three = (first, second, third)\n }\n\n top_three\n}\n\nAnd now your question is, is this better? If yes, why? What could you do differently?\n\nThe answer is: Yes and no. What you are missing is that everything can be used as a return expression. Including, and very important in this case, if statements.\nSo you can rewrite the entire function like this:\nfn get_top_three(current: i32, first: i32, second: i32, third: i32) -> (i32, i32, i32) {\n if current > first {\n (current, first, second)\n } else if current > second {\n (first, current, second)\n } else if current > third {\n (first, second, current)\n } else {\n (first, second, third)\n }\n}\n\nAnd maybe with this in mind, you now realize how powerful those expressions really are :)\nSo why does this work?\nEverything in Rust has a value. For example:\n\nEvery block of code in a {} has a value:\nfn main() {\n let x = {\n let y = 10;\n y\n };\n println!(\"{}\", x);\n}\n\n10\n\n\nEvery if-else has a value:\nfn main() {\n let condition = false;\n let y = if condition { 5 } else { 15 };\n println!(\"{}\", y);\n}\n\n15\n\n\n\nAnd so on. 
Basically every block of code that is surrounded by {} has a value, and you can set its value by a value expression in its last line. Just like at the end of a function. And you can assign its value directly to a variable, or forward it to its surrounding code block (like the way I refactored your code, where the value of the if block gets directly forwarded to the function).\nThat is what makes this language feature so powerful.\n" ]
[ 0 ]
[]
[]
[ "expression", "match", "return", "rust" ]
stackoverflow_0074680672_expression_match_return_rust.txt
Q: Firebase - emulators start but attempt to view hosting emulator throws error in log On running firebase emulators:start, everything loads up correctly. I currently have an express web app deployed to firebase using cloud functions combined with firebase hosting, and my code is identical on my local machine. When I attempt to emulate it, I can access the hub but hosting throws the below error on an attempted load: > Error: EBADF: bad file descriptor, uv_pipe_open > at Object._forkChild (node:child_process:175:5) > at setupChildProcessIpcChannel (node:internal/bootstrap/pre_execution:451:30) > at prepareMainThreadExecution (node:internal/bootstrap/pre_execution:71:3) > at node:internal/main/run_main_module:9:1 { > errno: -4083, > code: 'EBADF', > syscall: 'uv_pipe_open' These are the contents of my firebase.json { "hosting": { "rewrites": [ { "source": "**", "function": "app" } ], "public": "static", "ignore": [ "firebase.json", "**/.*", "**/node_modules/**" ] }, "firestore": { "rules": "firestore.rules", "indexes": "firestore.indexes.json" }, "functions": [ { "runtime": "nodejs16", "source": "/functions", "codebase": "default", "ignore": [ "node_modules", ".git", "firebase-debug.log", "firebase-debug.*.log" ] } ], "storage": { "rules": "storage.rules" }, "emulators": { "auth": { "port": 9099 }, "functions": { "port": 5001 }, "firestore": { "port": 8080 }, "hosting": { "port": 5000 }, "storage": { "port": 9199 }, "ui": { "enabled": true }, "singleProjectMode": true } } My file tree is as so: β”œβ”€β”€ firebase.json β”œβ”€β”€ firestore-debug.log β”œβ”€β”€ firestore.indexes.json β”œβ”€β”€ firestore.rules β”œβ”€β”€ functions β”‚ β”œβ”€β”€ firebase-debug.log β”‚ β”œβ”€β”€ firestore-debug.log β”‚ β”œβ”€β”€ index.js β”‚ β”œβ”€β”€ index.pug β”‚ β”œβ”€β”€ package-lock.json β”‚ β”œβ”€β”€ package.json β”‚ └── ui-debug.log β”œβ”€β”€ index.html β”œβ”€β”€ main.js β”œβ”€β”€ main.pug β”œβ”€β”€ package-lock.json β”œβ”€β”€ package.json β”œβ”€β”€ static β”‚ β”œβ”€β”€ 
android-chrome-192x192.png β”‚ β”œβ”€β”€ apple-touch-icon.png β”‚ β”œβ”€β”€ browserconfig.xml β”‚ β”œβ”€β”€ css β”‚ β”‚ └── styles.css β”‚ β”œβ”€β”€ favicon-16x16.png β”‚ β”œβ”€β”€ favicon-32x32.png β”‚ β”œβ”€β”€ favicon.ico β”‚ β”œβ”€β”€ js β”‚ β”‚ β”œβ”€β”€ firebaseComponents.js β”‚ β”‚ └── getUserState.js β”‚ β”œβ”€β”€ mstile-150x150.png β”‚ β”œβ”€β”€ safari-pinned-tab.svg β”‚ β”œβ”€β”€ site.webmanifest β”‚ β”œβ”€β”€ tapewinder_draft1.png β”‚ β”œβ”€β”€ tapewinder_draft3.png β”‚ β”œβ”€β”€ tapewinder_placeholder.png β”‚ β”œβ”€β”€ tapewinder_placeholder_blue.png β”‚ β”œβ”€β”€ tapewinder_placeholder_green.png β”‚ β”œβ”€β”€ tapewinder_placeholder_orange.png β”‚ β”œβ”€β”€ tapewinder_placeholder_pink.png β”‚ β”œβ”€β”€ tapewinder_placeholder_raisin.png β”‚ β”œβ”€β”€ tapewinder_placeholder_red.png β”‚ └── tapewinder_whitebg.png β”œβ”€β”€ storage.rules └── ui-debug.log I stashed all changes since my last deploy to firebase, messed with my firebase.json and so far no result. A: Fixed the problem! I updated my version of node using npm i node@latest and everything started working again just fine.
Firebase - emulators start but attempt to view hosting emulator throws error in log
On running firebase emulators:start, everything loads up correctly. I currently have an express web app deployed to firebase using cloud functions combined with firebase hosting, and my code is identical on my local machine. When I attempt to emulate it, I can access the hub but hosting throws the below error on an attempted load: > Error: EBADF: bad file descriptor, uv_pipe_open > at Object._forkChild (node:child_process:175:5) > at setupChildProcessIpcChannel (node:internal/bootstrap/pre_execution:451:30) > at prepareMainThreadExecution (node:internal/bootstrap/pre_execution:71:3) > at node:internal/main/run_main_module:9:1 { > errno: -4083, > code: 'EBADF', > syscall: 'uv_pipe_open' These are the contents of my firebase.json { "hosting": { "rewrites": [ { "source": "**", "function": "app" } ], "public": "static", "ignore": [ "firebase.json", "**/.*", "**/node_modules/**" ] }, "firestore": { "rules": "firestore.rules", "indexes": "firestore.indexes.json" }, "functions": [ { "runtime": "nodejs16", "source": "/functions", "codebase": "default", "ignore": [ "node_modules", ".git", "firebase-debug.log", "firebase-debug.*.log" ] } ], "storage": { "rules": "storage.rules" }, "emulators": { "auth": { "port": 9099 }, "functions": { "port": 5001 }, "firestore": { "port": 8080 }, "hosting": { "port": 5000 }, "storage": { "port": 9199 }, "ui": { "enabled": true }, "singleProjectMode": true } } My file tree is as so: β”œβ”€β”€ firebase.json β”œβ”€β”€ firestore-debug.log β”œβ”€β”€ firestore.indexes.json β”œβ”€β”€ firestore.rules β”œβ”€β”€ functions β”‚ β”œβ”€β”€ firebase-debug.log β”‚ β”œβ”€β”€ firestore-debug.log β”‚ β”œβ”€β”€ index.js β”‚ β”œβ”€β”€ index.pug β”‚ β”œβ”€β”€ package-lock.json β”‚ β”œβ”€β”€ package.json β”‚ └── ui-debug.log β”œβ”€β”€ index.html β”œβ”€β”€ main.js β”œβ”€β”€ main.pug β”œβ”€β”€ package-lock.json β”œβ”€β”€ package.json β”œβ”€β”€ static β”‚ β”œβ”€β”€ android-chrome-192x192.png β”‚ β”œβ”€β”€ apple-touch-icon.png β”‚ β”œβ”€β”€ browserconfig.xml β”‚ 
β”œβ”€β”€ css β”‚ β”‚ └── styles.css β”‚ β”œβ”€β”€ favicon-16x16.png β”‚ β”œβ”€β”€ favicon-32x32.png β”‚ β”œβ”€β”€ favicon.ico β”‚ β”œβ”€β”€ js β”‚ β”‚ β”œβ”€β”€ firebaseComponents.js β”‚ β”‚ └── getUserState.js β”‚ β”œβ”€β”€ mstile-150x150.png β”‚ β”œβ”€β”€ safari-pinned-tab.svg β”‚ β”œβ”€β”€ site.webmanifest β”‚ β”œβ”€β”€ tapewinder_draft1.png β”‚ β”œβ”€β”€ tapewinder_draft3.png β”‚ β”œβ”€β”€ tapewinder_placeholder.png β”‚ β”œβ”€β”€ tapewinder_placeholder_blue.png β”‚ β”œβ”€β”€ tapewinder_placeholder_green.png β”‚ β”œβ”€β”€ tapewinder_placeholder_orange.png β”‚ β”œβ”€β”€ tapewinder_placeholder_pink.png β”‚ β”œβ”€β”€ tapewinder_placeholder_raisin.png β”‚ β”œβ”€β”€ tapewinder_placeholder_red.png β”‚ └── tapewinder_whitebg.png β”œβ”€β”€ storage.rules └── ui-debug.log I stashed all changes since my last deploy to firebase, messed with my firebase.json and so far no result.
[ "Fixed the problem! I updated my version of node using npm i node@latest and everything started working again just fine.\n" ]
[ 0 ]
[]
[]
[ "express", "firebase", "google_cloud_functions", "google_cloud_platform", "node.js" ]
stackoverflow_0074680610_express_firebase_google_cloud_functions_google_cloud_platform_node.js.txt
Q: Deployed on cPanel, how to avoid access to my app.js file by writing it after the link? I deployed my app on cPanel. However, if you type 'mylink/'+app.js you have access to my app.js file and that goes for all other js files in my backend. How can I revoke access to those files on cPanel without revoking access to all .js files? Thanks I tried going on Hotlink Protection and removing access to .js butt hat simply removed access to all javascript files. A: You need to set your app in a different folder than your domain name. Please see this video for steps on how to deploy https://youtu.be/sIcy3q3Ib_s
Deployed on cPanel, how to avoid access to my app.js file by writing it after the link?
I deployed my app on cPanel. However, if you type 'mylink/'+app.js you have access to my app.js file and that goes for all other js files in my backend. How can I revoke access to those files on cPanel without revoking access to all .js files? Thanks I tried going on Hotlink Protection and removing access to .js butt hat simply removed access to all javascript files.
[ "You need to set your app in a different folder than your domain name.\nPlease see this video for steps on how to deploy https://youtu.be/sIcy3q3Ib_s\n" ]
[ 0 ]
[]
[]
[ "cpanel", "express", "node.js" ]
stackoverflow_0074647262_cpanel_express_node.js.txt
Q: Show mouse hover info as annotation in a plotly R boxplot I have several boxplots and I'd like to always show the information of their mouse hover events. I need that because I am generating pdfs of those charts. This is a reproducible example: library(plotly) set.seed(1) plot_ly(y = ~rnorm(500), type = "box", hoverinfo = 'y') %>% layout(yaxis = list(hoverformat = '.2f')) First, I tried to keep the hover info always on (when the mouse is not over the chart), which seems not to be possible see this question. Now I am trying to add the information from the mouse hover as annotations. However, I do not have the information for the annotations upfront because it is generated by the chart (I mean, the quartiles, max, min and mean were generated into the boxplot). Can I get the max, min and quartiles directly from the chart to use as annotation by any chance? The expected result should show the information below on the chart even when the mouse is not over it. A: You can manually calculate the values that Plotly generates for you, then use annotations to add the text. Alternatively, you can use the values Plotly generated for you and use annotations to add the text. The only problem I foresee is that the text could overlap..for example if the lower fence and the minimum were the same value. Here's a method to use the data Plotly generated for you. This method uses htmlwidgets onRender(). I used toPrecision() to round the values to 3 significant digits. 
library(plotly) library(tidyverse) set.seed(1) fig <- plot_ly(y = ~rnorm(500), type = "box") %>% # you don't need 'hoverinfo' here layout(yaxis = list(hoverformat = '.2f')) fig %>% htmlwidgets::onRender( "function(el, x) { /* call the plot in JS */ hc = el.calcdata[0][0]; /* extract calculated hovertext */ Plotly.newPlot(el.id, [{y: el.data[0].y, type: 'box', hoverinfo: 'skip'}], {yaxis: {zeroline: false}, annotations: [ {x: .3, y: hc.lf, showarrow: false, xanchor: 'left', /* add each label*/ text: 'lower fence: ' + hc.lf.toPrecision(3)}, {x: .3, y: hc.uf, showarrow: false, xanchor: 'left', text: 'upper fence: ' + hc.uf.toPrecision(3)}, {x: .3, y: hc.max, showarrow: false, xanchor: 'left', text: 'max: ' + hc.max.toPrecision(3)}, {x: .3, y: hc.min, showarrow: false, xanchor: 'left', text: 'min: ' + hc.min.toPrecision(3)}, {x: .3, y: hc.med, showarrow: false, xanchor: 'left', text: 'median: ' + hc.med.toPrecision(3)}, {x: .3, y: hc.q1, showarrow: false, xanchor: 'left', text: 'q1: ' + hc.q1.toPrecision(3)}, {x: .3, y: hc.q3, showarrow: false, xanchor: 'left', text: 'q3: ' + hc.q3.toPrecision(3)} ]} /*end annotations*/ ) /* regenerate the plot with annotations and no hovertext*/ }")
Show mouse hover info as annotation in a plotly R boxplot
I have several boxplots and I'd like to always show the information of their mouse hover events. I need that because I am generating pdfs of those charts. This is a reproducible example: library(plotly) set.seed(1) plot_ly(y = ~rnorm(500), type = "box", hoverinfo = 'y') %>% layout(yaxis = list(hoverformat = '.2f')) First, I tried to keep the hover info always on (when the mouse is not over the chart), which seems not to be possible see this question. Now I am trying to add the information from the mouse hover as annotations. However, I do not have the information for the annotations upfront because it is generated by the chart (I mean, the quartiles, max, min and mean were generated into the boxplot). Can I get the max, min and quartiles directly from the chart to use as annotation by any chance? The expected result should show the information below on the chart even when the mouse is not over it.
[ "You can manually calculate the values that Plotly generates for you, then use annotations to add the text. Alternatively, you can use the values Plotly generated for you and use annotations to add the text. The only problem I foresee is that the text could overlap..for example if the lower fence and the minimum were the same value.\nHere's a method to use the data Plotly generated for you. This method uses htmlwidgets onRender(). I used toPrecision() to round the values to 3 significant digits.\nlibrary(plotly)\nlibrary(tidyverse)\nset.seed(1) \nfig <- plot_ly(y = ~rnorm(500), type = \"box\") %>% # you don't need 'hoverinfo' here\n layout(yaxis = list(hoverformat = '.2f'))\n\nfig %>% htmlwidgets::onRender(\n \"function(el, x) { /* call the plot in JS */\n hc = el.calcdata[0][0]; /* extract calculated hovertext */\n Plotly.newPlot(el.id,\n [{y: el.data[0].y, type: 'box', hoverinfo: 'skip'}],\n {yaxis: {zeroline: false}, annotations: [\n {x: .3, y: hc.lf, showarrow: false, xanchor: 'left', /* add each label*/\n text: 'lower fence: ' + hc.lf.toPrecision(3)},\n {x: .3, y: hc.uf, showarrow: false, xanchor: 'left',\n text: 'upper fence: ' + hc.uf.toPrecision(3)},\n {x: .3, y: hc.max, showarrow: false, xanchor: 'left',\n text: 'max: ' + hc.max.toPrecision(3)},\n {x: .3, y: hc.min, showarrow: false, xanchor: 'left',\n text: 'min: ' + hc.min.toPrecision(3)},\n {x: .3, y: hc.med, showarrow: false, xanchor: 'left',\n text: 'median: ' + hc.med.toPrecision(3)},\n {x: .3, y: hc.q1, showarrow: false, xanchor: 'left',\n text: 'q1: ' + hc.q1.toPrecision(3)},\n {x: .3, y: hc.q3, showarrow: false, xanchor: 'left',\n text: 'q3: ' + hc.q3.toPrecision(3)}\n ]} /*end annotations*/\n ) /* regenerate the plot with annotations and no hovertext*/\n }\")\n\n\n" ]
[ 0 ]
[]
[]
[ "boxplot", "plotly", "r" ]
stackoverflow_0074668844_boxplot_plotly_r.txt
Q: Using a counter to find multiple occurences on same day from ID & date I am trying find when a person has multiple occurences on the same day & when they do not. My data looks something like this data have; input id date ; datalines ; 1 nov10 1 nov15 2 nov11 2 nov11 2 nov14 3 nov12 4 nov17 4 nov19 4 nov19 etc...; I want to create a new variable to show when an occurence happens on the same day or not. I want my end rseult to look like data want; input id date occ; 1 nov10 1 1 nov15 1 2 nov11 2 2 nov11 2 2 nov14 1 3 nov12 1 4 nov17 1 4 nov19 2 4 nov19 2 etc...; THis is what I tried but it is not working for each date instead only doing it if the date repeats on the first. Here is my code data want ; set have ; by id date; if first.date then occ = 1; else occ = 2; run; A: Your IF/THEN logic is just a complicated way to do occ = 1 + not first.date; Looks like you want to instead test whether or not there are multiple observations per date. occ = 1 + not (first.date and last.date) ;
Using a counter to find multiple occurences on same day from ID & date
I am trying find when a person has multiple occurences on the same day & when they do not. My data looks something like this data have; input id date ; datalines ; 1 nov10 1 nov15 2 nov11 2 nov11 2 nov14 3 nov12 4 nov17 4 nov19 4 nov19 etc...; I want to create a new variable to show when an occurence happens on the same day or not. I want my end rseult to look like data want; input id date occ; 1 nov10 1 1 nov15 1 2 nov11 2 2 nov11 2 2 nov14 1 3 nov12 1 4 nov17 1 4 nov19 2 4 nov19 2 etc...; THis is what I tried but it is not working for each date instead only doing it if the date repeats on the first. Here is my code data want ; set have ; by id date; if first.date then occ = 1; else occ = 2; run;
[ "Your IF/THEN logic is just a complicated way to do\nocc = 1 + not first.date;\n\nLooks like you want to instead test whether or not there are multiple observations per date.\nocc = 1 + not (first.date and last.date) ;\n\n" ]
[ 0 ]
[]
[]
[ "sas" ]
stackoverflow_0074680760_sas.txt
Q: Why is my program breaking when I add a void function to the main? It works without it I have 2 functions + my main. One of them is a void function that "prints out instructions" The other is the one that actually does what I want it to do. For some reason when the play function is by itself in the main it works just fine, but as soon as I add the print instructions function, it breaks and I cannot figure out why it's doing that. Functions: int playGame(); void printInstructions(); ` int playGame() { int dice[100]; int diceAmount, j, sum = 0; printf("How many dice would you like to roll? "); scanf("%d",&diceAmount); for( int i = 0; i < diceAmount; i++) { dice[j] = rand() % 6 + 1; sum += dice[j]; printf("Dice %d: %d\n",i+1,dice[j]); } printf("---------\nSum: %d", sum); } ` ` void printInstructions() { printf("--------------\n"); printf("- HOW TO WIN -\n"); printf("--------------\n"); printf("Your dice roll must equal 7 or 11 or else you lose.\n"); printf("Want to test your luck?\n\n"); } ` Whole thing: ` #include <stdio.h> #include <stdlib.h> #include <time.h> int playGame(); void printInstructions(); int playGame() { int dice[100]; int diceAmount, j, sum = 0; printf("How many dice would you like to roll? "); scanf("%d",&diceAmount); for( int i = 0; i < diceAmount; i++) { dice[j] = rand() % 6 + 1; sum += dice[j]; printf("Dice %d: %d\n",i+1,dice[j]); } printf("---------\nSum: %d", sum); } int main() { printInstructions(); playGame(); } void printInstructions() { printf("--------------\n"); printf("- HOW TO WIN -\n"); printf("--------------\n"); printf("Your dice roll must equal 7 or 11 or else you lose.\n"); printf("Want to test your luck?\n\n"); } ` Without the printInstructions(); With the printIUnstruction(); Why is it breaking? A: With the suggestions from UnholySheep & Martin James, I was able to get my code to work. 
Here is the working code: #include <stdio.h> #include <stdlib.h> #include <time.h> int playGame(); void printInstructions() { printf("--------------\n"); printf("- HOW TO WIN -\n"); printf("--------------\n"); printf("Your dice roll must equal 7 or 11 or else you lose.\n"); printf("Want to test your luck?\n\n"); } int playGame() { int dice; int diceAmount, sum = 0; printf("How many dice would you like to roll? "); scanf("%d",&diceAmount); for( int i = 0; i < diceAmount; i++) { dice = rand() % 6 + 1; sum += dice; printf("Dice %d: %d\n",i+1,dice); } printf("---------\nSum: %d\n",sum); } int main() { printInstructions(); playGame(); } Result:
Why is my program breaking when I add a void function to the main? It works without it
I have 2 functions + my main. One of them is a void function that "prints out instructions" The other is the one that actually does what I want it to do. For some reason when the play function is by itself in the main it works just fine, but as soon as I add the print instructions function, it breaks and I cannot figure out why it's doing that. Functions: int playGame(); void printInstructions(); ` int playGame() { int dice[100]; int diceAmount, j, sum = 0; printf("How many dice would you like to roll? "); scanf("%d",&diceAmount); for( int i = 0; i < diceAmount; i++) { dice[j] = rand() % 6 + 1; sum += dice[j]; printf("Dice %d: %d\n",i+1,dice[j]); } printf("---------\nSum: %d", sum); } ` ` void printInstructions() { printf("--------------\n"); printf("- HOW TO WIN -\n"); printf("--------------\n"); printf("Your dice roll must equal 7 or 11 or else you lose.\n"); printf("Want to test your luck?\n\n"); } ` Whole thing: ` #include <stdio.h> #include <stdlib.h> #include <time.h> int playGame(); void printInstructions(); int playGame() { int dice[100]; int diceAmount, j, sum = 0; printf("How many dice would you like to roll? "); scanf("%d",&diceAmount); for( int i = 0; i < diceAmount; i++) { dice[j] = rand() % 6 + 1; sum += dice[j]; printf("Dice %d: %d\n",i+1,dice[j]); } printf("---------\nSum: %d", sum); } int main() { printInstructions(); playGame(); } void printInstructions() { printf("--------------\n"); printf("- HOW TO WIN -\n"); printf("--------------\n"); printf("Your dice roll must equal 7 or 11 or else you lose.\n"); printf("Want to test your luck?\n\n"); } ` Without the printInstructions(); With the printIUnstruction(); Why is it breaking?
[ "With the suggestions from UnholySheep & Martin James, I was able to get my code to work.\nHere is the working code:\n#include <stdio.h>\n#include <stdlib.h>\n#include <time.h>\n\nint playGame();\n\nvoid printInstructions()\n{\n printf(\"--------------\\n\");\n printf(\"- HOW TO WIN -\\n\");\n printf(\"--------------\\n\");\n printf(\"Your dice roll must equal 7 or 11 or else you lose.\\n\");\n printf(\"Want to test your luck?\\n\\n\");\n}\n\nint playGame()\n {\n int dice;\n int diceAmount, sum = 0;\n \n printf(\"How many dice would you like to roll? \");\n scanf(\"%d\",&diceAmount);\n \n for( int i = 0; i < diceAmount; i++)\n {\n dice = rand() % 6 + 1;\n sum += dice;\n printf(\"Dice %d: %d\\n\",i+1,dice);\n } \n printf(\"---------\\nSum: %d\\n\",sum);\n \n }\n\nint main()\n {\n printInstructions();\n playGame();\n \n }\n\nResult:\n\n" ]
[ 1 ]
[]
[]
[ "c", "for_loop", "function", "printf" ]
stackoverflow_0074681061_c_for_loop_function_printf.txt
Q: Get last 12 months based on the most recent dates in the file and not current date How to get last 12 months based on the most recent dates in the file and not current date. enter image description here Most recent date is 2021-05-07 Thank you This the query that I tried to run but there's no result because this query only take in consideration the current date. `SELECT [Contact_Id] ,([Add_Datetime]) ,[Arena_Name] ,[Event_Date] ,[Event_Name] ,[Season_Name] ,[Seat_Num] ,[Num_Seats] ,[Paid_value] FROM [Group CH].[dbo].[CustomerData] Where [Add_Datetime] >= DATEADD(Month,DATEDIFF(Month,0, GETDATE())-12,0)` A: You're looking to compare the date to the latest date in the table, less 12 months: where Add_Datetime > ( select top(1) dateadd(month, -12, Add_Datetime) from [Group CH].dbo.CustomerData order by Add_Datetime desc );
Get last 12 months based on the most recent dates in the file and not current date
How to get last 12 months based on the most recent dates in the file and not current date. enter image description here Most recent date is 2021-05-07 Thank you This the query that I tried to run but there's no result because this query only take in consideration the current date. `SELECT [Contact_Id] ,([Add_Datetime]) ,[Arena_Name] ,[Event_Date] ,[Event_Name] ,[Season_Name] ,[Seat_Num] ,[Num_Seats] ,[Paid_value] FROM [Group CH].[dbo].[CustomerData] Where [Add_Datetime] >= DATEADD(Month,DATEDIFF(Month,0, GETDATE())-12,0)`
[ "You're looking to compare the date to the latest date in the table, less 12 months:\nwhere Add_Datetime > (\n select top(1) dateadd(month, -12, Add_Datetime)\n from [Group CH].dbo.CustomerData\n order by Add_Datetime desc\n);\n\n" ]
[ 0 ]
[]
[]
[ "date", "sql", "sql_server" ]
stackoverflow_0074681194_date_sql_sql_server.txt
Q: Keep only the three highest values in array of array in php I have an array of 83 arrays (an array that I have a chunk in 83). I'm trying to keep only the three highest values of each array. All the numbers in each array are included between -1 and 1. There is necessarily a 1 in each array that I don't want to count in my three highest values. Array ( [0] => Array ( [1] => 0.5278533158407 [2] => 0.4080014506744 [3] => 0.5086879008467 [5] => 0.3950042642736 [6] => 1 [1] => Array ( [1] => 1 [2] => 0.52873390443395 [3] => 0.52518076782133 [4] => 0.52983621494599 [5] => 0.54392829322042 [6] => 0.53636363636364 Etc... I'm trying the below code but it doesn't work. for ($i = 0; $i < sizeof($list_chunk); $i++) { arsort($list_chunk[$i]); } for ($i = 0; $i < sizeof($list_chunk); $i++) { array_slice($list_chunk[$i],1,3,true); } print("<pre>"); print_r($list_chunk); print("</pre>"); Someone could help me? Thanks a lot A: Your code seems to be almost correct. You are sorting the arrays in descending order, but you are not storing the result of array_slice in any variable. As a result, the changes made by array_slice are not being saved. Here is how you can fix the issue: // Sort the arrays in descending order for ($i = 0; $i < sizeof($list_chunk); $i++) { arsort($list_chunk[$i]); } // Keep only the first three elements of each array for ($i = 0; $i < sizeof($list_chunk); $i++) { $list_chunk[$i] = array_slice($list_chunk[$i], 1, 3, true); } print("<pre>"); print_r($list_chunk); print("</pre>"); Here, we are assigning the result of array_slice to the same variable. This will save the changes made by array_slice and you should get the desired output. Alternatively, you can also use the array_splice function to achieve the same result. 
Here is how you can use it: // Sort the arrays in descending order for ($i = 0; $i < sizeof($list_chunk); $i++) { arsort($list_chunk[$i]); } // Keep only the first three elements of each array for ($i = 0; $i < sizeof($list_chunk); $i++) { array_splice($list_chunk[$i], 3); } print("<pre>"); print_r($list_chunk); print("</pre>"); This will have the same effect as array_slice, but you don't have to specify the start and end indexes explicitly. You just have to specify the number of elements to keep, and array_splice will remove all the elements after that. A: This solution uses a foreach loop with a reference to the subarray. The subarray is sorted in descending order of size. The first to third elements are extracted. If the first element is 1, then 3 elements are extracted from the 2 element onwards. foreach($array as &$arr){ rsort($arr); $start = $arr[0] == 1 ? 1 : 0; $arr = array_slice($arr,$start,3); } Result: array ( 0 => array ( 0 => 0.5278533158407, 1 => 0.5086879008467, 2 => 0.4080014506744, ), 1 => array ( 0 => 0.54392829322042, 1 => 0.53636363636364, 2 => 0.52983621494599, ), ) Full sample to try: https://3v4l.org/pUhic
Keep only the three highest values in array of array in php
I have an array of 83 arrays (an array that I have a chunk in 83). I'm trying to keep only the three highest values of each array. All the numbers in each array are included between -1 and 1. There is necessarily a 1 in each array that I don't want to count in my three highest values. Array ( [0] => Array ( [1] => 0.5278533158407 [2] => 0.4080014506744 [3] => 0.5086879008467 [5] => 0.3950042642736 [6] => 1 [1] => Array ( [1] => 1 [2] => 0.52873390443395 [3] => 0.52518076782133 [4] => 0.52983621494599 [5] => 0.54392829322042 [6] => 0.53636363636364 Etc... I'm trying the below code but it doesn't work. for ($i = 0; $i < sizeof($list_chunk); $i++) { arsort($list_chunk[$i]); } for ($i = 0; $i < sizeof($list_chunk); $i++) { array_slice($list_chunk[$i],1,3,true); } print("<pre>"); print_r($list_chunk); print("</pre>"); Someone could help me? Thanks a lot
[ "Your code seems to be almost correct. You are sorting the arrays in descending order, but you are not storing the result of array_slice in any variable. As a result, the changes made by array_slice are not being saved.\nHere is how you can fix the issue:\n// Sort the arrays in descending order\nfor ($i = 0; $i < sizeof($list_chunk); $i++) {\n arsort($list_chunk[$i]);\n}\n\n// Keep only the first three elements of each array\nfor ($i = 0; $i < sizeof($list_chunk); $i++) {\n $list_chunk[$i] = array_slice($list_chunk[$i], 1, 3, true);\n}\n\nprint(\"<pre>\");\nprint_r($list_chunk);\nprint(\"</pre>\");\n\nHere, we are assigning the result of array_slice to the same variable. This will save the changes made by array_slice and you should get the desired output.\nAlternatively, you can also use the array_splice function to achieve the same result. Here is how you can use it:\n// Sort the arrays in descending order\nfor ($i = 0; $i < sizeof($list_chunk); $i++) {\n arsort($list_chunk[$i]);\n}\n\n// Keep only the first three elements of each array\nfor ($i = 0; $i < sizeof($list_chunk); $i++) {\n array_splice($list_chunk[$i], 3);\n}\n\nprint(\"<pre>\");\nprint_r($list_chunk);\nprint(\"</pre>\");\n\nThis will have the same effect as array_slice, but you don't have to specify the start and end indexes explicitly. You just have to specify the number of elements to keep, and array_splice will remove all the elements after that.\n", "This solution uses a foreach loop with a reference to the subarray. The subarray is sorted in descending order of size. The first to third elements are extracted. If the first element is 1, then 3 elements are extracted from the 2 element onwards.\nforeach($array as &$arr){\n rsort($arr);\n $start = $arr[0] == 1 ? 
1 : 0;\n $arr = array_slice($arr,$start,3);\n}\n\nResult:\narray (\n 0 => \n array (\n 0 => 0.5278533158407,\n 1 => 0.5086879008467,\n 2 => 0.4080014506744,\n ),\n 1 => \n array (\n 0 => 0.54392829322042,\n 1 => 0.53636363636364,\n 2 => 0.52983621494599,\n ),\n)\n\nFull sample to try: https://3v4l.org/pUhic\n" ]
[ 0, 0 ]
[]
[]
[ "max", "php", "slice", "sorting" ]
stackoverflow_0074679467_max_php_slice_sorting.txt
Q: jq - remove duplicate from array i want to remove duplicates from array, here is input json { "abc": [ "five" ], "pqr": [ "one", "one", "two", "two", "three", "three", "four", "four" ], "xyz": [ "one", "one", "two", "two", "four" ] } output I am expecting is to remove duplicates from array: { "abc": [ "five" ], "pqr": [ "one", "two", "three", "four" ], "xyz": [ "one", "two", "four" ] } i tried map, uniq, group_by with jq but nothing helped A: unique can remove duplicates, but it automatically sorts the arrays, which may not be what you want. jq '.[] |= unique' { "abc": [ "five" ], "pqr": [ "four", "one", "three", "two" ], "xyz": [ "four", "one", "two" ] } Demo Retrieving the original ordering using index and sort might help out here: jq '.[] |= [.[[index(unique[])] | sort[]]]' { "abc": [ "five" ], "pqr": [ "one", "two", "three", "four" ], "xyz": [ "one", "two", "four" ] } Demo A: Here is a sort-free alternative for obtaining the distinct items in an array (or stream) while retaining the order of first occurrence. It uses a filter that is a tiny bit more complex than it would otherwise be, for the sake of complete genericity: # generate a stream of the distinct items in `stream` # in order of first occurrence, without sorting def uniques(stream): foreach stream as $s ({}; ($s|type) as $t | (if $t == "string" then $s else ($s|tostring) end) as $y | if .[$t][$y] then .emit = false else .emit = true | (.item = $s) | (.[$t][$y] = true) end; if .emit then .item else empty end ); Now it's just a matter of applying this filter to your JSON. One possibility would be: map_values([uniques(.[])])
jq - remove duplicate from array
i want to remove duplicates from array, here is input json { "abc": [ "five" ], "pqr": [ "one", "one", "two", "two", "three", "three", "four", "four" ], "xyz": [ "one", "one", "two", "two", "four" ] } output I am expecting is to remove duplicates from array: { "abc": [ "five" ], "pqr": [ "one", "two", "three", "four" ], "xyz": [ "one", "two", "four" ] } i tried map, uniq, group_by with jq but nothing helped
[ "unique can remove duplicates, but it automatically sorts the arrays, which may not be what you want.\njq '.[] |= unique'\n\n{\n \"abc\": [\n \"five\"\n ],\n \"pqr\": [\n \"four\",\n \"one\",\n \"three\",\n \"two\"\n ],\n \"xyz\": [\n \"four\",\n \"one\",\n \"two\"\n ]\n}\n\nDemo\nRetrieving the original ordering using index and sort might help out here:\njq '.[] |= [.[[index(unique[])] | sort[]]]'\n\n{\n \"abc\": [\n \"five\"\n ],\n \"pqr\": [\n \"one\",\n \"two\",\n \"three\",\n \"four\"\n ],\n \"xyz\": [\n \"one\",\n \"two\",\n \"four\"\n ]\n}\n\nDemo\n", "Here is a sort-free alternative for obtaining the distinct items in an array (or stream) while retaining the order of first occurrence.\nIt uses a filter that is a tiny bit more complex than it would otherwise be, for the sake of complete genericity:\n# generate a stream of the distinct items in `stream`\n# in order of first occurrence, without sorting\ndef uniques(stream):\n foreach stream as $s ({};\n ($s|type) as $t\n | (if $t == \"string\" then $s else ($s|tostring) end) as $y\n | if .[$t][$y] then .emit = false else .emit = true | (.item = $s) | (.[$t][$y] = true) end;\n if .emit then .item else empty end );\n\nNow it's just a matter of applying this filter to your JSON. One possibility would be:\n map_values([uniques(.[])])\n\n" ]
[ 1, 0 ]
[]
[]
[ "jq" ]
stackoverflow_0074678678_jq.txt
Q: How can I update or edit my this feature without the URL being able to do server side request forgery Hi I am currently having trouble with like the URL being able to change other person's data and am in need of help to prevent that for the security aspects in PHP. The idea behind is Only the Head Admin can Delete all these Employee Accounts. I tried using sessions also which kind of works but like I couldn't get like the row[id] of the employee's example email or the employee's email and store it in a session cause it would only be getting the SESSION[email] of the user login and is not the one I'm editing which makes it doesn't work so I am unsure how can I do it in for this idea. Can someone please help me and how I can implement it thank you so much in advance This image shows the problem where if I change the id to 5 I will be able to edit other people's information This is my code for employeestatus.php <?php $query = "SELECT * FROM users"; //Select Statement to View Employee Status $pQuery = $con->prepare($query); $result = $pQuery->execute(); $result = $pQuery->get_result(); if (! $result) { // die("SELECT query failed<br> " . $con->error); } else { // echo "SELECT query successful<br>"; } $nrows = $result->num_rows; // echo "#rows=$nrows<br>"; if ($nrows > 0) { echo "<table align='center'"; echo "<table border=1>"; echo "<tr>"; echo "<th>Name</th>"; echo "<th>Department</th>"; echo "<th>Shift</th>"; echo "<th>Status</th>"; echo "</tr>"; while ($row = $result->fetch_assoc()) { // traverse your result and store each row into an associative array echo "<tr>"; echo "<td>"; echo $row['name']; echo "</td>"; echo "<td>"; echo $row['department']; echo "</td>"; echo "<td>"; echo $row['shift']; echo "</td>"; echo "<td>"; echo $row['status']; echo "</td>"; echo "<td>"; echo "<a href='editemployeestatus.php?Submit=GetUpdate&id=".$row['id'] . 
"'>Edit</a>"; //Click this and it will move to editemployee echo "</td>"; if ($_SESSION["role"] === "HA" ) { // Delete Function Only Available to Head Admin echo "<td>"; echo "<a href='employeestatus.php?Submit=Delete&id=". $row['id'] . "'> Delete</a>"; echo "</td>"; } } echo "</table>"; } else { echo "0 records<br>"; } This is the page for my editemployeestatus.php <?php if(isset($_POST['Submit'])){ //Updating if (!empty($_POST['department']) && !empty($_POST['shift']) && !empty($_POST['status']) ) { echo "OK: fields are not empty<br>"; } else { echo "Error: No fields should be empty<br>"; } $department=$_POST['department']; $shift=$_POST['shift']; $status=$_POST['status']; $query= $con->prepare("UPDATE users set department=?, shift=?, status=? WHERE id=?"); $query->bind_param('sssi', $department, $shift, $status, $_SESSION['userid']); //bind the parameters if ($query->execute()){ //execute query echo "Query executed."; unset($_SESSION['userid']); header("location: employeestatus.php"); }else{ echo "Error executing query."; } } if(isset($_GET['Submit']) && $_GET['Submit']==="GetUpdate"){ // $email=$_SESSION['email']; //Assigning Variable Email to the Session // // debug(); $userID = $_GET['id']; $query="SELECT name,department,shift,status FROM users where id=?"; $pQuery = $con->prepare($query); $pQuery->bind_param('i', $userID); //bind the parameters $result=$pQuery->execute(); $result=$pQuery->get_result(); if(!$result) { die("SELECT query failed<br> ".$con->error); } else { echo "SELECT query successful<br>"; } $nrows=$result->num_rows; echo "#rows=$nrows<br>"; if ($row=$result->fetch_assoc()) { ?> <b>Update</b><br> <form action="editemployeestatus.php" method="post"> <table> <tr><td>Name:</td><td><?php echo $row['name']?></td></tr> <tr><td>Department: </td><td> <select name="department" id="department"> <?php if ($row['department'] == "FA" ){ ?> <!-- Check if the employee department is factory --> <option value="FA" selected>Factory</option> <option 
value="WH">Warehouse</option> <option value="CY">Cybersecurity</option> <?php } elseif ($row['department'] == "WH" ){ ?> <!-- Check if the employee department is warehouse --> <option value="FA" >Factory</option> <option value="WH"selected>Warehouse</option> <option value="CY">Cybersecurity</option> <?php } elseif ($row['department'] == "CY" ){ ?> <!-- Check if the employee department is cybersecurity --> <option value="FA" >Factory</option> <option value="WH">Warehouse</option> <option value="CY"selected>Cybersecurity</option> <?php }?> </select> </tr> <tr><td>Shift: </td><td> <select name="shift" id="shift"> <?php if ($row['shift'] == "09:00 - 17:00" ){ ?> <!-- Check if the employee shift is 09:00 - 17:00 --> <option value="09:00 - 17:00" selected>09:00 - 17:00</option> <option value="09:30 - 17:30">09:30 - 17:30</option> <option value="10:00 - 17:00">10:00 - 18:00</option> <?php } elseif ($row['shift'] == "09:30 - 17:30" ){ ?> <!-- Check if the employee shift is 09:30 - 17:30 --> <option value="09:00 - 17:00" >09:00 - 17:00</option> <option value="09:30 - 17:30"selected>09:30 - 17:30</option> <option value="10:00 - 17:00">10:00 - 18:00</option> <?php } elseif ($row['shift'] == "10:00 - 17:00" ){ ?> <!-- Check if the employee shift is 10:00 - 18:00 --> <option value="09:00 - 17:00" >09:00 - 17:00</option> <option value="09:30 - 17:30">09:30 - 17:30</option> <option value="10:00 - 17:00"selected>10:00 - 18:00</option> <?php }?> </select> </tr> <tr><td>Status: </td><td> <select name="status" id="status"> <?php if ($row['status'] == "Working" ){ ?> <!-- Check if the employee status is working --> <option value="Working" selected>Working</option> <option value="Inactive">Inactive</option> <option value="On MC">On MC</option> <option value="On Leave">On Leave</option> <?php } elseif ($row['status'] == "Inactive" ){ ?> <!-- Check if the employee status is inactive --> <option value="Working" >Working</option> <option value="Inactive"selected>Inactive</option> <option 
value="On MC">On MC</option> <option value="On Leave">On Leave</option> <?php } elseif ($row['status'] == "On MC" ){ ?> <!-- Check if the employee status is on mc --> <option value="Working" >Working</option> <option value="Inactive">Inactive</option> <option value="On MC"selected>On MC</option> <option value="On Leave">On Leave</option> <?php } elseif ($row['department'] == "On Leave" ) { ?> <!-- Check if the employee status is on leave --> <option value="Working" >Working</option> <option value="Inactive">Inactive</option> <option value="On MC">On MC</option> <option value="On Leave"selected>On Leave</option> <?php }?> </select> </tr> <tr><td></td><td> <input type="submit" name="Submit" onclick="<?php $_SESSION['userid']=$userID ?> value="Update"></td></tr> </table> </form>
How can I update or edit my this feature without the URL being able to do server side request forgery
Hi I am currently having trouble with like the URL being able to change other person's data and am in need of help to prevent that for the security aspects in PHP. The idea behind is Only the Head Admin can Delete all these Employee Accounts. I tried using sessions also which kind of works but like I couldn't get like the row[id] of the employee's example email or the employee's email and store it in a session cause it would only be getting the SESSION[email] of the user login and is not the one I'm editing which makes it doesn't work so I am unsure how can I do it in for this idea. Can someone please help me and how I can implement it thank you so much in advance This image shows the problem where if I change the id to 5 I will be able to edit other people's information This is my code for employeestatus.php <?php $query = "SELECT * FROM users"; //Select Statement to View Employee Status $pQuery = $con->prepare($query); $result = $pQuery->execute(); $result = $pQuery->get_result(); if (! $result) { // die("SELECT query failed<br> " . $con->error); } else { // echo "SELECT query successful<br>"; } $nrows = $result->num_rows; // echo "#rows=$nrows<br>"; if ($nrows > 0) { echo "<table align='center'"; echo "<table border=1>"; echo "<tr>"; echo "<th>Name</th>"; echo "<th>Department</th>"; echo "<th>Shift</th>"; echo "<th>Status</th>"; echo "</tr>"; while ($row = $result->fetch_assoc()) { // traverse your result and store each row into an associative array echo "<tr>"; echo "<td>"; echo $row['name']; echo "</td>"; echo "<td>"; echo $row['department']; echo "</td>"; echo "<td>"; echo $row['shift']; echo "</td>"; echo "<td>"; echo $row['status']; echo "</td>"; echo "<td>"; echo "<a href='editemployeestatus.php?Submit=GetUpdate&id=".$row['id'] . "'>Edit</a>"; //Click this and it will move to editemployee echo "</td>"; if ($_SESSION["role"] === "HA" ) { // Delete Function Only Available to Head Admin echo "<td>"; echo "<a href='employeestatus.php?Submit=Delete&id=". 
$row['id'] . "'> Delete</a>"; echo "</td>"; } } echo "</table>"; } else { echo "0 records<br>"; } This is the page for my editemployeestatus.php <?php if(isset($_POST['Submit'])){ //Updating if (!empty($_POST['department']) && !empty($_POST['shift']) && !empty($_POST['status']) ) { echo "OK: fields are not empty<br>"; } else { echo "Error: No fields should be empty<br>"; } $department=$_POST['department']; $shift=$_POST['shift']; $status=$_POST['status']; $query= $con->prepare("UPDATE users set department=?, shift=?, status=? WHERE id=?"); $query->bind_param('sssi', $department, $shift, $status, $_SESSION['userid']); //bind the parameters if ($query->execute()){ //execute query echo "Query executed."; unset($_SESSION['userid']); header("location: employeestatus.php"); }else{ echo "Error executing query."; } } if(isset($_GET['Submit']) && $_GET['Submit']==="GetUpdate"){ // $email=$_SESSION['email']; //Assigning Variable Email to the Session // // debug(); $userID = $_GET['id']; $query="SELECT name,department,shift,status FROM users where id=?"; $pQuery = $con->prepare($query); $pQuery->bind_param('i', $userID); //bind the parameters $result=$pQuery->execute(); $result=$pQuery->get_result(); if(!$result) { die("SELECT query failed<br> ".$con->error); } else { echo "SELECT query successful<br>"; } $nrows=$result->num_rows; echo "#rows=$nrows<br>"; if ($row=$result->fetch_assoc()) { ?> <b>Update</b><br> <form action="editemployeestatus.php" method="post"> <table> <tr><td>Name:</td><td><?php echo $row['name']?></td></tr> <tr><td>Department: </td><td> <select name="department" id="department"> <?php if ($row['department'] == "FA" ){ ?> <!-- Check if the employee department is factory --> <option value="FA" selected>Factory</option> <option value="WH">Warehouse</option> <option value="CY">Cybersecurity</option> <?php } elseif ($row['department'] == "WH" ){ ?> <!-- Check if the employee department is warehouse --> <option value="FA" >Factory</option> <option 
value="WH"selected>Warehouse</option> <option value="CY">Cybersecurity</option> <?php } elseif ($row['department'] == "CY" ){ ?> <!-- Check if the employee department is cybersecurity --> <option value="FA" >Factory</option> <option value="WH">Warehouse</option> <option value="CY"selected>Cybersecurity</option> <?php }?> </select> </tr> <tr><td>Shift: </td><td> <select name="shift" id="shift"> <?php if ($row['shift'] == "09:00 - 17:00" ){ ?> <!-- Check if the employee shift is 09:00 - 17:00 --> <option value="09:00 - 17:00" selected>09:00 - 17:00</option> <option value="09:30 - 17:30">09:30 - 17:30</option> <option value="10:00 - 17:00">10:00 - 18:00</option> <?php } elseif ($row['shift'] == "09:30 - 17:30" ){ ?> <!-- Check if the employee shift is 09:30 - 17:30 --> <option value="09:00 - 17:00" >09:00 - 17:00</option> <option value="09:30 - 17:30"selected>09:30 - 17:30</option> <option value="10:00 - 17:00">10:00 - 18:00</option> <?php } elseif ($row['shift'] == "10:00 - 17:00" ){ ?> <!-- Check if the employee shift is 10:00 - 18:00 --> <option value="09:00 - 17:00" >09:00 - 17:00</option> <option value="09:30 - 17:30">09:30 - 17:30</option> <option value="10:00 - 17:00"selected>10:00 - 18:00</option> <?php }?> </select> </tr> <tr><td>Status: </td><td> <select name="status" id="status"> <?php if ($row['status'] == "Working" ){ ?> <!-- Check if the employee status is working --> <option value="Working" selected>Working</option> <option value="Inactive">Inactive</option> <option value="On MC">On MC</option> <option value="On Leave">On Leave</option> <?php } elseif ($row['status'] == "Inactive" ){ ?> <!-- Check if the employee status is inactive --> <option value="Working" >Working</option> <option value="Inactive"selected>Inactive</option> <option value="On MC">On MC</option> <option value="On Leave">On Leave</option> <?php } elseif ($row['status'] == "On MC" ){ ?> <!-- Check if the employee status is on mc --> <option value="Working" >Working</option> <option 
value="Inactive">Inactive</option> <option value="On MC"selected>On MC</option> <option value="On Leave">On Leave</option> <?php } elseif ($row['department'] == "On Leave" ) { ?> <!-- Check if the employee status is on leave --> <option value="Working" >Working</option> <option value="Inactive">Inactive</option> <option value="On MC">On MC</option> <option value="On Leave"selected>On Leave</option> <?php }?> </select> </tr> <tr><td></td><td> <input type="submit" name="Submit" onclick="<?php $_SESSION['userid']=$userID ?> value="Update"></td></tr> </table> </form>
[]
[]
[ "To prevent unauthorized users from being able to modify other users' information, you can add a check in your code to verify that the user making the request is authorized to do so. In this case, it looks like only the Head Admin should be able to delete employee accounts, so you could add a check in the delete section of your code to verify that the user making the request is the Head Admin.\nFor example, you could add a line of code like this in the delete section of your code:\nif (isAdminLoggedIn()) { delete_function();}\n\nWith a function is admin logged in like:\n function isAdminLoggedIn() \n {\n $stmt = self::$_db->prepare(\"SELECT id FROM adminAccounts WHERE session=:sid\");\n $sid = session_id();\n $stmt->bindParam(\":sid\", $sid);\n $stmt->execute();\n \n if($stmt->rowCount() === 1) {\n return true;\n } else {\n return false; \n }\n }\n\nThis check will verify that the user making the request is the Head Admin, and if not, the delete function will not be executed.\nIt's also a good idea to sanitize user input to prevent SQL injection attacks. You can do this by using prepared statements and parameterized queries, which will automatically escape special characters in user input and help prevent SQL injection attacks.\nI hope this helps! Let me know if you have any other questions.\n" ]
[ -1 ]
[ "html", "php", "session", "sqlite" ]
stackoverflow_0074681222_html_php_session_sqlite.txt
Q: How to reorder a numpy array by giving each element a new index? I want to reorder a numpy array, such that each element is given a new index. # I want my_array's elements to use new_indicies's indexes. my_array = np.array([23, 54, 67, 98, 31]) new_indicies = [2, 4, 1, 0, 1] # Some magic using new_indicies at my_array # Note that I earlier gave 67 and 31 the index 1 and since 31 is last, that is the one i'm keeping. >>> [98, 31, 23, 0, 54] What would be an efficient approach to this problem? A: To reorder the elements in a NumPy array according to a set of new indices, you can use the put() method. # Create an empty array of zeros with the same size as my_array reordered_array = np.zeros_like(my_array) # Move the elements in my_array to the indices specified in new_indices reordered_array.put(new_indices, my_array) print(reordered_array) # [98, 31, 23, 0, 54]
How to reorder a numpy array by giving each element a new index?
I want to reorder a numpy array, such that each element is given a new index. # I want my_array's elements to use new_indicies's indexes. my_array = np.array([23, 54, 67, 98, 31]) new_indicies = [2, 4, 1, 0, 1] # Some magic using new_indicies at my_array # Note that I earlier gave 67 and 31 the index 1 and since 31 is last, that is the one i'm keeping. >>> [98, 31, 23, 0, 54] What would be an efficient approach to this problem?
[ "To reorder the elements in a NumPy array according to a set of new indices, you can use the put() method.\n# Create an empty array of zeros with the same size as my_array\nreordered_array = np.zeros_like(my_array)\n\n# Move the elements in my_array to the indices specified in new_indices\nreordered_array.put(new_indices, my_array)\n\nprint(reordered_array) # [98, 31, 23, 0, 54]\n\n" ]
[ 1 ]
[]
[]
[ "arrays", "numpy", "python" ]
stackoverflow_0074681288_arrays_numpy_python.txt
Q: Trying to find all last names starting with a letter I'm working a project for school in T-SQL. I have an Advisors table that is fully set up. I'm trying to update the Student table so that each StudentID is associated with an AdvisorID (referencing the Advisors table). The Student table is fully set up, minus the AdvisorID column. Both tables have Name_Full, Name_First, and Name_Last for every Advisor and Student in the respective tables. I'm trying to find all students that have a Name_Last starting with 'R'. I know for a fact that there is at least one student that qualifies since there is a student with a Name_Last = 'Ramos'. I tried searching for every student with a Name_Last starting with the letter 'R' using the following code. SELECT Name_Last FROM Student WHERE Name_Last IN ('R%') This query returns nothing. I've tried using '=' and 'LIKE' instead of 'IN' and those did not work either. I've tried using 'CONTAINS' which also didn't work. I tried: WHERE CHARINDEX('R', Name_Last) = 1 This did not work either. Once I get this working, I'd like to be able to copy it into a WHERE clause using BETWEEN, as I want to assign an AdvisorID to students within certain ranges. A: So I figured out what was wrong. The code was working perfectly fine. BUT every Name_Last in Student started with a ' '. So altering the code to: SELECT Name_Last FROM Student where Name_Last like ' R%' Worked perfectly. Thank you for your suggestions though! A: It is better to trim (remove spaces) first, then take the first letter SELECT Name_Last FROM Student WHERE LEFT(LTRIM(Name_Last),1) = 'R' ;
Trying to find all last names starting with a letter
I'm working a project for school in T-SQL. I have an Advisors table that is fully set up. I'm trying to update the Student table so that each StudentID is associated with an AdvisorID (referencing the Advisors table). The Student table is fully set up, minus the AdvisorID column. Both tables have Name_Full, Name_First, and Name_Last for every Advisor and Student in the respective tables. I'm trying to find all students that have a Name_Last starting with 'R'. I know for a fact that there is at least one student that qualifies since there is a student with a Name_Last = 'Ramos'. I tried searching for every student with a Name_Last starting with the letter 'R' using the following code. SELECT Name_Last FROM Student WHERE Name_Last IN ('R%') This query returns nothing. I've tried using '=' and 'LIKE' instead of 'IN' and those did not work either. I've tried using 'CONTAINS' which also didn't work. I tried: WHERE CHARINDEX('R', Name_Last) = 1 This did not work either. Once I get this working, I'd like to be able to copy it into a WHERE clause using BETWEEN, as I want to assign an AdvisorID to students within certain ranges.
[ "So I figured out what was wrong.\nThe code was working perfectly fine. BUT every Name_Last in Student started with a ' '.\nSo altering the code to:\nSELECT Name_Last\n FROM Student\n where Name_Last like ' R%'\n\nWorked perfectly. Thank you for your suggestions though!\n", "It is better to trim (remove spaces) first, then take the first letter\nSELECT Name_Last\nFROM Student\nWHERE LEFT(LTRIM(Name_Last),1) = 'R'\n;\n\n" ]
[ 1, 0 ]
[ "The missing part in your statement is the name which is the first word of a full name.\nFinds a string that starts with 'R'\nSELECT Name_Last\n FROM Student\n WHERE Name_Last IN ('R%')\n\nFinds a string that includes ' R'\nSELECT Name_Last\n FROM Student\n WHERE Name_Last LIKE '% R%'\n\nHope this helps to your project.\nGood luck!\n" ]
[ -1 ]
[ "ssms", "string", "tsql" ]
stackoverflow_0074669469_ssms_string_tsql.txt
Q: Chrome Ignoring Cache-Control no-store directive? I have a web-app I'm allowing users to add scripts to. These scripts are written in JavaScript and run in the user's browser. When developing new scripts to run locally, I've added a button to the app that allows you to load it from a locally running web server, (i.e. so you'd click on it and enter http://path.to.my.pc:12345/script.js). My app will fetch this script and append to the DOM. These scripts are assumed to be ES6 modules and Chrome happily handles those, recursively importing correctly. However when running locally, I also wanted the ability for users to "refresh" as they're developing such that the app will hit their server again to redownload the scripts. Chrome does not seem to want to do this. Specifically, despite the fact that my local test server has specified no-store as Cache-Control, Chrome doesn't care. Even if I cacheBust script.js (i.e.http://blah/script.js?cb=randomInt), this cacheBust parameter is not recursively passed to the imports. Here's the text of my locally running dev server: const express = require("express"); const serveStatic = require("serve-static"); const morgan = require("morgan"); function setHeaders(res, path) { res.setHeader('Cache-Control', 'no-store'); res.setHeader('Access-Control-Allow-Origin', '*'); res.setHeader('Access-Control-Allow-Methods', '*'); } const app = express(); app.use(morgan('combined')); app.use(serveStatic('./', { setHeaders }); app.listen(12345); Is there something else I can do? I really don't want to force my users to run webpack. The idea is to keep this as simple and stupid as possible so they can just focus on writing their scripts. Edit Update: Checking 'Disable Caching' in Devtools also does not cause Chrome to actually... not cache. A: In order to prevent the browser from caching the scripts that are loaded from your local web server, you can try setting the Expires header to a date in the past. 
This will tell the browser that the resource has already expired and should not be cached. Here is an example of how you can do this using the setHeader method of the response object in Express: function setHeaders(res, path) { res.setHeader('Cache-Control', 'no-store'); res.setHeader('Access-Control-Allow-Origin', '*'); res.setHeader('Access-Control-Allow-Methods', '*'); // Set the Expires header to a date in the past. res.setHeader('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT'); } By setting the Expires header in this way, the browser will not cache the scripts and will always fetch them from your local web server. A: It sounds like you want to prevent the scripts from being cached in the user's browser when they are loaded from your local development server. The no-store directive in the Cache-Control header is intended to prevent caching, but it seems that Chrome is ignoring this directive in your case. One thing you can try is adding an Expires header to the response from your server with a date in the past. This should instruct the browser not to cache the response. You can do this by adding the following line to your setHeaders function: res.setHeader('Expires', 'Sat, 01 Jan 2000 00:00:00 GMT'); Additionally, you can try adding a Vary header to the response, with the value Cache-Control. This tells the browser that the response may vary depending on the Cache-Control header, and so it should not cache the response unless the Cache-Control header allows it. You can add this header with the following line of code: res.setHeader('Vary', 'Cache-Control'); It's also worth noting that the no-store directive is intended to prevent caching by any cache (e.g. a proxy cache), not just the browser's cache. So it's possible that your local development server is running behind a proxy cache that is ignoring the no-store directive. In that case, you may need to configure the proxy cache to respect the no-store directive. 
A: Some ideas : Have you prior to everything, do "developer tools > application > clear storage" to avoid using previous server instance cached ? Have you tried "Cache-Control:no-cache, no-store" ? Have you tried on non Chrome browser ? Have you cleared all in client-side browser (ctr+alt+suppr) or hard reload (ctrl+shift+r) ? I've the same need and in my node server i use const nocache = require('nocache'); app.use(nocache()); app.set('etag', false); And in client side, my query use only 'Cache-control', 'no-store'
Chrome Ignoring Cache-Control no-store directive?
I have a web-app I'm allowing users to add scripts to. These scripts are written in JavaScript and run in the user's browser. When developing new scripts to run locally, I've added a button to the app that allows you to load it from a locally running web server, (i.e. so you'd click on it and enter http://path.to.my.pc:12345/script.js). My app will fetch this script and append to the DOM. These scripts are assumed to be ES6 modules and Chrome happily handles those, recursively importing correctly. However when running locally, I also wanted the ability for users to "refresh" as they're developing such that the app will hit their server again to redownload the scripts. Chrome does not seem to want to do this. Specifically, despite the fact that my local test server has specified no-store as Cache-Control, Chrome doesn't care. Even if I cacheBust script.js (i.e.http://blah/script.js?cb=randomInt), this cacheBust parameter is not recursively passed to the imports. Here's the text of my locally running dev server: const express = require("express"); const serveStatic = require("serve-static"); const morgan = require("morgan"); function setHeaders(res, path) { res.setHeader('Cache-Control', 'no-store'); res.setHeader('Access-Control-Allow-Origin', '*'); res.setHeader('Access-Control-Allow-Methods', '*'); } const app = express(); app.use(morgan('combined')); app.use(serveStatic('./', { setHeaders }); app.listen(12345); Is there something else I can do? I really don't want to force my users to run webpack. The idea is to keep this as simple and stupid as possible so they can just focus on writing their scripts. Edit Update: Checking 'Disable Caching' in Devtools also does not cause Chrome to actually... not cache.
[ "In order to prevent the browser from caching the scripts that are loaded from your local web server, you can try setting the Expires header to a date in the past. This will tell the browser that the resource has already expired and should not be cached.\nHere is an example of how you can do this using the setHeader method of the response object in Express:\nfunction setHeaders(res, path) {\n res.setHeader('Cache-Control', 'no-store');\n res.setHeader('Access-Control-Allow-Origin', '*');\n res.setHeader('Access-Control-Allow-Methods', '*');\n\n // Set the Expires header to a date in the past.\n res.setHeader('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT');\n}\n\nBy setting the Expires header in this way, the browser will not cache the scripts and will always fetch them from your local web server.\n", "It sounds like you want to prevent the scripts from being cached in the user's browser when they are loaded from your local development server. The no-store directive in the Cache-Control header is intended to prevent caching, but it seems that Chrome is ignoring this directive in your case.\nOne thing you can try is adding an Expires header to the response from your server with a date in the past. This should instruct the browser not to cache the response. You can do this by adding the following line to your setHeaders function:\nres.setHeader('Expires', 'Sat, 01 Jan 2000 00:00:00 GMT');\n\nAdditionally, you can try adding a Vary header to the response, with the value Cache-Control. This tells the browser that the response may vary depending on the Cache-Control header, and so it should not cache the response unless the Cache-Control header allows it. You can add this header with the following line of code:\nres.setHeader('Vary', 'Cache-Control');\n\nIt's also worth noting that the no-store directive is intended to prevent caching by any cache (e.g. a proxy cache), not just the browser's cache. 
So it's possible that your local development server is running behind a proxy cache that is ignoring the no-store directive. In that case, you may need to configure the proxy cache to respect the no-store directive.\n", "Some ideas :\n\nHave you prior to everything, do \"developer tools > application >\nclear storage\" to avoid using previous server instance cached ?\nHave you tried \"Cache-Control:no-cache, no-store\" ?\nHave you tried on non Chrome browser ?\nHave you cleared all in client-side browser (ctr+alt+suppr) or hard\nreload (ctrl+shift+r) ?\n\nI've the same need and in my node server i use\nconst nocache = require('nocache');\napp.use(nocache());\napp.set('etag', false);\n\nAnd in client side, my query use only\n'Cache-control', 'no-store'\n\n" ]
[ 0, 0, 0 ]
[]
[]
[ "express", "google_chrome", "javascript" ]
stackoverflow_0074657168_express_google_chrome_javascript.txt
Q: nested routes are not working when using react router v6.4 i'm trying to implement route nesting and here is my App component export default function App() { return ( <BrowserRouter> <Routes> <Route path="signup" element={<Signup />}> <Route path="avatar" element={<Avatar />} /> </Route> </Routes> </BrowserRouter> );} whenever i try to access /signup/avatar it shows the content of the /signup route
nested routes are not working when using react router v6.4
i'm trying to implement route nesting and here is my App component export default function App() { return ( <BrowserRouter> <Routes> <Route path="signup" element={<Signup />}> <Route path="avatar" element={<Avatar />} /> </Route> </Routes> </BrowserRouter> );} whenever i try to access /signup/avatar it shows the content of the /signup route
[]
[]
[ "Do you have an <Outlet /> component inside the <Signup /> component?\nYour avatar component should be displayed there.\nhttps://www.robinwieruch.de/react-router-nested-routes/\n" ]
[ -1 ]
[ "react_router", "react_router_dom", "reactjs" ]
stackoverflow_0074681204_react_router_react_router_dom_reactjs.txt
Q: Plotting Agglomerative Hierarchical Clustering with complete linkage I need to do a visual rappresentation of Hierarchical clustering using Complete Linkage by plotting an dendogram. My data.frame is obtained from eurostat database (CP00 - HICP) and after some cleaning looks like: dput(head(CP00)) structure(list(id = c("CP00", "CP00", "CP00", "CP00", "CP00", "CP00"), country = c("Austria", "Austria", "Austria", "Austria", "Austria", "Austria"), time = structure(c(10988, 11017, 11048, 11078, 11109, 11139), class = "Date"), values = c(1.9, 1.9, 1.8, 1.6, 2.4, 1.9)), row.names = c(NA, -6L), class = c("tbl_df", "tbl", "data.frame")) With 7344 observation. Firstly, I computed the Dissimilarity matrix with and then the hierarchical clustering using complete linkage: # Dissimilarity matrix CP00_clst <- dist(CP00, method = "minkowski", p = 1.5) # Hierarchical clustering using Complete Linkage CP00_clst <- hclust(CP00_clst, method = "complete") Finally, simply plotting with a title: # Plot the obtained dendrogram plot(CP00_clst, main = "Clusterin Countries based on HICP") However, the result is what I need to have, such as a clear dendrogram. In addition, I need to divide the dendogram in 4 cluster. This is my results: My Result This is the outcome that I need: Outcome needed I am new to R and probably there is something wrong in the dissimilarity matrix. Thank you for your help! A: Do you want to plot 7344 entities into the dendrogram or only several countries? If several countries: for dist function, you have CP00 in a long format (1 value per line per object), however, the dist function needs a wide format (1 object per row, multiple properties as columns; see https://www.statology.org/long-vs-wide-data/). The rectangles can be added simply by the rect.hclust function from stats package: https://rdrr.io/r/stats/rect.hclust.html
Plotting Agglomerative Hierarchical Clustering with complete linkage
I need to do a visual rappresentation of Hierarchical clustering using Complete Linkage by plotting an dendogram. My data.frame is obtained from eurostat database (CP00 - HICP) and after some cleaning looks like: dput(head(CP00)) structure(list(id = c("CP00", "CP00", "CP00", "CP00", "CP00", "CP00"), country = c("Austria", "Austria", "Austria", "Austria", "Austria", "Austria"), time = structure(c(10988, 11017, 11048, 11078, 11109, 11139), class = "Date"), values = c(1.9, 1.9, 1.8, 1.6, 2.4, 1.9)), row.names = c(NA, -6L), class = c("tbl_df", "tbl", "data.frame")) With 7344 observation. Firstly, I computed the Dissimilarity matrix with and then the hierarchical clustering using complete linkage: # Dissimilarity matrix CP00_clst <- dist(CP00, method = "minkowski", p = 1.5) # Hierarchical clustering using Complete Linkage CP00_clst <- hclust(CP00_clst, method = "complete") Finally, simply plotting with a title: # Plot the obtained dendrogram plot(CP00_clst, main = "Clusterin Countries based on HICP") However, the result is what I need to have, such as a clear dendrogram. In addition, I need to divide the dendogram in 4 cluster. This is my results: My Result This is the outcome that I need: Outcome needed I am new to R and probably there is something wrong in the dissimilarity matrix. Thank you for your help!
[ "Do you want to plot 7344 entities into the dendrogram or only several countries?\nIf several countries: for dist function, you have CP00 in a long format (1 value per line per object), however, the dist function needs a wide format (1 object per row, multiple properties as columns; see https://www.statology.org/long-vs-wide-data/).\nThe rectangles can be added simply by the rect.hclust function from stats package:\nhttps://rdrr.io/r/stats/rect.hclust.html\n" ]
[ 0 ]
[]
[]
[ "hierarchical_clustering", "r" ]
stackoverflow_0074675701_hierarchical_clustering_r.txt
Q: require working but import not working I have a actions.js file that is exporting actions like this export var toggleTodo = (id) => { return { type: 'TOGGLE_TODO', id } } but when i import it using es6 import i get error Uncaught TypeError: Cannot read property 'toggleTodo' of undefined but when i require it using common js require it works just fine! Can someone explain to me why is this happening, I mean i read these two are same things... Something seem to be different ? // var actions = require('actions') working // dispatch(actions.toggleTodo(id)); import actions from 'actions' //not working dispatch(actions.toggleTodo(id)); A: There are several different forms of import, each doing slightly different thing. The one you are using import actions from 'actions' //not working is for importing default export from actions module. You can see complete list in MDN javascript reference. It's not working because your action.js module probably does not have default export, and actions come as undefined. The form that roughly corresponds to require call is this one: import * as actions from 'actions'; it allows you to access all exported values as properties of actions: dispatch(actions.toggleTodo(id)); or you can use named import like this: import {toggleTodo} from 'actions'; then you can use toggleTodo directly: dispatch(toggleTodo(id)); A: this was incredibly helpful to me in 2022. honestly someone posted a super pretentious comment about how this post is "tagged" wrong. if the confused individual who wrote this, knew what the right tag was- then they probably could google the real solution normally and not post it on stack overflow where confused rif raff like me show up for these exact nuggets. i think that person is looking for the manual none of us can find
require working but import not working
I have a actions.js file that is exporting actions like this export var toggleTodo = (id) => { return { type: 'TOGGLE_TODO', id } } but when i import it using es6 import i get error Uncaught TypeError: Cannot read property 'toggleTodo' of undefined but when i require it using common js require it works just fine! Can someone explain to me why is this happening, I mean i read these two are same things... Something seem to be different ? // var actions = require('actions') working // dispatch(actions.toggleTodo(id)); import actions from 'actions' //not working dispatch(actions.toggleTodo(id));
[ "There are several different forms of import, each doing slightly different thing. The one you are using\nimport actions from 'actions' //not working\n\nis for importing default export from actions module. You can see complete list in MDN javascript reference.\nIt's not working because your action.js module probably does not have default export, and actions come as undefined.\nThe form that roughly corresponds to require call is this one:\nimport * as actions from 'actions';\n\nit allows you to access all exported values as properties of actions:\ndispatch(actions.toggleTodo(id));\n\nor you can use named import like this:\nimport {toggleTodo} from 'actions';\n\nthen you can use toggleTodo directly:\ndispatch(toggleTodo(id));\n\n", "this was incredibly helpful to me in 2022. honestly someone posted a super pretentious comment about how this post is \"tagged\" wrong. if the confused individual who wrote this, knew what the right tag was- then they probably could google the real solution normally and not post it on stack overflow where confused rif raff like me show up for these exact nuggets. i think that person is looking for the manual none of us can find\n" ]
[ 12, 0 ]
[]
[]
[ "es6_class", "es6_module_loader", "es6_modules" ]
stackoverflow_0040762352_es6_class_es6_module_loader_es6_modules.txt
Q: add random integer as clusterProperty in maplibre/mapbox gl-js i want to add a random number as clusterProperty in maplibre/mapbox. So far I have only found methods that use existing cluster properties to create new cluster properties by subtracting, adding, multiplying... the old values. I just want to add a random number, let's say between 1 and 25, as a property for every cluster. Is there a way to achive this? Maplibre Docs: https://maplibre.org/maplibre-gl-js-docs/style-spec/sources/#geojson-clusterProperties I tried using some expression like adding random generated numbers, but i always stumbled into the same problem: Bigger clusters also included the random generated numbers from their children, which led to the number being bigger than 25. I need a way, to add a clusterPorperty, that is somehow independent from other cluster. Thanks in advance :) A: It sounds like you're looking for a way to add a property to each cluster in a Mapbox map that is generated using a random number between 1 and 25. I'm not familiar with Mapbox, but it looks like the clusterProperties option of the GeoJSONSource object may be what you're looking for. You could try using this option to set the property you want to add to a random number between 1 and 25 for each cluster. Here's an example of how you might do this: map.addSource('clusters', { type: 'geojson', data: 'https://my-data-source.com/data.json', cluster: true, clusterProperties: { // Set the property you want to add to a random number between 1 and 25 for each cluster. myProp: Math.floor(Math.random() * 25) + 1 } }); This will add a myProp property to each cluster, with a value between 1 and 25. I'm not sure if this will solve your problem, but it's worth a try.
add random integer as clusterProperty in maplibre/mapbox gl-js
i want to add a random number as clusterProperty in maplibre/mapbox. So far I have only found methods that use existing cluster properties to create new cluster properties by subtracting, adding, multiplying... the old values. I just want to add a random number, let's say between 1 and 25, as a property for every cluster. Is there a way to achive this? Maplibre Docs: https://maplibre.org/maplibre-gl-js-docs/style-spec/sources/#geojson-clusterProperties I tried using some expression like adding random generated numbers, but i always stumbled into the same problem: Bigger clusters also included the random generated numbers from their children, which led to the number being bigger than 25. I need a way, to add a clusterPorperty, that is somehow independent from other cluster. Thanks in advance :)
[ "It sounds like you're looking for a way to add a property to each cluster in a Mapbox map that is generated using a random number between 1 and 25. I'm not familiar with Mapbox, but it looks like the clusterProperties option of the GeoJSONSource object may be what you're looking for. You could try using this option to set the property you want to add to a random number between 1 and 25 for each cluster.\nHere's an example of how you might do this:\n map.addSource('clusters', {\n type: 'geojson',\n data: 'https://my-data-source.com/data.json',\n cluster: true,\n clusterProperties: {\n // Set the property you want to add to a random number between 1 and 25 for each cluster.\n myProp: Math.floor(Math.random() * 25) + 1\n }\n});\n\nThis will add a myProp property to each cluster, with a value between 1 and 25. I'm not sure if this will solve your problem, but it's worth a try.\n" ]
[ 0 ]
[]
[]
[ "mapbox", "mapbox_gl_js", "maplibre_gl" ]
stackoverflow_0074681255_mapbox_mapbox_gl_js_maplibre_gl.txt
Q: How to know the exact position of a camera viewbox in Qt? I am working with OpenGL in python and trying to attach 2d images to a canvas (the images will change according to a certain frequence). I managed to achieve that but to continue my task i need two things: the major problem: I need to get the image position (or bounds), sorry if i don't have the correct term, i am new to this. basically i just need to have some kind of positions to know where my picture is in the canvas. i tried to look into the methods and attributes of self.view.camera I could not find anything to help. one minor problem: i can move the image with the mouse along the canvas and i zoom it. i wonder if it is possible to only allow the zoom but not allow the right/left move [this is resolved in the comments section] here is my code: import sys from PySide2 import QtWidgets, QtCore from vispy import scene from PySide2.QtCore import QMetaObject from PySide2.QtWidgets import * import numpy as np import dog import time import imageio as iio class CameraThread(QtCore.QThread): new_image = QtCore.Signal(object) def __init__(self, parent=None): QtCore.QThread.__init__(self, parent) def run(self): try: while True: frame = iio.imread(dog.getDog(filename='randog')) self.new_image.emit(frame.data) time.sleep(10.0) finally: print('end!') class Ui_MainWindow(object): def setupUi(self, MainWindow): if not MainWindow.objectName(): MainWindow.setObjectName("MainWindow") MainWindow.resize(800, 400) self.centralwidget = QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.gridLayout = QGridLayout(self.centralwidget) self.gridLayout.setObjectName("gridLayout") self.groupBox = QGroupBox(self.centralwidget) self.groupBox.setObjectName("groupBox") self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 1) MainWindow.setCentralWidget(self.centralwidget) QMetaObject.connectSlotsByName(MainWindow) class MainWindow(QtWidgets.QMainWindow): def __init__(self): super(MainWindow, self).__init__() 
self.ui = Ui_MainWindow() self.ui.setupUi(self) # OpenGL drawing surface self.canvas = scene.SceneCanvas(keys='interactive') self.canvas.create_native() self.canvas.native.setParent(self) self.setWindowTitle('MyApp') self.view = self.canvas.central_widget.add_view() self.view.bgcolor = '#ffffff' # set the canvas to a white background self.image = scene.visuals.Image(np.zeros((1, 1)), interpolation='nearest', parent= self.view.scene, cmap='grays', clim=(0, 2 ** 8 - 1)) self.view.camera = scene.PanZoomCamera(aspect=1) self.view.camera.flip = (0, 1, 0) self.view.camera.set_range() self.view.camera.zoom(1000, (0, 0)) self._camera_runner = CameraThread(parent=self) self._camera_runner.new_image.connect(self.new_image, type=QtCore.Qt.BlockingQueuedConnection) self._camera_runner.start() @QtCore.Slot(object) def new_image(self, img): try: self.image.set_data(img) self.image.update() except Exception as e: print(f"problem sending image: {e}") def main(): import ctypes ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID('my_gui') app = QtWidgets.QApplication([]) main_window = MainWindow() main_window.show() sys.exit(app.exec_()) if __name__ == '__main__': main() A: Do you want to know the coordinates of the picture in the viewport (the window), or do you want the coordinates of the picture on the canvas? Vispy actually puts the image at (0,0) by default inside the Vispy canvas. When you move around the canvas you actually aren't moving the canvas around, you are just moving the camera which is looking at the canvas so the coordinates of the picture stay at (0,0) regardless if you move around the viewport or the camera or not. Also the coordinates of the Vispy canvas correspond one to one with the pixel length and width of your image. One pixel is one unit in Vispy. 
You can check this by adding this method to your MainWindow class: def my_handler(self,event): transform = self.image.transforms.get_transform(map_to="canvas") img_x, img_y = transform.imap(event.pos)[:2] print(img_x, img_y) # optionally do the below to tell other handlers not to look at this event: event.handled = True and adding this to your __init__ method: self.canvas.events.mouse_move.connect(self.my_handler) You can see that when you hover over the top left corner of your image, it should print roughly (0,0).
How to know the exact position of a camera viewbox in Qt?
I am working with OpenGL in python and trying to attach 2d images to a canvas (the images will change according to a certain frequence). I managed to achieve that but to continue my task i need two things: the major problem: I need to get the image position (or bounds), sorry if i don't have the correct term, i am new to this. basically i just need to have some kind of positions to know where my picture is in the canvas. i tried to look into the methods and attributes of self.view.camera I could not find anything to help. one minor problem: i can move the image with the mouse along the canvas and i zoom it. i wonder if it is possible to only allow the zoom but not allow the right/left move [this is resolved in the comments section] here is my code: import sys from PySide2 import QtWidgets, QtCore from vispy import scene from PySide2.QtCore import QMetaObject from PySide2.QtWidgets import * import numpy as np import dog import time import imageio as iio class CameraThread(QtCore.QThread): new_image = QtCore.Signal(object) def __init__(self, parent=None): QtCore.QThread.__init__(self, parent) def run(self): try: while True: frame = iio.imread(dog.getDog(filename='randog')) self.new_image.emit(frame.data) time.sleep(10.0) finally: print('end!') class Ui_MainWindow(object): def setupUi(self, MainWindow): if not MainWindow.objectName(): MainWindow.setObjectName("MainWindow") MainWindow.resize(800, 400) self.centralwidget = QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.gridLayout = QGridLayout(self.centralwidget) self.gridLayout.setObjectName("gridLayout") self.groupBox = QGroupBox(self.centralwidget) self.groupBox.setObjectName("groupBox") self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 1) MainWindow.setCentralWidget(self.centralwidget) QMetaObject.connectSlotsByName(MainWindow) class MainWindow(QtWidgets.QMainWindow): def __init__(self): super(MainWindow, self).__init__() self.ui = Ui_MainWindow() self.ui.setupUi(self) # OpenGL drawing 
surface self.canvas = scene.SceneCanvas(keys='interactive') self.canvas.create_native() self.canvas.native.setParent(self) self.setWindowTitle('MyApp') self.view = self.canvas.central_widget.add_view() self.view.bgcolor = '#ffffff' # set the canvas to a white background self.image = scene.visuals.Image(np.zeros((1, 1)), interpolation='nearest', parent= self.view.scene, cmap='grays', clim=(0, 2 ** 8 - 1)) self.view.camera = scene.PanZoomCamera(aspect=1) self.view.camera.flip = (0, 1, 0) self.view.camera.set_range() self.view.camera.zoom(1000, (0, 0)) self._camera_runner = CameraThread(parent=self) self._camera_runner.new_image.connect(self.new_image, type=QtCore.Qt.BlockingQueuedConnection) self._camera_runner.start() @QtCore.Slot(object) def new_image(self, img): try: self.image.set_data(img) self.image.update() except Exception as e: print(f"problem sending image: {e}") def main(): import ctypes ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID('my_gui') app = QtWidgets.QApplication([]) main_window = MainWindow() main_window.show() sys.exit(app.exec_()) if __name__ == '__main__': main()
[ "Do you want to know the coordinates of the picture in the viewport (the window), or do you want the coordinates of the picture on the canvas? Vispy actually puts the image at (0,0) by default inside the Vispy canvas. When you move around the canvas you actually aren't moving the canvas around, you are just moving the camera which is looking at the canvas so the coordinates of the picture stay at (0,0) regardless if you move around the viewport or the camera or not. Also the coordinates of the Vispy canvas correspond one to one with the pixel length and width of your image. One pixel is one unit in Vispy. You can check this by adding this method to your MainWindow class:\ndef my_handler(self,event):\n \n\n transform = self.image.transforms.get_transform(map_to=\"canvas\")\n img_x, img_y = transform.imap(event.pos)[:2]\n print(img_x, img_y)\n # optionally do the below to tell other handlers not to look at this event:\n event.handled = True\n\nand adding this to your __init__ method:\nself.canvas.events.mouse_move.connect(self.my_handler)\n\nYou can see that when you hover over the top left corner of your image, it should print roughly (0,0).\n" ]
[ 0 ]
[]
[]
[ "camera", "pyqt", "python", "qt", "vispy" ]
stackoverflow_0074629482_camera_pyqt_python_qt_vispy.txt
Q: I require converting this for loop into a recursion function rate, cashflows = 0.05,[-1100,300,450,800] def npv_for_loop(rate,cashflows): NPV=0 for i in range(len(cashflows)): NPV+=cashflows[i]/(1+rate)**i print(round(NPV,3)) i generally have no idea how a recursion works and would really appreciate if anybody can help me. A: Here is an example of how you could convert the given for loop into a recursive function: def npv(rate, cashflows, i=0, NPV=0): # Stop the recursion when we reach the end of the cash flows if i == len(cashflows): return NPV # Compute the present value of the ith cash flow present_value = cashflows[i] / (1 + rate) ** i # Recursively call the function to compute the present value of the remaining cash flows return npv(rate, cashflows, i + 1, NPV + present_value) rate, cashflows = 0.05,[-1100,300,450,800] # Compute the NPV of the cash flows using the recursive function npv = npv(rate, cashflows) print(npv) In this code, the npv() function computes the present value of each cash flow in the given cashflows array and sums them up to compute the NPV of the cash flows. The i parameter is the index of the current cash flow being considered, and the NPV parameter is the running total of the present values of the cash flows that have been considered so far. The npv() function calls itself recursively with an updated value of i and NPV until all of the cash flows have been considered. Recursive functions work by calling themselves with updated values for their parameters, and then using the updated values to compute a result. In the case of the npv() function, it calls itself with an updated value of i and NPV until all of the cash flows have been considered, and then it returns the final value of NPV as the result. This is an example of a tail-recursive function, where the final result is computed by the base case (i.e., when i == len(cashflows)) and then passed back up the recursive calls. A: Recursion function is a function calling itself. 
It works by changing the parameters each time it calls itself until some condition accruing and then it returns. When the function hit the return value it goes back to the last line called it and continue to execute from that line, just like main function calling other. When its done executing, or hit a return, it goes back to the last line called it and continues.. so on until the function ends and returns to main. rate, cashflows = 0.05,[-1100,300,450,800] def npv_for_loop(rate,cashflows): NPV=0 npv_rec(rate, cashflows, NPV) def npv_rec(rate, cashflows, npv, i=0): if len(cashflows) == i: return npv+=cashflows[i]/(1+rate)**i print(round(npv,3)) npv_rec(rate, cashflows, npv, i + 1) npv_for_loop(rate, cashflows)
I require converting this for loop into a recursion function
rate, cashflows = 0.05,[-1100,300,450,800] def npv_for_loop(rate,cashflows): NPV=0 for i in range(len(cashflows)): NPV+=cashflows[i]/(1+rate)**i print(round(NPV,3)) i generally have no idea how a recursion works and would really appreciate if anybody can help me.
[ "Here is an example of how you could convert the given for loop into a recursive function:\ndef npv(rate, cashflows, i=0, NPV=0):\n # Stop the recursion when we reach the end of the cash flows\n if i == len(cashflows):\n return NPV\n\n # Compute the present value of the ith cash flow\n present_value = cashflows[i] / (1 + rate) ** i\n\n # Recursively call the function to compute the present value of the remaining cash flows\n return npv(rate, cashflows, i + 1, NPV + present_value)\n\nrate, cashflows = 0.05,[-1100,300,450,800]\n\n# Compute the NPV of the cash flows using the recursive function\nnpv = npv(rate, cashflows)\nprint(npv)\n\nIn this code, the npv() function computes the present value of each cash flow in the given cashflows array and sums them up to compute the NPV of the cash flows. The i parameter is the index of the current cash flow being considered, and the NPV parameter is the running total of the present values of the cash flows that have been considered so far. The npv() function calls itself recursively with an updated value of i and NPV until all of the cash flows have been considered.\nRecursive functions work by calling themselves with updated values for their parameters, and then using the updated values to compute a result. In the case of the npv() function, it calls itself with an updated value of i and NPV until all of the cash flows have been considered, and then it returns the final value of NPV as the result. This is an example of a tail-recursive function, where the final result is computed by the base case (i.e., when i == len(cashflows)) and then passed back up the recursive calls.\n", "Recursion function is a function calling itself. It works by changing the parameters each time it calls itself until some condition accruing and then it returns. When the function hit the return value it goes back to the last line called it and continue to execute from that line, just like main function calling other. 
When its done executing, or hit a return, it goes back to the last line called it and continues.. so on until the function ends and returns to main.\nrate, cashflows = 0.05,[-1100,300,450,800]\n\ndef npv_for_loop(rate,cashflows):\n NPV=0\n npv_rec(rate, cashflows, NPV)\n \ndef npv_rec(rate, cashflows, npv, i=0):\n if len(cashflows) == i:\n return\n npv+=cashflows[i]/(1+rate)**i\n print(round(npv,3))\n npv_rec(rate, cashflows, npv, i + 1)\n \nnpv_for_loop(rate, cashflows)\n\n" ]
[ 1, 0 ]
[]
[]
[ "for_loop", "python", "recursion" ]
stackoverflow_0074681195_for_loop_python_recursion.txt
Q: wich algorythm php to create a secret santa program I'm trying to make a draw for a secret santa. I collect in a table the information of the person and the name of the person on whom it should not fall (to avoid couples). However during my php loop I can't take into account my exclusions ` foreach ($supplier as $sup){ $exclude = $sup['blacklist']; $data = $recipient; $temp = array_diff($data[], array($exclude)); echo $temp[rand(0, sizeOf($temp))]; foreach ($recipient as $key=>$recip){ if ($sup['surname'] !== $recip['surname']){ $result[] = ['recipient' => $recip, 'supplier' => $sup]; unset($recipient[$key]); } } } ` How can I take into account this blacklist please ? A: shuffle($supplier); shuffle($recipient); // dump($supplier, $recipient); $result = []; foreach ($supplier as $sup){ $assign = false; dump($sup); foreach ($recipient as $key=>$recip){ dump($recip['surname']); if ($sup['surname'] !== $recip['surname'] && $sup['blacklist'] !== $recip['surname'] && $sup['surname'] !== $recip['blacklist']){ $result[] = ['recipient' => $recip, 'supplier' => $sup]; dump($sup['surname']); unset($recipient[$key]); $assign = true; } if ($assign === true){ break; } } } return $result; } this is my code with shuffle
wich algorythm php to create a secret santa program
I'm trying to make a draw for a secret santa. I collect in a table the information of the person and the name of the person on whom it should not fall (to avoid couples). However during my php loop I can't take into account my exclusions ` foreach ($supplier as $sup){ $exclude = $sup['blacklist']; $data = $recipient; $temp = array_diff($data[], array($exclude)); echo $temp[rand(0, sizeOf($temp))]; foreach ($recipient as $key=>$recip){ if ($sup['surname'] !== $recip['surname']){ $result[] = ['recipient' => $recip, 'supplier' => $sup]; unset($recipient[$key]); } } } ` How can I take into account this blacklist please ?
[ " shuffle($supplier);\n shuffle($recipient);\n // dump($supplier, $recipient);\n\n $result = [];\n\n foreach ($supplier as $sup){\n $assign = false;\n dump($sup);\n foreach ($recipient as $key=>$recip){\n dump($recip['surname']);\n if ($sup['surname'] !== $recip['surname'] && $sup['blacklist'] !== $recip['surname'] && $sup['surname'] !== $recip['blacklist']){\n $result[] = ['recipient' => $recip, 'supplier' => $sup];\n dump($sup['surname']);\n unset($recipient[$key]);\n $assign = true;\n }\n if ($assign === true){\n break;\n }\n }\n }\n return $result;\n }\n\nthis is my code with shuffle\n" ]
[ 0 ]
[]
[]
[ "algorithm", "php", "symfony" ]
stackoverflow_0074681039_algorithm_php_symfony.txt
Q: Cannot find module "@sendgrid/mail" I'm using the Sendgrid mail package (https://www.npmjs.com/package/@sendgrid/mail) to send a test email using the Twilio Serveless functions. I have configured the module, specifying the correct version and module in the configure dashboard here. https://www.twilio.com/console/functions/configure but when I deploy my function and run it using the twilio cli, i get the error message, "message":"Cannot find module '@sendgrid/mail'" I find this weird since deploying the function manually under the Manage tab, https://www.twilio.com/console/functions/manage tab works like a gem. I'm I missing something? Or does the serverless API doesn't currently support this? (the same package configurations work when I manually deploying the function) A: the Twilio GUI Console based Functions are separate and distinct from the API based functions. You can find more detail here. Beta limitations, known issues and limits You can add the npm module(s) using npm install , as detailed here. Twilio Serverless Toolkit - Deploy a Project "Any dependency that is listed in the dependencies field in your package.json will automatically be installed in your deployment." If you use the Visual Studio Code approach, you can do the same. A: const sgMail = require('@sendgrid/mail'); sgMail.setApiKey(process.env.SENDGRID_API_KEY); const msg = { to: 'test@example.com', from: 'test@example.com', subject: 'Sending with Twilio SendGrid is Fun', text: 'and easy to do anywhere, even with Node.js', html: '<strong>and easy to do anywhere, even with Node.js</strong>', }; //ES6 sgMail .send(msg) .then(() => {}, console.error); //ES8 (async () => { try { await sgMail.send(msg); } catch (err) { console.error(err.toString()); } })(); A: just use: yarn add @sendgrid/mail
Cannot find module "@sendgrid/mail"
I'm using the Sendgrid mail package (https://www.npmjs.com/package/@sendgrid/mail) to send a test email using the Twilio Serveless functions. I have configured the module, specifying the correct version and module in the configure dashboard here. https://www.twilio.com/console/functions/configure but when I deploy my function and run it using the twilio cli, i get the error message, "message":"Cannot find module '@sendgrid/mail'" I find this weird since deploying the function manually under the Manage tab, https://www.twilio.com/console/functions/manage tab works like a gem. I'm I missing something? Or does the serverless API doesn't currently support this? (the same package configurations work when I manually deploying the function)
[ "the Twilio GUI Console based Functions are separate and distinct from the API based functions. You can find more detail here.\nBeta limitations, known issues and limits\nYou can add the npm module(s) using npm install , as detailed here.\nTwilio Serverless Toolkit - Deploy a Project\n\"Any dependency that is listed in the dependencies field in your package.json will automatically be installed in your deployment.\"\nIf you use the Visual Studio Code approach, you can do the same.\n", "const sgMail = require('@sendgrid/mail');\nsgMail.setApiKey(process.env.SENDGRID_API_KEY);\nconst msg = {\n to: 'test@example.com',\n from: 'test@example.com',\n subject: 'Sending with Twilio SendGrid is Fun',\n text: 'and easy to do anywhere, even with Node.js',\n html: '<strong>and easy to do anywhere, even with Node.js</strong>',\n};\n//ES6\nsgMail\n .send(msg)\n .then(() => {}, console.error);\n//ES8\n(async () => {\n try {\n await sgMail.send(msg);\n } catch (err) {\n console.error(err.toString());\n }\n})();\n\n", "just use:\nyarn add @sendgrid/mail\n\n" ]
[ 3, 0, 0 ]
[]
[]
[ "twilio", "twilio_functions" ]
stackoverflow_0058786287_twilio_twilio_functions.txt
Q: TextInput react-native-paper remove label on focus I am using the TextInput of React Native Paper (https://callstack.github.io/react-native-paper/text-input.html) Is there a way to not show the label on the border line when we are focusing on a TextInput? <TextInput mode="outlined" label="Email" value={email} onChangeText={email => setEmail(email)} theme={{ colors: { primary: APP_COLORS.primary }}} selectionColor={APP_COLORS.primary} outlineColor={APP_COLORS.grey_low} left={<TextInput.Icon name={() => <AntDesign name="mail" size={22} color="black" />} />} /> A: The label "Email" in black in your picture appears to come from another component not included in your code snippet. If you wish to keep the "Email" label in black, remove the "Email" label in red, but retain the outlined style of the TextInput, you can simply remove the label key of the component: <TextInput mode="outlined" value={email} onChangeText={email => setEmail(email)} theme={{ colors: { primary: APP_COLORS.primary } }} selectionColor={APP_COLORS.primary} outlineColor={APP_COLORS.grey_low} left={ <TextInput.Icon name={() => <AntDesign name="mail" size={22} color="black" /> }/> } />
TextInput react-native-paper remove label on focus
I am using the TextInput of React Native Paper (https://callstack.github.io/react-native-paper/text-input.html) Is there a way to not show the label on the border line when we are focusing on a TextInput? <TextInput mode="outlined" label="Email" value={email} onChangeText={email => setEmail(email)} theme={{ colors: { primary: APP_COLORS.primary }}} selectionColor={APP_COLORS.primary} outlineColor={APP_COLORS.grey_low} left={<TextInput.Icon name={() => <AntDesign name="mail" size={22} color="black" />} />} />
[ "The label \"Email\" in black in your picture appears to come from another component not included in your code snippet.\nIf you wish to keep the \"Email\" label in black, remove the \"Email\" label in red, but retain the outlined style of the TextInput, you can simply remove the label key of the component:\n<TextInput\n mode=\"outlined\"\n value={email}\n onChangeText={email => setEmail(email)}\n theme={{\n colors: { primary: APP_COLORS.primary }\n }}\n selectionColor={APP_COLORS.primary}\n outlineColor={APP_COLORS.grey_low}\n left={\n <TextInput.Icon name={() =>\n <AntDesign\n name=\"mail\"\n size={22}\n color=\"black\"\n />\n }/>\n }\n/>\n\n" ]
[ 0 ]
[]
[]
[ "react_native", "react_native_paper", "textinput" ]
stackoverflow_0074681199_react_native_react_native_paper_textinput.txt
Q: Uncaught ( in promise ) AxiosError: Request failed with status code 403 The goal: get 'suggested videos' data using Youtube v3 RapidAPI for my React + Vite.js project. But, an error occurs, and I don’t know what to do, the file. env in the root folder, with the syntax all correct ( see photos below ). Maybe the problem is in process.env. but I don’t know what exactly is. All dependencies in package.json are up to date. Also, I logged in and subscribed to RapidAPI. How I get data from RapidAPI: import axios from 'axios'; const NODE_ENV = process.env.NODE_ENV; const BASE_URL = 'https://youtube-v31.p.rapidapi.com'; const options = { params: { maxResults: '50' }, headers: { 'X-RapidAPI-Key': NODE_ENV.REACT_APP_RAPID_API_KEY, 'X-RapidAPI-Host': 'youtube-v31.p.rapidapi.com' } }; export const fetchFromAPI = async (url) => { const { data } = await axios.get(`${BASE_URL}/${url}`, options); return data; }; How I parse data: import React, { useState, useEffect } from 'react'; import { Box, Stack, Typography } from '@mui/material'; import { fetchFromAPI } from '../utilities/fetchFromAPI'; import { Videos } from './'; function Feed() { const [selectedCategory, setSelectedCategory] = useState('New'); const [videos, setVideos] = useState([]); useEffect(() => { fetchFromAPI(`search?part=snippet&q=${selectedCategory}`) .then((data) => setVideos(data.items)); }, [selectedCategory]); return ( <Videos videos={videos} /> ) } export default Feed; Photos: How Error looks like: My Directory .env file: REACT_APP_RAPID_API_KEY='91766d6993msh3bd85c80e2cd0a0p118319jsn781536a29badqsw' A: You can access env variable by const envValue = process.env.REACT_APP_RAPID_API_KEY; instead of process.env.NODE_ENV.REACT_APP_RAPID_API_KEY. process.env.NODE_ENV is a string type, so you get the type error when accessing REACT_APP_RAPID_API_KEY from string value. Additionally, process.env.NODE_ENV indicates the current node mode. That's, it is normally production or development or test.
Uncaught ( in promise ) AxiosError: Request failed with status code 403
The goal: get 'suggested videos' data using Youtube v3 RapidAPI for my React + Vite.js project. But, an error occurs, and I don’t know what to do, the file. env in the root folder, with the syntax all correct ( see photos below ). Maybe the problem is in process.env. but I don’t know what exactly is. All dependencies in package.json are up to date. Also, I logged in and subscribed to RapidAPI. How I get data from RapidAPI: import axios from 'axios'; const NODE_ENV = process.env.NODE_ENV; const BASE_URL = 'https://youtube-v31.p.rapidapi.com'; const options = { params: { maxResults: '50' }, headers: { 'X-RapidAPI-Key': NODE_ENV.REACT_APP_RAPID_API_KEY, 'X-RapidAPI-Host': 'youtube-v31.p.rapidapi.com' } }; export const fetchFromAPI = async (url) => { const { data } = await axios.get(`${BASE_URL}/${url}`, options); return data; }; How I parse data: import React, { useState, useEffect } from 'react'; import { Box, Stack, Typography } from '@mui/material'; import { fetchFromAPI } from '../utilities/fetchFromAPI'; import { Videos } from './'; function Feed() { const [selectedCategory, setSelectedCategory] = useState('New'); const [videos, setVideos] = useState([]); useEffect(() => { fetchFromAPI(`search?part=snippet&q=${selectedCategory}`) .then((data) => setVideos(data.items)); }, [selectedCategory]); return ( <Videos videos={videos} /> ) } export default Feed; Photos: How Error looks like: My Directory .env file: REACT_APP_RAPID_API_KEY='91766d6993msh3bd85c80e2cd0a0p118319jsn781536a29badqsw'
[ "You can access env variable by\nconst envValue = process.env.REACT_APP_RAPID_API_KEY;\n\ninstead of process.env.NODE_ENV.REACT_APP_RAPID_API_KEY.\nprocess.env.NODE_ENV is a string type, so you get the type error when accessing REACT_APP_RAPID_API_KEY from string value.\nAdditionally, process.env.NODE_ENV indicates the current node mode.\nThat's, it is normally production or development or test.\n" ]
[ 0 ]
[]
[]
[ "axios", "rapidapi", "reactjs", "vite" ]
stackoverflow_0074680875_axios_rapidapi_reactjs_vite.txt
Q: How to delay ajax using jquery I need to access an API but I dont want to shoot with 100 requests at the same time so I want to make 1 request per second so I write: $.each(added_collections, function(index, collection) { $.each(collection, function(index1, page) { $.each(page, function(index2, product) { setTimeout(function() { $.ajax({ headers: { 'X-CSRF-TOKEN': $('meta[name="csrf-token"]').attr('content') }, type: "POST", url: '/get_data', dataType: "json", data: meta_data, success: function(data) { console.log(data); }, error: function(data){ console.log(data); } }); }, 1000); }); }); }); but this code do not make 1second between requests How I can make 1 ajax call per second A: I'm not sure what data you need to be in that ajax call, but here's a way to throttle in the manner you're after. Build the array of indexes (products) first. Then use a separate helper function to call the ajax and call itself again recursively when complete. let allThings = []; $.each(added_collections, function(index, collection) { $.each(collection, function(index1, page) { $.each(page, function(index2, product) { allThings.push(product) }); }); }); doAjax(0) const doAjax = index => { if (allThings.length >= index) return console.log("DONE"); setTimeout(() => { $.ajax({ headers: { 'X-CSRF-TOKEN': $('meta[name="csrf-token"]').attr('content') }, type: "POST", url: '/get_data', dataType: "json", data: meta_data, success: function(data) { console.log(data); doAjax(++index); }, error: function(data) { console.log(data); } }); }, 1000) } A: Ideally you want to use a throttle function of some kind. 
You can use a library for this, or can write a simple one which may work for your use case like this (this technique can be used for other functions as well) function throttle(fn, interval) { // keep track of the last time the function was called let lastCalled = 0; // return a throttled version of the function return function throttledFunction(...args) { // get the current time const now = Date.now(); // if the function has been called within the interval, wait until the interval has elapsed if (now - lastCalled < interval) { setTimeout(() => throttledFunction(...args), interval - (now - lastCalled)); return; } // update the last time the function was called lastCalled = now; // call the function with the given arguments return fn(...args); } } Then to use it with your Ajax method, something like this function makeRequest() { $.ajax({ headers: { 'X-CSRF-TOKEN': $('meta[name="csrf-token"]').attr('content') }, type: "POST", url: '/get_data', dataType: "json", data: meta_data, success: function(data) { console.log(data); }, error: function(data){ console.log(data); } }); } const makeThrottledRequest = throttle(makeRequest, 1000); $.each(added_collections, function(index, collection) { $.each(collection, function(index1, page) { $.each(page, function(index2, product) { makeThrottledRequest(); }); }); }); The reason the calls all happen at about the same time in your code is all the timeouts begin at about the same time. Ie between each iteration of the loop, basically no time passes, so the timers all end 1 second later.
How to delay ajax using jquery
I need to access an API but I dont want to shoot with 100 requests at the same time so I want to make 1 request per second so I write: $.each(added_collections, function(index, collection) { $.each(collection, function(index1, page) { $.each(page, function(index2, product) { setTimeout(function() { $.ajax({ headers: { 'X-CSRF-TOKEN': $('meta[name="csrf-token"]').attr('content') }, type: "POST", url: '/get_data', dataType: "json", data: meta_data, success: function(data) { console.log(data); }, error: function(data){ console.log(data); } }); }, 1000); }); }); }); but this code do not make 1second between requests How I can make 1 ajax call per second
[ "I'm not sure what data you need to be in that ajax call, but here's a way to throttle in the manner you're after. Build the array of indexes (products) first. Then use a separate helper function to call the ajax and call itself again recursively when complete.\nlet allThings = [];\n\n$.each(added_collections, function(index, collection) {\n $.each(collection, function(index1, page) {\n $.each(page, function(index2, product) {\n allThings.push(product)\n });\n });\n});\n\ndoAjax(0)\n\nconst doAjax = index => {\n if (allThings.length >= index) return console.log(\"DONE\");\n setTimeout(() => {\n $.ajax({\n headers: {\n 'X-CSRF-TOKEN': $('meta[name=\"csrf-token\"]').attr('content')\n },\n type: \"POST\",\n url: '/get_data',\n dataType: \"json\",\n data: meta_data,\n success: function(data) {\n console.log(data);\n doAjax(++index);\n },\n error: function(data) {\n console.log(data);\n }\n });\n }, 1000)\n}\n\n", "Ideally you want to use a throttle function of some kind. You can use a library for this, or can write a simple one which may work for your use case like this (this technique can be used for other functions as well)\nfunction throttle(fn, interval) {\n // keep track of the last time the function was called\n let lastCalled = 0;\n\n // return a throttled version of the function\n return function throttledFunction(...args) {\n // get the current time\n const now = Date.now();\n\n // if the function has been called within the interval, wait until the interval has elapsed\n if (now - lastCalled < interval) {\n setTimeout(() => throttledFunction(...args), interval - (now - lastCalled));\n return;\n }\n\n // update the last time the function was called\n lastCalled = now;\n\n // call the function with the given arguments\n return fn(...args);\n }\n}\n\nThen to use it with your Ajax method, something like this\nfunction makeRequest() {\n $.ajax({\n headers: {\n 'X-CSRF-TOKEN': $('meta[name=\"csrf-token\"]').attr('content')\n },\n type: \"POST\",\n url: 
'/get_data',\n dataType: \"json\",\n data: meta_data,\n success: function(data) {\n console.log(data);\n },\n error: function(data){\n console.log(data);\n }\n });\n}\n\nconst makeThrottledRequest = throttle(makeRequest, 1000);\n\n$.each(added_collections, function(index, collection) {\n $.each(collection, function(index1, page) {\n $.each(page, function(index2, product) {\n makeThrottledRequest();\n });\n });\n});\n\nThe reason the calls all happen at about the same time in your code is all the timeouts begin at about the same time. Ie between each iteration of the loop, basically no time passes, so the timers all end 1 second later.\n" ]
[ 0, 0 ]
[]
[]
[ "ajax", "javascript", "jquery" ]
stackoverflow_0074680835_ajax_javascript_jquery.txt
Q: Override javascript property with getter/setter while still accessing underlying property In service of building an API polyfill I'd like to override a property (in this case width and height) of an element with getters and setters to catch changes to the value and modify it before passing it on to the underlying element. Ideally this process would also be reversible. Something along the lines of this code snippet: var realWidth = null; function patch(targetObject) { realWidth = targetObject.magicalPropertyAccessor("width"); Object.defineProperty(targetObject, 'width', { get: function() { return realWidth / 2; }, set: function(value) { realWidth = value * 2; } }); } function unpatch(targetObject) { if (realWidth) targetObject.magicalPropertySetter('width', realWidth); } The intention of the example being that while the element is patched it will silently double changes to it's dimensions while reporting back the original, unaltered value. If this was a function it would be pretty straightforward, but as a property it's unclear how to cache a reference to the original accessor. A: Thanks to Bergi I figured out that Object.getOwnPropertyDescriptor is exactly what I want. I had tried it previously but missed that the property that I had to go to the object's __proto__ to find the property I was looking for. (Your milage may vary depending on the property you're replacing.) This is the code that worked for me: function WidthPatch(canvas) { var self = this; var fakeWidth = canvas.width; this.canvas = canvas; // Cache the real property this.realWidthProp = Object.getOwnPropertyDescriptor(canvas.__proto__, 'width'); // Replace the property with a custom one Object.defineProperty(canvas, 'width', { configurable: true, enumerable: true, get: function() { return fakeWidth; }, set: function(value) { fakeWidth = value; // This updates the real canvas property, silently doubling it. 
self.realWidthProp.set.call(canvas, fakeWidth * 2); } }); } WidthPatch.prototype.unpatch = function() { // Replace the custom property with the original one. Object.defineProperty(this.canvas, 'width', this.realWidthProp); } A: Reflect offers some nice(r) syntax for this particular use-case. The demo below implements a "WidthPatch" class that overrides a "width" property, returning a value that is 2x the actual value, while simultaneously providing access to the actual value via a new "actualWidth" property. The "UnPatch" method deletes the modified property, reverting to the original behavior. class WidthPatch { static #propertyName = 'width'; static #shadowPropertyName = 'actualWidth'; static Patch(instance) { Reflect.defineProperty(instance, WidthPatch.#shadowPropertyName, { get: _ => WidthPatch.#get(instance) }); Reflect.defineProperty(instance, WidthPatch.#propertyName, { configurable: true, enumerable: true, get: _ => 2 * WidthPatch.#get(instance), set: value => Reflect.set(Reflect.getPrototypeOf(instance), WidthPatch.#propertyName, value, instance) }); } static UnPatch(instance) { Reflect.deleteProperty(instance, WidthPatch.#propertyName); } static #get(instance) { return Reflect.get(Reflect.getPrototypeOf(instance), WidthPatch.#propertyName, instance); } } class Demo { static { const canvas = document.createElement('canvas'); console.log(`width (initial): ${canvas.width}`); WidthPatch.Patch(canvas); console.log(`width (after patching): ${canvas.width}, actualWidth: ${canvas.actualWidth}`); canvas.width = 200; console.log(`width (after setting to 200): ${canvas.width}, actualWidth: ${canvas.actualWidth}`); WidthPatch.UnPatch(canvas); console.log(`width (after unpatching): ${canvas.width}, actualWidth: ${canvas.actualWidth}`); } }
Override javascript property with getter/setter while still accessing underlying property
In service of building an API polyfill I'd like to override a property (in this case width and height) of an element with getters and setters to catch changes to the value and modify it before passing it on to the underlying element. Ideally this process would also be reversible. Something along the lines of this code snippet: var realWidth = null; function patch(targetObject) { realWidth = targetObject.magicalPropertyAccessor("width"); Object.defineProperty(targetObject, 'width', { get: function() { return realWidth / 2; }, set: function(value) { realWidth = value * 2; } }); } function unpatch(targetObject) { if (realWidth) targetObject.magicalPropertySetter('width', realWidth); } The intention of the example being that while the element is patched it will silently double changes to it's dimensions while reporting back the original, unaltered value. If this was a function it would be pretty straightforward, but as a property it's unclear how to cache a reference to the original accessor.
[ "Thanks to Bergi I figured out that Object.getOwnPropertyDescriptor is exactly what I want. I had tried it previously but missed that the property that I had to go to the object's __proto__ to find the property I was looking for. (Your milage may vary depending on the property you're replacing.) This is the code that worked for me:\nfunction WidthPatch(canvas) {\n var self = this;\n var fakeWidth = canvas.width;\n this.canvas = canvas;\n\n // Cache the real property\n this.realWidthProp = Object.getOwnPropertyDescriptor(canvas.__proto__, 'width');\n\n // Replace the property with a custom one\n Object.defineProperty(canvas, 'width', {\n configurable: true,\n enumerable: true,\n get: function() {\n return fakeWidth;\n },\n set: function(value) {\n fakeWidth = value;\n // This updates the real canvas property, silently doubling it.\n self.realWidthProp.set.call(canvas, fakeWidth * 2);\n }\n });\n}\n\nWidthPatch.prototype.unpatch = function() {\n // Replace the custom property with the original one.\n Object.defineProperty(this.canvas, 'width', this.realWidthProp);\n}\n\n", "Reflect offers some nice(r) syntax for this particular use-case.\nThe demo below implements a \"WidthPatch\" class that overrides a \"width\" property, returning a value that is 2x the actual value, while simultaneously providing access to the actual value via a new \"actualWidth\" property. 
The \"UnPatch\" method deletes the modified property, reverting to the original behavior.\n\n\nclass WidthPatch {\n\n static #propertyName = 'width';\n static #shadowPropertyName = 'actualWidth';\n \n static Patch(instance) {\n Reflect.defineProperty(instance, WidthPatch.#shadowPropertyName, {\n get: _ => WidthPatch.#get(instance)\n });\n Reflect.defineProperty(instance, WidthPatch.#propertyName, {\n configurable: true,\n enumerable: true,\n get: _ => 2 * WidthPatch.#get(instance),\n set: value => Reflect.set(Reflect.getPrototypeOf(instance), WidthPatch.#propertyName, value, instance)\n }); \n }\n \n static UnPatch(instance) {\n Reflect.deleteProperty(instance, WidthPatch.#propertyName);\n }\n \n static #get(instance) {\n return Reflect.get(Reflect.getPrototypeOf(instance), WidthPatch.#propertyName, instance);\n }\n}\n\n\nclass Demo {\n static {\n \n const canvas = document.createElement('canvas');\n \n console.log(`width (initial): ${canvas.width}`);\n \n WidthPatch.Patch(canvas); \n console.log(`width (after patching): ${canvas.width}, actualWidth: ${canvas.actualWidth}`);\n \n canvas.width = 200;\n console.log(`width (after setting to 200): ${canvas.width}, actualWidth: ${canvas.actualWidth}`);\n \n WidthPatch.UnPatch(canvas);\n console.log(`width (after unpatching): ${canvas.width}, actualWidth: ${canvas.actualWidth}`);\n \n }\n}\n\n\n\n" ]
[ 3, 0 ]
[]
[]
[ "javascript" ]
stackoverflow_0035493778_javascript.txt
Q: How can I enable node.js application logs on from cpanel? I have a node.js application that sends email using nodemailer. It is working fine from localhost but when I try it from server I can't receive any mail. Since, other activities of application are working fine means I assumed nodemailer is installed properly on server. I tried on both ways using host as my server and using gmail as on of the services. Here, is what I tried last time. This also works fine on local host. But, I don't get any response when I put this on server. I have nodemailer's configuration as: const output = ` <h3>Limit Details</h3> <p>Admin has added ${limit} quota(s) to ${username}.</p> `; // create reusable transporter object using the default SMTP transport let transporter = nodemailer.createTransport({ service: 'gmail', port: 25, secure: false, // true for 465, false for other ports auth: { user: '<gmail>', pass: '<password>' } }); // setup email data with unicode symbols let mailOptions = { from: '"Sender" <sender gmail>', // sender address to: '<receiver gmail>', // list of receivers subject: 'Quota added information.', // Subject line html: output // html body }; // send mail with defined transport object transporter.sendMail(mailOptions, (error, info) => { if (error) { return console.log(error); } console.log('Message sent: %s', info.messageId); console.log('Preview URL: %s', nodemailer.getTestMessageUrl(info)); }); For testing, I have consoled messageId and Preview Url So, from where I can view this console message in cpanel? How can I view such consoled stuffs from my application in cpanel?? A: Unfortunately there is no easy way to access node's logs on a cpanel server as far as I know. 
What I usually do is I set up log4js to write logs to a file: const log4js = require('log4js'); log4js.configure({ appenders: { everything: { type: 'file', filename: 'logs.log' } }, categories: { default: { appenders: ['everything'], level: 'ALL' } } }); const logger = log4js.getLogger(); Then you can make logs with: logger.debug('log message'); You can also serve the log file with the server: app.get('/log', (req, res) => { res.sendFile(path.join(__dirname + '/logs.log')); }); A: cPanel's Application manager isn't very fleshed-out. I've been trying it out and comparing it to what I'm used to (PM2) and will be switching back to PM2. I did put up a feature request to cPanel if anyone's interested in supporting. Otherwise, the answer is to write out using a custom logger such as @zenott suggests. However, I prefer to overwrite console.log though so it's easy to switch back to regular console logging if needed. My solution: const moment = require('moment') const fs = require('fs') let logStream = fs.createWriteStream('log.txt') let console = {} console.log = (obj) => { var s = '' if (typeof obj === 'string') s = obj else s = JSON.stringify(obj) var dS = '[' + moment().format(momentFormat) + '] ' s = `[${dS}] ${s}'\n'` logStream.write(s) } A: There is an easier way to access your NodeJS app logs on a cPanel server. Instead of using: console.log() Use: console.error() This will populate the stderr.log file located in your NodeJS app's root folder on your cPanel without holding back. It will even output Circular and JSON formats without any need to stringify(). A: There is also this package "writelog" which can replace the console logs. You can name each log file differently and it keeps the files to a minimum deleting old entries, but you need to add it to each console.log https://www.npmjs.com/package/writelog See what suits your needs
How can I enable node.js application logs on from cpanel?
I have a node.js application that sends email using nodemailer. It is working fine from localhost but when I try it from server I can't receive any mail. Since, other activities of application are working fine means I assumed nodemailer is installed properly on server. I tried on both ways using host as my server and using gmail as on of the services. Here, is what I tried last time. This also works fine on local host. But, I don't get any response when I put this on server. I have nodemailer's configuration as: const output = ` <h3>Limit Details</h3> <p>Admin has added ${limit} quota(s) to ${username}.</p> `; // create reusable transporter object using the default SMTP transport let transporter = nodemailer.createTransport({ service: 'gmail', port: 25, secure: false, // true for 465, false for other ports auth: { user: '<gmail>', pass: '<password>' } }); // setup email data with unicode symbols let mailOptions = { from: '"Sender" <sender gmail>', // sender address to: '<receiver gmail>', // list of receivers subject: 'Quota added information.', // Subject line html: output // html body }; // send mail with defined transport object transporter.sendMail(mailOptions, (error, info) => { if (error) { return console.log(error); } console.log('Message sent: %s', info.messageId); console.log('Preview URL: %s', nodemailer.getTestMessageUrl(info)); }); For testing, I have consoled messageId and Preview Url So, from where I can view this console message in cpanel? How can I view such consoled stuffs from my application in cpanel??
[ "Unfortunately there is no easy way to access node's logs on a cpanel server as far as I know.\nWhat I usually do is I set up log4js to write logs to a file:\nconst log4js = require('log4js');\n\nlog4js.configure({\n appenders: { everything: { type: 'file', filename: 'logs.log' } },\n categories: { default: { appenders: ['everything'], level: 'ALL' } }\n});\n\nconst logger = log4js.getLogger();\n\nThen you can make logs with:\nlogger.debug('log message');\n\nYou can also serve the log file with the server:\napp.get('/log', (req, res) => {\n res.sendFile(path.join(__dirname + '/logs.log'));\n});\n\n", "cPanel's Application manager isn't very fleshed-out. I've been trying it out and comparing it to what I'm used to (PM2) and will be switching back to PM2. \nI did put up a feature request to cPanel if anyone's interested in supporting.\nOtherwise, the answer is to write out using a custom logger such as @zenott suggests. However, I prefer to overwrite console.log though so it's easy to switch back to regular console logging if needed.\nMy solution:\nconst moment = require('moment')\nconst fs = require('fs')\nlet logStream = fs.createWriteStream('log.txt')\nlet console = {}\nconsole.log = (obj) => {\n var s = ''\n if (typeof obj === 'string')\n s = obj\n else\n s = JSON.stringify(obj)\n\n var dS = '[' + moment().format(momentFormat) + '] '\n s = `[${dS}] ${s}'\\n'`\n logStream.write(s)\n}\n\n", "There is an easier way to access your NodeJS app logs on a cPanel server.\nInstead of using:\nconsole.log()\n\nUse:\nconsole.error()\n\nThis will populate the stderr.log file located in your NodeJS app's root folder on your cPanel without holding back.\nIt will even output Circular and JSON formats without any need to stringify().\n", "There is also this package \"writelog\" which can replace the console logs. 
You can name each log file differently and it keeps the files to a minimum deleting old entries, but you need to add it to each console.log\nhttps://www.npmjs.com/package/writelog\nSee what suits your needs\n" ]
[ 3, 2, 0, 0 ]
[]
[]
[ "cpanel", "error_logging", "node.js" ]
stackoverflow_0056200795_cpanel_error_logging_node.js.txt
Q: how can I inject gradle ext properties I am trying to pass project version and compilation time to my Quarkus project via build.gradle and GradleExtraProperties. I have in my gradle.build file: version '0.0.0-SNAPSHOT' ext { buildTime = new java.text.SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ").format(new Date()) } and in my Java source: @ConfigProperty(name = "version") String version; @ConfigProperty(name="buildTime") String buildTime; While version gets properly injected, buildTime fails with ConfigurationException: Failed to load config value of type class java.lang.String for: buildTime Does anybody know if and how that can be accomplished? A: The following worked for me: @ConfigProperty(name = "buildTime", defaultValue="defaultValue") String buildTime; A: The answer was simple and much more embarrassing: If one runs Quarkus in dev mode, Gradle does not build the project. After I run gradew build everything was just fine. SMH
how can I inject gradle ext properties
I am trying to pass project version and compilation time to my Quarkus project via build.gradle and GradleExtraProperties. I have in my gradle.build file: version '0.0.0-SNAPSHOT' ext { buildTime = new java.text.SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ").format(new Date()) } and in my Java source: @ConfigProperty(name = "version") String version; @ConfigProperty(name="buildTime") String buildTime; While version gets properly injected, buildTime fails with ConfigurationException: Failed to load config value of type class java.lang.String for: buildTime Does anybody know if and how that can be accomplished?
[ "The following worked for me:\n@ConfigProperty(name = \"buildTime\", defaultValue=\"defaultValue\")\nString buildTime;\n\n", "The answer was simple and much more embarrassing: If one runs Quarkus in dev mode, Gradle does not build the project.\nAfter I run gradew build everything was just fine.\nSMH\n" ]
[ 1, 0 ]
[]
[]
[ "gradle", "java", "microprofile", "quarkus" ]
stackoverflow_0074681081_gradle_java_microprofile_quarkus.txt
Q: Why might Wireshark and NodeJS disagree about a packet's contents? I'm working with raw-socket (a node module for sending raw data out on the network) and playing with their Ping example. I have Wireshark set up to monitor traffic. I can see my ICMP packet go out, and a response comes back. Here's where things get strange. Wireshark shows the following packet: IP: 4500003c69ea00004001e2fec0a85647c0a85640 ICMP: 00004b5200010a096162636465666768696a6b6c6d6e6f7071727374757677616263646566676869 However, the node event handler that fires when data comes in is showing: IP: 4500280069ea00004001e2fec0a85647c0a85640 ICMP: 00004b5200010a096162636465666768696a6b6c6d6e6f7071727374757677616263646566676869 The ICMP components match. However, bytes 0x02 and 0x03 (the Length bytes) differ. Wireshark shows 0x003c or 60 bytes (as expected). Node shows 0x2800 or 10kB... which is not what is expected. Notably, the checksum (bytes 0x18 and 0x19) are the same in each case, although it's only valid for the Wireshark packet. So, here's the question: what might lead to this discrepancy? I'm inclined to believe Wireshark is correct since 60 bytes is the right size for an ICMP reply, but why is Node wrong here? OSX note The docs for this module point out that, on OSX, it will try to use SOCK_DGRAM if SOCK_RAW is not permitted. I have tried this with that function disabled and using sudo and got the same responses as before. Github issue It looks like https://github.com/nospaceships/node-raw-socket/issues/60 is open for this very issue, but it remains unclear if this is a code bug or a usage problem... A: This is due to a FreeBSD bug (feature?) which subtracts the length of the IP header from the IP length header field and also flips it to host byte order. https://cseweb.ucsd.edu//~braghava/notes/freebsd-sockets.txt
Why might Wireshark and NodeJS disagree about a packet's contents?
I'm working with raw-socket (a node module for sending raw data out on the network) and playing with their Ping example. I have Wireshark set up to monitor traffic. I can see my ICMP packet go out, and a response comes back. Here's where things get strange. Wireshark shows the following packet: IP: 4500003c69ea00004001e2fec0a85647c0a85640 ICMP: 00004b5200010a096162636465666768696a6b6c6d6e6f7071727374757677616263646566676869 However, the node event handler that fires when data comes in is showing: IP: 4500280069ea00004001e2fec0a85647c0a85640 ICMP: 00004b5200010a096162636465666768696a6b6c6d6e6f7071727374757677616263646566676869 The ICMP components match. However, bytes 0x02 and 0x03 (the Length bytes) differ. Wireshark shows 0x003c or 60 bytes (as expected). Node shows 0x2800 or 10kB... which is not what is expected. Notably, the checksum (bytes 0x18 and 0x19) are the same in each case, although it's only valid for the Wireshark packet. So, here's the question: what might lead to this discrepancy? I'm inclined to believe Wireshark is correct since 60 bytes is the right size for an ICMP reply, but why is Node wrong here? OSX note The docs for this module point out that, on OSX, it will try to use SOCK_DGRAM if SOCK_RAW is not permitted. I have tried this with that function disabled and using sudo and got the same responses as before. Github issue It looks like https://github.com/nospaceships/node-raw-socket/issues/60 is open for this very issue, but it remains unclear if this is a code bug or a usage problem...
[ "This is due to a FreeBSD bug (feature?) which subtracts the length of the IP header from the IP length header field and also flips it to host byte order.\nhttps://cseweb.ucsd.edu//~braghava/notes/freebsd-sockets.txt\n" ]
[ 1 ]
[]
[]
[ "networking", "node.js" ]
stackoverflow_0074448657_networking_node.js.txt
Q: What is a Simple Example for using the Boost Graph Library I am trying to use the BGL, I find the documentation precise but lacks more examples for simple cases. My goal is described below (after reading the documentation I still couldn't do this): struct Vertex { double m_d; std::size_t m_id; }; //or struct Vertex { double m_d; std::size_t id() const; }; Goals: A directed graph G (what is the difference between a bidirectional and directed other than in_edges please?) G can hold the vertex type Vertex. get the vertex by id from G and change the value m_d in the Vertex struct when I want. add, remove verticies and edges between verticies and also supports costs i.e. cost(edge). Could you please write me an example on how to do this with BGL please? I beleive I need MutableBidirectionalGraph? A: A directed graph G Straight-away: struct Vertex { double m_d = 0; size_t m_id = -1; // or std::size_t id() const; }; struct Edge { double cost = 0; }; using Graph = boost::adjacency_list<boost::vecS, boost::vecS, boost::bidirectionalS, Vertex, Edge>; (what is the difference between a bidirectional and directed other than in_edges please?) There is no other difference, except of course the complexity guarantees for enumerating incoming edges, and a linear overhead upon insertion of edges G can hold the vertex type Vertex. See 0. get the vertex by id from G auto find_by_id = [&g](size_t id) -> Vertex& { auto vv = boost::make_iterator_range(vertices(g)); auto vd = find_if(vv, [&, id](auto vd) { return g[vd].m_id == id; }); return g[*vd]; }; and change the value m_d in the Vertex struct when I want. if (i_want()) { g[vd].m_id += 1; } Or, auto idmap = boost::get(&Vertex::m_id, g); if (i_want()) { idmap[vd] += 1; } or even put(idmap, vd, 42); or even more unmarked: get(boost::vertex_bundle, g, vd).m_id = 999; add, remove vertices remove_vertex(vd, g); and edges between vertices clear_vertex(vd, g); and also supports costs i.e. cost(edge). 
Wow that really has nothing to do with any of the above. But it's really the same as with vertex ids: if (i_want()) { g[ed].cost = new_cost; } Or, auto cost = boost::get(&Edge::cost, g); if (i_want()) { cost[ed] = new_cost; } or even put(cost, ed, new_cost); or even more unmarked: get(boost::edge_bundle, g, ed).cost = new_cost; Live Demo Live On Coliru #include <boost/graph/adjacency_list.hpp> #include <boost/graph/graph_utility.hpp> #include <boost/range/algorithm.hpp> #include <iostream> struct Vertex { double m_d = 0; size_t m_id = -1; // or std::size_t id() const; }; struct Edge { double cost = 0; }; using Graph = boost::adjacency_list<boost::vecS, boost::vecS, boost::bidirectionalS, Vertex, Edge>; using boost::make_iterator_range; int main(){ Graph g; auto v0 = add_vertex({0.1, 100}, g); auto v1 = add_vertex({0.2, 200}, g); auto v2 = add_vertex({0.3, 300}, g); auto v3 = add_vertex({0.4, 400}, g); auto v4 = add_vertex({0.5, 500}, g); auto v5 = add_vertex({0.6, 600}, g); add_edge(v0, v2, Edge{1.5}, g); add_edge(v1, v3, Edge{2.5}, g); add_edge(v4, v1, Edge{3.5}, g); add_edge(v2, v5, Edge{4.5}, g); auto idmap = boost::get(&Vertex::m_id, g); auto cost = boost::get(&Edge::cost, g); auto find_by_id = [&g](size_t id) -> Vertex& { auto vv = boost::make_iterator_range(vertices(g)); auto vd = find_if(vv, [&, id](auto vd) { return g[vd].m_id == id; }); return g[*vd]; }; print_graph(g, idmap, std::cout << "original: "); auto i_want = [](auto vd) { return (vd % 2); // when I want }; for (auto vd : make_iterator_range(vertices(g))) { if (i_want(vd)) g[vd].m_id += 1; if (i_want(vd)) idmap[vd] += 1; //put(idmap, vd, 42); //get(boost::vertex_bundle, g, vd).m_id = 999; } print_graph(g, idmap, std::cout << "altered: "); clear_vertex(v3, g); remove_vertex(v3, g); // undefined behaviour unless edges cleared print_graph(g, idmap, std::cout << "removed: "); for (auto ed : make_iterator_range(edges(g))) { std::cout << ed << " cost " << cost[ed] << "\n"; } for (auto ed : 
make_iterator_range(edges(g))) { cost[ed] *= 111; } for (auto ed : make_iterator_range(edges(g))) { std::cout << ed << " cost " << cost[ed] << "\n"; } }; Prints original: 100 --> 300 200 --> 400 300 --> 600 400 --> 500 --> 200 600 --> altered: 100 --> 300 202 --> 402 300 --> 602 402 --> 500 --> 202 602 --> removed: 100 --> 300 202 --> 300 --> 602 500 --> 202 602 --> (0,2) cost 1.5 (3,1) cost 3.5 (2,4) cost 4.5 (0,2) cost 166.5 (3,1) cost 388.5 (2,4) cost 499.5
What is a Simple Example for using the Boost Graph Library
I am trying to use the BGL, I find the documentation precise but lacks more examples for simple cases. My goal is described below (after reading the documentation I still couldn't do this): struct Vertex { double m_d; std::size_t m_id; }; //or struct Vertex { double m_d; std::size_t id() const; }; Goals: A directed graph G (what is the difference between a bidirectional and directed other than in_edges please?) G can hold the vertex type Vertex. get the vertex by id from G and change the value m_d in the Vertex struct when I want. add, remove verticies and edges between verticies and also supports costs i.e. cost(edge). Could you please write me an example on how to do this with BGL please? I beleive I need MutableBidirectionalGraph?
[ "\n\n\nA directed graph G\n\nStraight-away:\nstruct Vertex {\n double m_d = 0;\n size_t m_id = -1;\n // or std::size_t id() const;\n};\n\nstruct Edge {\n double cost = 0;\n};\n\nusing Graph =\n boost::adjacency_list<boost::vecS, boost::vecS, boost::bidirectionalS, Vertex, Edge>;\n\n\n(what is the difference between a bidirectional and directed other than\nin_edges please?)\n\nThere is no other difference, except of course the complexity guarantees\nfor enumerating incoming edges, and a linear overhead upon insertion of edges\n\n\n\nG can hold the vertex type Vertex.\n\nSee 0.\n\n\n\nget the vertex by id from G\n\n auto find_by_id = [&g](size_t id) -> Vertex& {\n auto vv = boost::make_iterator_range(vertices(g));\n auto vd = find_if(vv, [&, id](auto vd) { return g[vd].m_id == id; });\n return g[*vd];\n };\n\n\nand change the value m_d in the Vertex struct when I want.\n\nif (i_want()) {\n g[vd].m_id += 1;\n}\n\nOr,\nauto idmap = boost::get(&Vertex::m_id, g);\n\nif (i_want()) {\n idmap[vd] += 1;\n}\n\nor even\nput(idmap, vd, 42);\n\nor even more unmarked:\nget(boost::vertex_bundle, g, vd).m_id = 999;\n\n\n\n\nadd, remove vertices\n\n remove_vertex(vd, g);\n\n\nand edges between vertices\n\n clear_vertex(vd, g);\n\n\nand also supports costs i.e. cost(edge).\n\nWow that really has nothing to do with any of the above. 
But it's really the same as with vertex ids:\nif (i_want()) {\n g[ed].cost = new_cost;\n}\n\nOr,\nauto cost = boost::get(&Edge::cost, g);\n\nif (i_want()) {\n cost[ed] = new_cost;\n}\n\nor even\nput(cost, ed, new_cost);\n\nor even more unmarked:\nget(boost::edge_bundle, g, ed).cost = new_cost;\n\n\n\nLive Demo\nLive On Coliru\n#include <boost/graph/adjacency_list.hpp>\n#include <boost/graph/graph_utility.hpp>\n#include <boost/range/algorithm.hpp>\n#include <iostream>\n\nstruct Vertex {\n double m_d = 0;\n size_t m_id = -1;\n // or std::size_t id() const;\n};\n\nstruct Edge {\n double cost = 0;\n};\n\nusing Graph =\n boost::adjacency_list<boost::vecS, boost::vecS, boost::bidirectionalS, Vertex, Edge>;\n\nusing boost::make_iterator_range;\n\nint main(){\n Graph g;\n auto v0 = add_vertex({0.1, 100}, g);\n auto v1 = add_vertex({0.2, 200}, g);\n auto v2 = add_vertex({0.3, 300}, g);\n auto v3 = add_vertex({0.4, 400}, g);\n auto v4 = add_vertex({0.5, 500}, g);\n auto v5 = add_vertex({0.6, 600}, g);\n\n add_edge(v0, v2, Edge{1.5}, g);\n add_edge(v1, v3, Edge{2.5}, g);\n add_edge(v4, v1, Edge{3.5}, g);\n add_edge(v2, v5, Edge{4.5}, g);\n\n auto idmap = boost::get(&Vertex::m_id, g);\n auto cost = boost::get(&Edge::cost, g);\n\n auto find_by_id = [&g](size_t id) -> Vertex& {\n auto vv = boost::make_iterator_range(vertices(g));\n auto vd = find_if(vv, [&, id](auto vd) { return g[vd].m_id == id; });\n return g[*vd];\n };\n\n print_graph(g, idmap, std::cout << \"original: \");\n\n auto i_want = [](auto vd) {\n return (vd % 2); // when I want\n };\n\n for (auto vd : make_iterator_range(vertices(g))) {\n if (i_want(vd))\n g[vd].m_id += 1;\n if (i_want(vd))\n idmap[vd] += 1;\n //put(idmap, vd, 42);\n //get(boost::vertex_bundle, g, vd).m_id = 999;\n }\n\n print_graph(g, idmap, std::cout << \"altered: \");\n\n clear_vertex(v3, g);\n remove_vertex(v3, g); // undefined behaviour unless edges cleared\n\n print_graph(g, idmap, std::cout << \"removed: \");\n\n for (auto ed : 
make_iterator_range(edges(g))) {\n std::cout << ed << \" cost \" << cost[ed] << \"\\n\";\n }\n\n for (auto ed : make_iterator_range(edges(g))) {\n cost[ed] *= 111;\n }\n\n for (auto ed : make_iterator_range(edges(g))) {\n std::cout << ed << \" cost \" << cost[ed] << \"\\n\";\n }\n};\n\nPrints\noriginal: 100 --> 300 \n200 --> 400 \n300 --> 600 \n400 --> \n500 --> 200 \n600 --> \naltered: 100 --> 300 \n202 --> 402 \n300 --> 602 \n402 --> \n500 --> 202 \n602 --> \nremoved: 100 --> 300 \n202 --> \n300 --> 602 \n500 --> 202 \n602 --> \n(0,2) cost 1.5\n(3,1) cost 3.5\n(2,4) cost 4.5\n(0,2) cost 166.5\n(3,1) cost 388.5\n(2,4) cost 499.5\n\n" ]
[ 3 ]
[]
[]
[ "boost", "c++", "graph" ]
stackoverflow_0074680998_boost_c++_graph.txt
Q: Graph drawing in YASM 8086 I don't know if anyone will be able to help me, but I will try to explain my problem as clearly, as possible. I am learning 8086 FPU with YASM. I want to draw y = cos(x^2+x+1) This is how this graph looks like. I am doing it in DosBox, and I am emulating 8086 processor. My problem: How to normalize graph's(red) ratio, so it could be readable in DosBox. So far, I managed to draw a coordinate plane. And I think I managed to draw this graph, but the ratio is too small to check if it is really good. This is how it looks so far GRAPH IMAGE](https://i.stack.imgur.com/0Hy6X.jpg). I am using FPU, to calculate Y coordinates. I am using stack to calculate Y coordinate. X coordinate will go from 320 to 0 (with dec si), as you can see in my code. In this code, I am trying to calculate Y (di) every with different X (si). And put the pixel in the spot, that it has to be in. ;------------------------------------------------------------------------ %include 'yasmmac.inc' org 100h ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; section .text ; Code starts here startas: call procSetGraphicsMode ;;;;;;;;;;;;;;;;;;;;;;;;; COORDINATE PLANE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; mov di, 200 mov si, 160 .vertical: mov cl, 15 call procPutPixel dec di jnz .vertical mov si, 320 .horizontal: mov di, 100 mov cl, 15 ; 15 = white color call procPutPixel dec si jnz .horizontal ; y = di ; x = si mov si, 320 mov di, 100 ;;;;;;;;;;;;;;;;;;;;;;;; GRAPH ;;;;;;;;;;;;;;;;;;;;;;;; .loop: mov [value1], si mov [value2], si finit fild dword [value1] fild dword [value1] fild dword [value3] ; move to stack 1 fmul dword [value1] fadd st0, st1 ; add x in stack head fadd st0, st3 ; add 1 in stack head fcos ; cos(x^2 + x + 1) frndint ; round fistp word [y] ; Load rounded answer to [y] variable add di, [y] ; Add [y] to di mov cl, 4 ; 4 = Red color call procPutPixel dec si jnz .loop ;;;;;;;;;;;;;;;;;;;;;;;; WAIT FOR ESC ;;;;;;;;;;;;;;;;;;;;;;;; call procWaitForEsc 
exit %include 'yasmlib.asm' ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; section .data ; data value1: dd 0.0 value2: dd 0.0 value3: dd 1.0 xc: dw 160 yc: dw 100 x: dw 0 y: dw 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; section .bss yasmlim.asm If it helps A: The axes are the easy part Since you are working on a graphics screen that has a resolution of 320 x 200, the X coordinates range from 0 to 319 and the Y coordinates range from 0 to 199. Your code erroneously outputs in X's of 320 and y's of 200 and you don't use the X's of 0 or the Y's of 0. xor di, di ; Y .vertical: mov si, 160 ; X mov cl, 15 ; Color call procPutPixel inc di cmp di, 199 ; MaxY jbe .vertical xor si, si ; X .horizontal: mov di, 100 ; Y mov cl, 15 ; Color call procPutPixel inc si cmp si, 319 ; MaxX jbe .horizontal ; y = di ; x = si mov si, 320 mov di, 100 The data definitions value1: dd 0.0 value2: dd 0.0 value3: dd 1.0 Your FPU instructions are dealing with values stored in memory, but are doing so in incompatible ways! eg. value1 can't be both an integer dword and an single precision float at the same time. You primed this variable with the value from the 16-bit integer register SI, so deal with it as a 16-bit integer. valueX: dw 0 The calculation finit (better use the non-waiting fninit) is best held outside of the loop. frndint is redundant since fistp word [y] will automatically do the rounding for you. fmul st0 can square the value from st0. fld1 can load the value 1 to the FPU stack. No need for a memory based variable and also no need to reload it for every X. xor si, si ; X fninit fld1 ; CONST 1 .loop: mov [valueX], si ; X fild word [valueX] ; st0 = X fmul st0 ; st0 = X^2 fiadd word [valueX] ; st0 = X^2 + X fadd st0, st1 ; st0 = X^2 + X + 1 fcos ; st0 = cos(X^2 + X + 1) [-1,+1] ... 
fistp word [y] ; {-1,0,+1} mov di, 100 ; Y add di, [y] ; -> Y={99,100,101} mov cl, 4 ; Color call procPutPixel inc si cmp si, 319 jbe .loop fstp ; Clean FPU stack, removing the CONST 1 Getting the output readable The cosine value that the FPU delivers ranges from -1 to +1, but storing this value in the integer memory variable y will produce but 3 discrete values -1, 0, and +1. What is needed is scaling the cosine by some factor like say 60. Add this to the data: scaleY: dw 60 and at the ellipses you add fimul word [scaleY], producing: fcos ; st0 = cos(X^2 + X + 1) [-1,+1] fimul word [scaleY] ; st0 = cos(X^2 + X + 1) * 60 fistp word [y] ; [-60,+60] mov di, 100 ; Y add di, [y] ; -> Y=[40,160]
Graph drawing in YASM 8086
I don't know if anyone will be able to help me, but I will try to explain my problem as clearly, as possible. I am learning 8086 FPU with YASM. I want to draw y = cos(x^2+x+1) This is how this graph looks like. I am doing it in DosBox, and I am emulating 8086 processor. My problem: How to normalize graph's(red) ratio, so it could be readable in DosBox. So far, I managed to draw a coordinate plane. And I think I managed to draw this graph, but the ratio is too small to check if it is really good. This is how it looks so far GRAPH IMAGE](https://i.stack.imgur.com/0Hy6X.jpg). I am using FPU, to calculate Y coordinates. I am using stack to calculate Y coordinate. X coordinate will go from 320 to 0 (with dec si), as you can see in my code. In this code, I am trying to calculate Y (di) every with different X (si). And put the pixel in the spot, that it has to be in. ;------------------------------------------------------------------------ %include 'yasmmac.inc' org 100h ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; section .text ; Code starts here startas: call procSetGraphicsMode ;;;;;;;;;;;;;;;;;;;;;;;;; COORDINATE PLANE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; mov di, 200 mov si, 160 .vertical: mov cl, 15 call procPutPixel dec di jnz .vertical mov si, 320 .horizontal: mov di, 100 mov cl, 15 ; 15 = white color call procPutPixel dec si jnz .horizontal ; y = di ; x = si mov si, 320 mov di, 100 ;;;;;;;;;;;;;;;;;;;;;;;; GRAPH ;;;;;;;;;;;;;;;;;;;;;;;; .loop: mov [value1], si mov [value2], si finit fild dword [value1] fild dword [value1] fild dword [value3] ; move to stack 1 fmul dword [value1] fadd st0, st1 ; add x in stack head fadd st0, st3 ; add 1 in stack head fcos ; cos(x^2 + x + 1) frndint ; round fistp word [y] ; Load rounded answer to [y] variable add di, [y] ; Add [y] to di mov cl, 4 ; 4 = Red color call procPutPixel dec si jnz .loop ;;;;;;;;;;;;;;;;;;;;;;;; WAIT FOR ESC ;;;;;;;;;;;;;;;;;;;;;;;; call procWaitForEsc exit %include 'yasmlib.asm' 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; section .data ; data value1: dd 0.0 value2: dd 0.0 value3: dd 1.0 xc: dw 160 yc: dw 100 x: dw 0 y: dw 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; section .bss yasmlim.asm If it helps
[ "The axes are the easy part\nSince you are working on a graphics screen that has a resolution of 320 x 200, the X coordinates range from 0 to 319 and the Y coordinates range from 0 to 199. Your code erroneously outputs in X's of 320 and y's of 200 and you don't use the X's of 0 or the Y's of 0.\n xor di, di ; Y\n.vertical:\n mov si, 160 ; X\n mov cl, 15 ; Color\n call procPutPixel\n inc di\n cmp di, 199 ; MaxY\n jbe .vertical\n\n xor si, si ; X\n.horizontal: \n mov di, 100 ; Y\n mov cl, 15 ; Color\n call procPutPixel\n inc si\n cmp si, 319 ; MaxX\n jbe .horizontal\n\n ; y = di\n ; x = si\n mov si, 320\n mov di, 100\n\nThe data definitions\n\nvalue1: \ndd 0.0\nvalue2:\ndd 0.0\nvalue3:\ndd 1.0\n\n\nYour FPU instructions are dealing with values stored in memory, but are doing so in incompatible ways! eg. value1 can't be both an integer dword and an single precision float at the same time. You primed this variable with the value from the 16-bit integer register SI, so deal with it as a 16-bit integer.\nvalueX: dw 0\n\nThe calculation\nfinit (better use the non-waiting fninit) is best held outside of the loop.\nfrndint is redundant since fistp word [y] will automatically do the rounding for you.\nfmul st0 can square the value from st0.\nfld1 can load the value 1 to the FPU stack. 
No need for a memory based variable and also no need to reload it for every X.\n xor si, si ; X\n fninit\n fld1 ; CONST 1\n.loop:\n mov [valueX], si ; X\n fild word [valueX] ; st0 = X\n fmul st0 ; st0 = X^2\n fiadd word [valueX] ; st0 = X^2 + X\n fadd st0, st1 ; st0 = X^2 + X + 1\n fcos ; st0 = cos(X^2 + X + 1) [-1,+1]\n ...\n fistp word [y] ; {-1,0,+1}\n mov di, 100 ; Y\n add di, [y] ; -> Y={99,100,101}\n mov cl, 4 ; Color\n call procPutPixel\n inc si\n cmp si, 319\n jbe .loop\n fstp ; Clean FPU stack, removing the CONST 1\n\nGetting the output readable\nThe cosine value that the FPU delivers ranges from -1 to +1, but storing this value in the integer memory variable y will produce but 3 discrete values -1, 0, and +1.\nWhat is needed is scaling the cosine by some factor like say 60.\nAdd this to the data:\nscaleY: dw 60\n\nand at the ellipses you add fimul word [scaleY], producing:\n fcos ; st0 = cos(X^2 + X + 1) [-1,+1]\n fimul word [scaleY] ; st0 = cos(X^2 + X + 1) * 60\n fistp word [y] ; [-60,+60]\n mov di, 100 ; Y\n add di, [y] ; -> Y=[40,160]\n\n" ]
[ 1 ]
[]
[]
[ "assembly", "fpu", "graph", "x86", "yasm" ]
stackoverflow_0074640974_assembly_fpu_graph_x86_yasm.txt
Q: Using cell input into function and ovewriting result into result in same cell in Google Sheet In Google Sheet, I would like to take the input of a cell, make a calculation, and display the result in the same cell in a different format. This would likely always be a percentage value that I would use conditional formatting to color the cell to provide a 'dashboard' view of statistics. Example would be usage statistics for a month. Assets Records limit 50 1000 November 29 295 Assets Records limit 50 1000 November 58% 30% I found a Quora post detailing how to create your own scripts, so I believe I have the baseline of taking and modifying content of a cell: function onEdit() { var ss = SpreadsheetApp.getActiveSpreadsheet(); var cell = ss.getActiveSelection(); var cell_input = cell.getValue(); var cell_address = cell.getA1Notation() var cell2 = ss.getRange(cell_address); cell2.setFormula('=2*'+cell_input) } In this example, I'm unsure how to reference the cell it should be dividing against. A: Here's a general example that should give you an idea of how to manipulate the cells, formats and values in Sheets: function onEdit(e) { var ss = SpreadsheetApp.getActiveSpreadsheet(); var cell = ss.getActiveSelection(); var cell_input = cell.getValue(); var divisor = cell.offset(-1, 0) if (e.range.getA1Notation()=="B3" || "C3"){ cell.setNumberFormat("@").setValue(Math.ceil((cell_input / divisor.getValue()) * 100) + "%") } } If you create a sheet that looks like this you will get the behavior you were looking for in B3 and C3: How it works: Add the e parameter to the onEdit(e) trigger so you can use e.range to read the range that called the function. To get the cell you're dividing against you need to either know its exact range or its relative position to the cell that called it. Since in your sample the percentage cell is below the divisor, you can just offset() it by -1, which returns the cell above it. 
If the relative position changes you'll need to take this into account to modify the offset but it should work in general if your table has a consistent structure. You'll need to use an if to fire the trigger only in a specific range, otherwise the entire sheet will be affected. You do this by comparing the caller e.range to your desired range. In this example for the sake of simplicity I just had two cells so I just compared the individual B3 and C3 cells to e.range.getA1Notation(), but if you want a bigger range you'll probably want to use a more advanced technique like the ones in this question To get around the format problem described in TheWizEd's comment I'm forcing the cell to a text value using setNumberFormat("@"). This way if you enter a value a second time it will read it as a number rather than a percent. I tested using the value elsewhere in the sheet and it still works as a percentage when needed, but watch out for unintended behavior. Sources: Simple triggers Range object documentation A: When working with triggers, take advantage of the event object. Replace function onEdit(){ var ss = SpreadsheetApp.getActiveSpreadsheet(); var cell = ss.getActiveSelection(); var cell_input = cell.getValue(); by function onEdit(e){ var cell_input = e.value ?? 0; Notes: SpreasheetApp.Spreadsheet.getActiveSeletion() returns the first range that belongs to the active selection, this might cause problems because the active seleccion could include multiple cells, and the current cell could be any cell in the selection even one cell that is not part of the returned range. SpreadsheetApp.Range.getValue() returns the value of the top left cell in the range. A user could select multiple cells and use the tab key to change the current cell, so under certain circunstances the value obtained this way might not be the value of the edited cell. e.value ?? 0 is used to get 0 as the default value, i.e., when a cell is cleared. 
As onEdit is triggered when any cell is edited your script should include a condition to only change the corresponding cells. function onEdit(e){ const cell_input = e.value ?? 0; if(e.range.rowStart === 3 && [2,3].includes(e.range.columnStart)){ const newValue = (100 * cell_input / e.range.offset(-1,0).getValue()).toFixed() + '%'; e.range.setValue(newValue); } } The above uses SpreadsheetApp.Range.offset to get the cell above of the edited cell. Writes a the percentage as string, taking advange of the Google Sheets automatic data type assignation. References https://developers.google.com/apps-script/guides/triggers https://developers.google.com/apps-script/guides/triggers/events
Using cell input into function and ovewriting result into result in same cell in Google Sheet
In Google Sheet, I would like to take the input of a cell, make a calculation, and display the result in the same cell in a different format. This would likely always be a percentage value that I would use conditional formatting to color the cell to provide a 'dashboard' view of statistics. Example would be usage statistics for a month. Assets Records limit 50 1000 November 29 295 Assets Records limit 50 1000 November 58% 30% I found a Quora post detailing how to create your own scripts, so I believe I have the baseline of taking and modifying content of a cell: function onEdit() { var ss = SpreadsheetApp.getActiveSpreadsheet(); var cell = ss.getActiveSelection(); var cell_input = cell.getValue(); var cell_address = cell.getA1Notation() var cell2 = ss.getRange(cell_address); cell2.setFormula('=2*'+cell_input) } In this example, I'm unsure how to reference the cell it should be dividing against.
[ "Here's a general example that should give you an idea of how to manipulate the cells, formats and values in Sheets:\nfunction onEdit(e) {\n var ss = SpreadsheetApp.getActiveSpreadsheet();\n var cell = ss.getActiveSelection();\n var cell_input = cell.getValue();\n var divisor = cell.offset(-1, 0)\n\n if (e.range.getA1Notation()==\"B3\" || \"C3\"){\n cell.setNumberFormat(\"@\").setValue(Math.ceil((cell_input / divisor.getValue()) * 100) + \"%\")\n }\n}\n\nIf you create a sheet that looks like this you will get the behavior you were looking for in B3 and C3:\n\nHow it works:\n\nAdd the e parameter to the onEdit(e) trigger so you can use e.range to read the range that called the function.\nTo get the cell you're dividing against you need to either know its exact range or its relative position to the cell that called it. Since in your sample the percentage cell is below the divisor, you can just offset() it by -1, which returns the cell above it. If the relative position changes you'll need to take this into account to modify the offset but it should work in general if your table has a consistent structure.\nYou'll need to use an if to fire the trigger only in a specific range, otherwise the entire sheet will be affected. You do this by comparing the caller e.range to your desired range. In this example for the sake of simplicity I just had two cells so I just compared the individual B3 and C3 cells to e.range.getA1Notation(), but if you want a bigger range you'll probably want to use a more advanced technique like the ones in this question\nTo get around the format problem described in TheWizEd's comment I'm forcing the cell to a text value using setNumberFormat(\"@\"). This way if you enter a value a second time it will read it as a number rather than a percent. 
I tested using the value elsewhere in the sheet and it still works as a percentage when needed, but watch out for unintended behavior.\n\nSources:\n\nSimple triggers\nRange object documentation\n\n", "When working with triggers, take advantage of the event object.\nReplace\nfunction onEdit(){\n var ss = SpreadsheetApp.getActiveSpreadsheet();\n var cell = ss.getActiveSelection();\n var cell_input = cell.getValue();\n\nby\nfunction onEdit(e){\n var cell_input = e.value ?? 0;\n\nNotes:\n\nSpreasheetApp.Spreadsheet.getActiveSeletion() returns the first range that belongs to the active selection, this might cause problems because the active seleccion could include multiple cells, and the current cell could be any cell in the selection even one cell that is not part of the returned range.\nSpreadsheetApp.Range.getValue() returns the value of the top left cell in the range. A user could select multiple cells and use the tab key to change the current cell, so under certain circunstances the value obtained this way might not be the value of the edited cell.\ne.value ?? 0 is used to get 0 as the default value, i.e., when a cell is cleared.\n\nAs onEdit is triggered when any cell is edited your script should include a condition to only change the corresponding cells.\nfunction onEdit(e){\n const cell_input = e.value ?? 0;\n if(e.range.rowStart === 3 && [2,3].includes(e.range.columnStart)){\n const newValue = (100 * cell_input / e.range.offset(-1,0).getValue()).toFixed() + '%';\n e.range.setValue(newValue);\n }\n}\n\nThe above uses\n\nSpreadsheetApp.Range.offset to get the cell above of the edited cell.\nWrites a the percentage as string, taking advange of the Google Sheets automatic data type assignation.\n\nReferences\n\nhttps://developers.google.com/apps-script/guides/triggers\nhttps://developers.google.com/apps-script/guides/triggers/events\n\n" ]
[ 0, 0 ]
[]
[]
[ "google_apps_script", "google_sheets", "google_sheets_formula" ]
stackoverflow_0074657407_google_apps_script_google_sheets_google_sheets_formula.txt
Q: Problem using C. Program works then acts differently for unknown reason So in my program I retrieve a command line argument (must be 26 characters with no duplicates) which is used kind of like a rubric for a cypher or something and all these letters are put into an array (i know im not doing it super efficiently). Following this I prompt for a user to write something and that sentence will in turn change based on what the CLA is inputted as a "cypher" i guess. When i do do this and the cypher is simply just the alphabet (a-z) [therefore should returning the exact same thing written in the prompt] the first couple letters are correct and follow the logic of my code however after getting to the 5th it starts to print out strange random letters for unknown reasons. ex. hi there how's it going = hi thhrh how's it roisr plss help :D #include <cs50.h> #include <ctype.h> #include <stdio.h> #include <stdlib.h> #include <string.h> char letters[] = {}; char word[] = {}; // Takes input "Code" and checks if it is suitable (so far) still need to input reminder if no key etc. 
int main(int argc, string argv[]) { if (argc !=2) { printf("Missing command-line argument\n"); return 1; } else if ((argv[1][1]) == ' ') { printf("Usage: ./substitution key"); return 1; } else if (strlen(argv[1]) != 26) { printf("Key must contain 26 characters.\n"); return 1; } for (int i = 0, n = strlen(argv[1]); i < n; i++) { if (isalpha(argv[1][i]) != 0) { letters[i] = argv[1][i]; } else { printf("Key must only contain alphabetic characters.\n"); return 1; } for (int j = 0; j < i; j++) { if (toupper(argv[1][j]) == toupper(argv[1][i])) { printf("No Repeat Characters\n"); return 1; } } // confirmed this prints the entire focking CLA printf("%c", letters[i]); } string ptext = get_string("plaintext: "); printf("cyphertext: "); for (int j = 0; j < strlen(ptext); j++) { if (ptext[j] >= 'A' && ptext[j] <= 'Z') { int l = ptext[j] - 65; char z = letters[l]; //printf("%c\n", z); word[j] = z; printf("%c", word[j]); } else if (ptext[j] >= 'a' && ptext[j] <= 'z') { int k = ptext[j] - 97; char y = letters[k]; word[j] = y; printf("%c", word[j]); } else { printf("%c", ptext[j]); } } printf("\n"); } thats the code! I've tried debugging and looking into why the value changes however it just suddenly makes letters[k] not equal to e when it should as it is in the array made earlier in the code. I'm not sure what's happening as im pretty sure the code has sound logic A: here you have created zero size arrays char letters[] = {}; char word[] = {}; you need to make them larger. I guess you need char letters[26] = {}; char word[50] = {}; not quite sure what max size you need tho
Problem using C. Program works then acts differently for unknown reason
So in my program I retrieve a command line argument (must be 26 characters with no duplicates) which is used kind of like a rubric for a cypher or something and all these letters are put into an array (i know im not doing it super efficiently). Following this I prompt for a user to write something and that sentence will in turn change based on what the CLA is inputted as a "cypher" i guess. When i do do this and the cypher is simply just the alphabet (a-z) [therefore should returning the exact same thing written in the prompt] the first couple letters are correct and follow the logic of my code however after getting to the 5th it starts to print out strange random letters for unknown reasons. ex. hi there how's it going = hi thhrh how's it roisr plss help :D #include <cs50.h> #include <ctype.h> #include <stdio.h> #include <stdlib.h> #include <string.h> char letters[] = {}; char word[] = {}; // Takes input "Code" and checks if it is suitable (so far) still need to input reminder if no key etc. 
int main(int argc, string argv[]) { if (argc !=2) { printf("Missing command-line argument\n"); return 1; } else if ((argv[1][1]) == ' ') { printf("Usage: ./substitution key"); return 1; } else if (strlen(argv[1]) != 26) { printf("Key must contain 26 characters.\n"); return 1; } for (int i = 0, n = strlen(argv[1]); i < n; i++) { if (isalpha(argv[1][i]) != 0) { letters[i] = argv[1][i]; } else { printf("Key must only contain alphabetic characters.\n"); return 1; } for (int j = 0; j < i; j++) { if (toupper(argv[1][j]) == toupper(argv[1][i])) { printf("No Repeat Characters\n"); return 1; } } // confirmed this prints the entire focking CLA printf("%c", letters[i]); } string ptext = get_string("plaintext: "); printf("cyphertext: "); for (int j = 0; j < strlen(ptext); j++) { if (ptext[j] >= 'A' && ptext[j] <= 'Z') { int l = ptext[j] - 65; char z = letters[l]; //printf("%c\n", z); word[j] = z; printf("%c", word[j]); } else if (ptext[j] >= 'a' && ptext[j] <= 'z') { int k = ptext[j] - 97; char y = letters[k]; word[j] = y; printf("%c", word[j]); } else { printf("%c", ptext[j]); } } printf("\n"); } thats the code! I've tried debugging and looking into why the value changes however it just suddenly makes letters[k] not equal to e when it should as it is in the array made earlier in the code. I'm not sure what's happening as im pretty sure the code has sound logic
[ "here you have created zero size arrays\nchar letters[] = {};\nchar word[] = {};\n\nyou need to make them larger. I guess you need\nchar letters[26] = {};\nchar word[50] = {};\n\nnot quite sure what max size you need tho\n" ]
[ 0 ]
[]
[]
[ "arrays", "c", "for_loop" ]
stackoverflow_0074681306_arrays_c_for_loop.txt
Q: How can you add a subtitle track dynamically to a video.js video player That's pretty much it; I need to change the subtitles to the theater of a webpage and if possible, even not have them at all, but mostly I wanna change the subtitles of a videojs player. I could show you the website but don't think it's necessary. I've tried: videojs(theater_video, {tracks: `<track kind='subtitles' src='/resources/videos/whale_Shailene/Spanish.vtt' srclang='es' label='EspaΓ±ol' ></track> <track kind='subtitles' src='/resources/videos/whale_Shailene/Arabic.vtt' srclang='ar' label='ΨΉΨ±Ψ¨.' ></track>`}) , and: videojs(theater_video, {tracks: [{ src: '/resources/videos/whale_Shailene/Spanish.vtt', kind:'subtitles', srclang: 'es', label: 'EspaΓ±ol' },{ src: '/resources/videos/whale_Shailene/Arabic.vtt', kind:'subtitles', srclang: 'ar', label: 'ΨΉΨ±Ψ¨.' }]}) But neither seem to appear nor even change the subtitles of the player Neither a combination of either of those with: $("#theater video").find("source").after(`<track kind='subtitles' src='/resources/videos/whale_Shailene/Spanish.vtt' srclang='es' label='EspaΓ±ol' ></track> <track kind='subtitles' src='/resources/videos/whale_Shailene/Arabic.vtt' srclang='ar' label='ΨΉΨ±Ψ¨.' ></track>`) Thanks beforehand A: To do this, you will need to use the addTextTrack method provided by the Video.js player. This method takes a few arguments, including the kind of text track you want to add (in this case, subtitles), the label for the track, the language of the track, and the src of the track's text file. Here is an example of how you might use the addTextTrack method to add a Spanish subtitle track to your Video.js player: var player = videojs('theater_video'); // Add a Spanish subtitle track player.addTextTrack('subtitles', 'EspaΓ±ol', 'es', '/resources/videos/whale_Shailene/Spanish.vtt');
How can you add a subtitle track dynamically to a video.js video player
That's pretty much it; I need to change the subtitles to the theater of a webpage and if possible, even not have them at all, but mostly I wanna change the subtitles of a videojs player. I could show you the website but don't think it's necessary. I've tried: videojs(theater_video, {tracks: `<track kind='subtitles' src='/resources/videos/whale_Shailene/Spanish.vtt' srclang='es' label='EspaΓ±ol' ></track> <track kind='subtitles' src='/resources/videos/whale_Shailene/Arabic.vtt' srclang='ar' label='ΨΉΨ±Ψ¨.' ></track>`}) , and: videojs(theater_video, {tracks: [{ src: '/resources/videos/whale_Shailene/Spanish.vtt', kind:'subtitles', srclang: 'es', label: 'EspaΓ±ol' },{ src: '/resources/videos/whale_Shailene/Arabic.vtt', kind:'subtitles', srclang: 'ar', label: 'ΨΉΨ±Ψ¨.' }]}) But neither seem to appear nor even change the subtitles of the player Neither a combination of either of those with: $("#theater video").find("source").after(`<track kind='subtitles' src='/resources/videos/whale_Shailene/Spanish.vtt' srclang='es' label='EspaΓ±ol' ></track> <track kind='subtitles' src='/resources/videos/whale_Shailene/Arabic.vtt' srclang='ar' label='ΨΉΨ±Ψ¨.' ></track>`) Thanks beforehand
[ "To do this, you will need to use the addTextTrack method provided by the Video.js player. This method takes a few arguments, including the kind of text track you want to add (in this case, subtitles), the label for the track, the language of the track, and the src of the track's text file.\nHere is an example of how you might use the addTextTrack method to add a Spanish subtitle track to your Video.js player:\nvar player = videojs('theater_video');\n\n// Add a Spanish subtitle track\nplayer.addTextTrack('subtitles', 'EspaΓ±ol', 'es', '/resources/videos/whale_Shailene/Spanish.vtt');\n\n" ]
[ 1 ]
[]
[]
[ "html", "javascript", "subtitle", "video.js" ]
stackoverflow_0074681006_html_javascript_subtitle_video.js.txt
Q: import statement not working in JavaScript code Hello I am a new programmer, recently I started coding in react but I have this problem with my JavaScript code. Import is not working. import React from 'react'; import './App.css'; import { hello } from '/components/hello.js'; function App() { return ( <div className="App"> <header> <p> <hello></hello> </p> </header> </div> ); } export default App; This here is my app.js file import React from 'react'; export default function hello(){ return <h1>Hello user</h1>; } This here is my hello.js file(It is located inside my components folder) Can anyone tell me why import is not working?? A: Change this: import React from 'react'; export default function Hello(){ return <h1>Hello user</h1>; } To this: import React from 'react'; export function Hello(){ return <h1>Hello user</h1>; } If you want to leave it as a default function then import it like this instead: import Hello from '/components/Hello.js'; Also, it is best practice to capitalize component names. <Hello /> A: Try using default import instead of name import because in your hello.js you are setting up for default. Also your path for hello.js is right? Try ./hello.js A: did anyone actually get this to work? im having this exact problem in 2022 so im hoping someone just forgot to report the real solution. in my case the actual use of import is not working. every occurrence of that statement is not working, if i comment one out then the next one throws the error. and its happening when im trying to import packages not something ive created myself.
import statement not working in JavaScript code
Hello I am a new programmer, recently I started coding in react but I have this problem with my JavaScript code. Import is not working. import React from 'react'; import './App.css'; import { hello } from '/components/hello.js'; function App() { return ( <div className="App"> <header> <p> <hello></hello> </p> </header> </div> ); } export default App; This here is my app.js file import React from 'react'; export default function hello(){ return <h1>Hello user</h1>; } This here is my hello.js file(It is located inside my components folder) Can anyone tell me why import is not working??
[ "Change this:\n import React from 'react';\n\n export default function Hello(){\n return <h1>Hello user</h1>;\n}\n\nTo this:\n import React from 'react';\n\n export function Hello(){\n return <h1>Hello user</h1>;\n}\n\nIf you want to leave it as a default function then import it like this instead:\nimport Hello from '/components/Hello.js';\n\nAlso, it is best practice to capitalize component names. <Hello />\n", "Try using default import instead of name import because in your hello.js you are setting up for default. Also your path for hello.js is right? Try ./hello.js\n", "did anyone actually get this to work? im having this exact problem in 2022 so im hoping someone just forgot to report the real solution.\nin my case the actual use of import is not working. every occurrence of that statement is not working, if i comment one out then the next one throws the error. and its happening when im trying to import packages not something ive created myself.\n" ]
[ 2, 1, 0 ]
[]
[]
[ "javascript", "reactjs" ]
stackoverflow_0064430454_javascript_reactjs.txt
Q: Cannot deploy Firebase Cloud Function in flutter When i try to deploy my function i get a lot of error. This is a brand new flutter install and firebase init with Typescript. Error : node_modules/@types/express-serve-static-core/index.d.ts:99:68 - error TS1110: Type expected. 99 type RemoveTail<S extends string, Tail extends string> = S extends `${infer P}${Tail}` ? P : S; ~~~ node_modules/@types/express-serve-static-core/index.d.ts:99:77 - error TS1005: '}' expected. 99 type RemoveTail<S extends string, Tail extends string> = S extends `${infer P}${Tail}` ? P : S; ~ node_modules/@types/express-serve-static-core/index.d.ts:99:78 - error TS1128: Declaration or statement expected. 99 type RemoveTail<S extends string, Tail extends string> = S extends `${infer P}${Tail}` ? P : S; ~ node_modules/@types/express-serve-static-core/index.d.ts:99:80 - error TS1005: ';' expected. 99 type RemoveTail<S extends string, Tail extends string> = S extends `${infer P}${Tail}` ? P : S; ~ node_modules/@types/express-serve-static-core/index.d.ts:101:33 - error TS1005: ';' expected. ~ Found 127 errors. npm ERR! code ELIFECYCLE npm ERR! errno 2 npm ERR! functions@ build: `tsc` npm ERR! Exit status 2 npm ERR! npm ERR! Failed at the functions@ build script. here is my index.ts import * as functions from "firebase-functions"; // Start writing Firebase Functions // https://firebase.google.com/docs/functions/typescript export const helloWorld = functions.https.onRequest((request, response) => { functions.logger.info("Hello logs!", {structuredData: true}); response.send("Hello from Firebase!"); }); I tried many solution now and none of them are working, the export i try to do is the demo one so there should not be any error in. I upgraded flutter and the firebase cli on my mac. A: // It looks like there is an issue with the TypeScript definitions for the express- // serve-static-core package. This could be caused by an outdated or incorrect version of the package being installed. 
// To fix this, try running the following commands: Remove the existing node_modules folder: rm -rf node_modules // Install the latest version of the express-serve-static-core package: npm install express-serve-static-core // Rebuild your functions: npm run build // If this does not fix the issue, you may need to manually update the TypeScript // definitions for the express-serve-static-core package by referencing the correct // version in your project's tsconfig.json file. // For example, if you are using version 4.17.1 of the package, you would update your tsconfig.json file to include the following: "compilerOptions": { "typeRoots": [ "node_modules/@types", "node_modules/express-serve-static-core@4.17.1/types" ] } // After updating your tsconfig.json file, run the npm run build command again to // rebuild your functions. This should resolve the TypeScript errors and allow you // to deploy your Cloud Function.
Cannot deploy Firebase Cloud Function in flutter
When i try to deploy my function i get a lot of error. This is a brand new flutter install and firebase init with Typescript. Error : node_modules/@types/express-serve-static-core/index.d.ts:99:68 - error TS1110: Type expected. 99 type RemoveTail<S extends string, Tail extends string> = S extends `${infer P}${Tail}` ? P : S; ~~~ node_modules/@types/express-serve-static-core/index.d.ts:99:77 - error TS1005: '}' expected. 99 type RemoveTail<S extends string, Tail extends string> = S extends `${infer P}${Tail}` ? P : S; ~ node_modules/@types/express-serve-static-core/index.d.ts:99:78 - error TS1128: Declaration or statement expected. 99 type RemoveTail<S extends string, Tail extends string> = S extends `${infer P}${Tail}` ? P : S; ~ node_modules/@types/express-serve-static-core/index.d.ts:99:80 - error TS1005: ';' expected. 99 type RemoveTail<S extends string, Tail extends string> = S extends `${infer P}${Tail}` ? P : S; ~ node_modules/@types/express-serve-static-core/index.d.ts:101:33 - error TS1005: ';' expected. ~ Found 127 errors. npm ERR! code ELIFECYCLE npm ERR! errno 2 npm ERR! functions@ build: `tsc` npm ERR! Exit status 2 npm ERR! npm ERR! Failed at the functions@ build script. here is my index.ts import * as functions from "firebase-functions"; // Start writing Firebase Functions // https://firebase.google.com/docs/functions/typescript export const helloWorld = functions.https.onRequest((request, response) => { functions.logger.info("Hello logs!", {structuredData: true}); response.send("Hello from Firebase!"); }); I tried many solution now and none of them are working, the export i try to do is the demo one so there should not be any error in. I upgraded flutter and the firebase cli on my mac.
[ "// It looks like there is an issue with the TypeScript definitions for the express-\n// serve-static-core package. This could be caused by an outdated or incorrect version of the package being installed.\n// To fix this, try running the following commands:\n\nRemove the existing node_modules folder:\nrm -rf node_modules\n\n// Install the latest version of the express-serve-static-core package:\nnpm install express-serve-static-core\n\n// Rebuild your functions:\nnpm run build\n\n// If this does not fix the issue, you may need to manually update the TypeScript\n// definitions for the express-serve-static-core package by referencing the correct\n// version in your project's tsconfig.json file.\n\n// For example, if you are using version 4.17.1 of the package, you would update your tsconfig.json file to include the following:\n\n\"compilerOptions\": {\n\"typeRoots\": [\n\"node_modules/@types\",\n\"node_modules/express-serve-static-core@4.17.1/types\"\n]\n}\n\n// After updating your tsconfig.json file, run the npm run build command again to \n// rebuild your functions. This should resolve the TypeScript errors and allow you \n// to deploy your Cloud Function.\n\n" ]
[ 1 ]
[]
[]
[ "firebase", "flutter", "google_cloud_functions", "typescript" ]
stackoverflow_0074620232_firebase_flutter_google_cloud_functions_typescript.txt
Q: Flutter - Isar schema is not defined I have decided to use Isar database in my next project and I find it much helpful when dealing with local data. I followed the quickstart guide in its website. I added dependencies. Annotated the contact class. Ran code generator. But at fourth step, I have problem creating schema while creating Isar instance. initIsar() async { final dir = await getApplicationSupportDirectory(); final isar = await Isar.open( schemas: [ContactSchema], directory: dir.path, inspector: true, ); } The problem is where I typed ContactSchema, it says Undefined name 'ContactSchema'. Try correcting the name to one that is defined, or defining the name. So question I have to ask is, I followed guide but I'm unable to create a schema. How can I create one to make Isar db work? UPDATE: import 'package:isar/isar.dart'; part 'contact.g.dart'; @Collection() class Contact { @Id() int? id; late String name; } After adding part 'contact.g.dart', type this command flutter pub run build_runner build and you are good to go. A: For completeness of response: Run (error? look at it, it likely say to remove isar from the first command) flutter pub add isar isar_flutter_libs flutter pub add -d isar_generator build_runner flutter pub run build_runner build # Run every update of the collection Example collection: @Collection(accessor: "time") class Timer { final Id id = Isar.autoIncrement; @Index( unique: true, replace: true, ) late final bool isRunning; late final DateTime dates = DateTime.now(); } Suggestion: (tree -x output) $ tree -x . β”œβ”€β”€ main.dart └── model β”œβ”€β”€ timer.dart └── timer.g.dart 1 directory, 3 files For how I'm using this: void main() async { // ignore: unused_local_variable final isar = await Isar.open([TimerSchema]); runApp(const MyApp()); } Look at my timer app for help: pranitshah.cyou I'll update my website for this. A: After you run the build_runner command the schema class is generated in the MODEL_NAME.g.dart file. 
you need to import that file to get access to the schema class.
Flutter - Isar schema is not defined
I have decided to use Isar database in my next project and I find it much helpful when dealing with local data. I followed the quickstart guide in its website. I added dependencies. Annotated the contact class. Ran code generator. But at fourth step, I have problem creating schema while creating Isar instance. initIsar() async { final dir = await getApplicationSupportDirectory(); final isar = await Isar.open( schemas: [ContactSchema], directory: dir.path, inspector: true, ); } The problem is where I typed ContactSchema, it says Undefined name 'ContactSchema'. Try correcting the name to one that is defined, or defining the name. So question I have to ask is, I followed guide but I'm unable to create a schema. How can I create one to make Isar db work? UPDATE: import 'package:isar/isar.dart'; part 'contact.g.dart'; @Collection() class Contact { @Id() int? id; late String name; } After adding part 'contact.g.dart', type this command flutter pub run build_runner build and you are good to go.
[ "For completeness of response:\nRun (error? look at it, it likely say to remove isar from the first command)\nflutter pub add isar isar_flutter_libs\nflutter pub add -d isar_generator build_runner\nflutter pub run build_runner build # Run every update of the collection\n\nExample collection:\n@Collection(accessor: \"time\")\nclass Timer {\n final Id id = Isar.autoIncrement;\n\n @Index(\n unique: true,\n replace: true,\n )\n late final bool isRunning;\n late final DateTime dates = DateTime.now();\n}\n\nSuggestion: (tree -x output)\n$ tree -x\n.\nβ”œβ”€β”€ main.dart\n└── model\n β”œβ”€β”€ timer.dart\n └── timer.g.dart\n\n1 directory, 3 files\n\nFor how I'm using this:\nvoid main() async {\n // ignore: unused_local_variable\n final isar = await Isar.open([TimerSchema]);\n runApp(const MyApp());\n}\n\nLook at my timer app for help: pranitshah.cyou\nI'll update my website for this.\n", "After you run the build_runner command the schema class is generated in the MODEL_NAME.g.dart file. you need to import that file to get access to the schema class.\n" ]
[ 0, 0 ]
[ "Hello Zahid If you want to add local db in your next flutter app there are various packgaes on https://pub.dev.\nI would highly recomend you to go start with https://pub.dev/packages/floor it is easy to learn light weight to use..\nTry this you would loved it.. and how to implement you can google it and here is a tutorial for how to use Floor in your flutter Mobile App.\nhttps://www.youtube.com/watch?v=cQ7W7vpwTbk&t=1055s\n" ]
[ -1 ]
[ "flutter", "flutter_packages", "isar" ]
stackoverflow_0071062256_flutter_flutter_packages_isar.txt
Q: Selection in DataGridView does not work after CellPaintEvent I have a Datagridview. I need to use a CellPainting Event to customize the appereance of my DataGridview. I used the code of the msd documentation: MSD Documentation Evrything works perfect, i was able to customize my Datagridview like i needed to. But the only thing that is not working anymore ist the selection of a Row: Cells where the paintevent has done its job are excluded from the selection. It looks like this: Does anyone know waht i have to do that the selection works normal again? Edit: i also tried to reapplay the Cellstyle with "ApplyStyle" Doest not work. Edit2: Here the Code from MSD. Applying it will result that the selection won't work properly. private void dataGridView1_CellPainting(object sender, System.Windows.Forms.DataGridViewCellPaintingEventArgs e) { if (this.dataGridView1.Columns["ContactName"].Index == e.ColumnIndex && e.RowIndex >= 0) { Rectangle newRect = new Rectangle(e.CellBounds.X + 1, e.CellBounds.Y + 1, e.CellBounds.Width - 4, e.CellBounds.Height - 4); using ( Brush gridBrush = new SolidBrush(this.dataGridView1.GridColor), backColorBrush = new SolidBrush(e.CellStyle.BackColor)) { using (Pen gridLinePen = new Pen(gridBrush)) { // Erase the cell. e.Graphics.FillRectangle(backColorBrush, e.CellBounds); // Draw the grid lines (only the right and bottom lines; // DataGridView takes care of the others). e.Graphics.DrawLine(gridLinePen, e.CellBounds.Left, e.CellBounds.Bottom - 1, e.CellBounds.Right - 1, e.CellBounds.Bottom - 1); e.Graphics.DrawLine(gridLinePen, e.CellBounds.Right - 1, e.CellBounds.Top, e.CellBounds.Right - 1, e.CellBounds.Bottom); // Draw the inset highlight box. e.Graphics.DrawRectangle(Pens.Blue, newRect); // Draw the text content of the cell, ignoring alignment. 
if (e.Value != null) { e.Graphics.DrawString((String)e.Value, e.CellStyle.Font, Brushes.Crimson, e.CellBounds.X + 2, e.CellBounds.Y + 2, StringFormat.GenericDefault); } e.Handled = true; } } } } A: from dr.null i built the following code: // checks if cell is selected. if so, paint the background with the selectionbackgroundcolor. if ((e.State & DataGridViewElementStates.Selected) == DataGridViewElementStates.Selected) { var selectedbrush = new SolidBrush(e.CellStyle.SelectionBackColor); e.Graphics.FillRectangle(selectedbrush, e.CellBounds); } this code has the be in the CellPaintingEvent. :
Selection in DataGridView does not work after CellPaintEvent
I have a Datagridview. I need to use a CellPainting Event to customize the appereance of my DataGridview. I used the code of the msd documentation: MSD Documentation Evrything works perfect, i was able to customize my Datagridview like i needed to. But the only thing that is not working anymore ist the selection of a Row: Cells where the paintevent has done its job are excluded from the selection. It looks like this: Does anyone know waht i have to do that the selection works normal again? Edit: i also tried to reapplay the Cellstyle with "ApplyStyle" Doest not work. Edit2: Here the Code from MSD. Applying it will result that the selection won't work properly. private void dataGridView1_CellPainting(object sender, System.Windows.Forms.DataGridViewCellPaintingEventArgs e) { if (this.dataGridView1.Columns["ContactName"].Index == e.ColumnIndex && e.RowIndex >= 0) { Rectangle newRect = new Rectangle(e.CellBounds.X + 1, e.CellBounds.Y + 1, e.CellBounds.Width - 4, e.CellBounds.Height - 4); using ( Brush gridBrush = new SolidBrush(this.dataGridView1.GridColor), backColorBrush = new SolidBrush(e.CellStyle.BackColor)) { using (Pen gridLinePen = new Pen(gridBrush)) { // Erase the cell. e.Graphics.FillRectangle(backColorBrush, e.CellBounds); // Draw the grid lines (only the right and bottom lines; // DataGridView takes care of the others). e.Graphics.DrawLine(gridLinePen, e.CellBounds.Left, e.CellBounds.Bottom - 1, e.CellBounds.Right - 1, e.CellBounds.Bottom - 1); e.Graphics.DrawLine(gridLinePen, e.CellBounds.Right - 1, e.CellBounds.Top, e.CellBounds.Right - 1, e.CellBounds.Bottom); // Draw the inset highlight box. e.Graphics.DrawRectangle(Pens.Blue, newRect); // Draw the text content of the cell, ignoring alignment. if (e.Value != null) { e.Graphics.DrawString((String)e.Value, e.CellStyle.Font, Brushes.Crimson, e.CellBounds.X + 2, e.CellBounds.Y + 2, StringFormat.GenericDefault); } e.Handled = true; } } } }
[ "from dr.null i built the following code:\n// checks if cell is selected. if so, paint the background with the selectionbackgroundcolor. \nif ((e.State & DataGridViewElementStates.Selected) == DataGridViewElementStates.Selected)\n{\n var selectedbrush = new SolidBrush(e.CellStyle.SelectionBackColor);\n e.Graphics.FillRectangle(selectedbrush, e.CellBounds);\n}\n\nthis code has the be in the CellPaintingEvent.\n:\n\n" ]
[ 2 ]
[]
[]
[ "c#", "datagridview", "paintevent", "winforms" ]
stackoverflow_0074680869_c#_datagridview_paintevent_winforms.txt
Q: same flutter project but different git repository I have the same flutter project but I want two different apps from the same project because the client wants some minor changes but the other client didn’t. Also, both client wants their specific apps on the play store and app store as well and requested code as well. I want to copy my existing project so I can match the second client's requirements without doing any changes to the first one. can I be able to change the project origin URL to the new one? A: You can link the same project to two different repositories and push to or pull from anyone you want at any time. git remote add remoteOne <repository address 1> git remote add remoteTwo <repository address 2> So, as you proceed, you can do git push remoteOne or git push remotewo after you commit your progress
same flutter project but different git repository
I have the same flutter project but I want two different apps from the same project because the client wants some minor changes but the other client didn’t. Also, both client wants their specific apps on the play store and app store as well and requested code as well. I want to copy my existing project so I can match the second client's requirements without doing any changes to the first one. can I be able to change the project origin URL to the new one?
[ "You can link the same project to two different repositories and push to or pull from anyone you want at any time.\ngit remote add remoteOne <repository address 1>\n\ngit remote add remoteTwo <repository address 2>\n\nSo, as you proceed, you can do git push remoteOne or git push remotewo after you commit your progress\n" ]
[ 0 ]
[]
[]
[ "dart", "flutter", "git" ]
stackoverflow_0073219267_dart_flutter_git.txt
Q: flutter box won't save the data I am trying to store logged user data in Hive storage but my user box return null Code main.dart Future main() async { WidgetsFlutterBinding.ensureInitialized(); await Hive.initFlutter(); Hive.registerAdapter(UsermodelAdapter()); await Hive.openBox<Usermodel>('user'); runApp(MyApp()); } login.dart After user successfully logged in by server I'm trying to store data before redirecting user print('box:::: ${user.name}'); // print (admin) - user name coming from server //store data in storage var userData = Usermodel() ..name = user.name ..email = user.email ..createdAt = user.createdAt ..approved = user.approved; final box = Hive.box<Usermodel>('user'); box.add(userData); print('box:::: ${box.get('name')}'); // print (null) any idea why I cannot store my user data? A: Using the add() to store your data in a Hive box, will give it an incremental index as it's key, so you can imagine that your Hive box is like a list, so this: box.add(data); will add data to the box with a 0 index since it's the first and only element in the box. box.get('name'); // will return null because that key doesn't exist there are two solutions for that case. Either saving the data in the box using put() with providing a "name" key like this: box.put('name', data); box.get('name'); // will return data. or trying to get the data with that index key with the getAt() like this: box.add(data); box.getAt(0); // will return data. (assuming that it's the first element so it's index is 0)
flutter box won't save the data
I am trying to store logged user data in Hive storage but my user box return null Code main.dart Future main() async { WidgetsFlutterBinding.ensureInitialized(); await Hive.initFlutter(); Hive.registerAdapter(UsermodelAdapter()); await Hive.openBox<Usermodel>('user'); runApp(MyApp()); } login.dart After user successfully logged in by server I'm trying to store data before redirecting user print('box:::: ${user.name}'); // print (admin) - user name coming from server //store data in storage var userData = Usermodel() ..name = user.name ..email = user.email ..createdAt = user.createdAt ..approved = user.approved; final box = Hive.box<Usermodel>('user'); box.add(userData); print('box:::: ${box.get('name')}'); // print (null) any idea why I cannot store my user data?
[ "Using the add() to store your data in a Hive box, will give it an incremental index as it's key, so you can imagine that your Hive box is like a list, so this:\nbox.add(data); will add data to the box with a 0 index since it's the first and only element in the box.\nbox.get('name'); // will return null because that key doesn't exist\n\nthere are two solutions for that case.\n\nEither saving the data in the box using put() with providing a \"name\" key like this:\nbox.put('name', data); \nbox.get('name'); // will return data.\n\nor trying to get the data with that index key with the getAt() like this:\n box.add(data); \n box.getAt(0); // will return data. (assuming that it's the first element so it's index is 0)\n\n" ]
[ 0 ]
[]
[]
[ "flutter", "flutter_hive" ]
stackoverflow_0067865747_flutter_flutter_hive.txt
Q: Adding a box on top of a divider I've made a divider for my site and it works perfectly! I'm just having trouble with adding a little box on top of my divider as shown in the picture, any help would be appreciated! .divider { width: 2px; background: linear-gradient(90deg, rgba(6,144,45,1) 0%, rgba(6,144,45,0.0032387955182072714) 87%); height: auto; min-height: 5vh; margin-right: 25px; } <div class="divider"></div> The box on the divider should look like this A: You can achieve this by using the pseudo-element ::before. .divider::before { content: ''; display: block; position: relative; background-color: black; width: 8px; height: 8px; top: 0; left: -2px; } .divider { background-color: black; width: 4px; height: 60px; } <div class="divider"></div>
Adding a box on top of a divider
I've made a divider for my site and it works perfectly! I'm just having trouble with adding a little box on top of my divider as shown in the picture, any help would be appreciated! .divider { width: 2px; background: linear-gradient(90deg, rgba(6,144,45,1) 0%, rgba(6,144,45,0.0032387955182072714) 87%); height: auto; min-height: 5vh; margin-right: 25px; } <div class="divider"></div> The box on the divider should look like this
[ "You can achieve this by using the pseudo-element ::before.\n\n\n.divider::before {\n content: '';\n display: block;\n position: relative;\n background-color: black;\n width: 8px;\n height: 8px;\n top: 0;\n left: -2px;\n}\n\n.divider {\n background-color: black;\n width: 4px;\n height: 60px;\n}\n<div class=\"divider\"></div>\n\n\n\n" ]
[ 1 ]
[]
[]
[ "css", "html" ]
stackoverflow_0074681206_css_html.txt
Q: Cumulative sum from the beginning of a stream in Spark I have to compute a cumulative sum on a value column by group from the beginning of the time series with a daily output. If I do with a batch, it should be something like this: val columns = Seq("timestamp", "group", "value") val data = List( (Instant.parse("2020-01-01T00:00:00Z"), "Group1", 0), (Instant.parse("2020-01-01T00:00:00Z"), "Group2", 0), (Instant.parse("2020-01-01T12:00:00Z"), "Group1", 1), (Instant.parse("2020-01-01T12:00:00Z"), "Group2", -1), (Instant.parse("2020-01-02T00:00:00Z"), "Group1", 2), (Instant.parse("2020-01-02T00:00:00Z"), "Group2", -2), (Instant.parse("2020-01-02T12:00:00Z"), "Group1", 3), (Instant.parse("2020-01-02T12:00:00Z"), "Group2", -3), ) val df = spark .createDataFrame(data) .toDF(columns: _*) // defines a window from the beginning by `group` val event_window = Window .partitionBy(col("group")) .orderBy(col("timestamp")) .rowsBetween(Window.unboundedPreceding, Window.currentRow) val computed_df = df .withColumn( "cumsum", functions .sum('value) .over(event_window) // apply the aggregation on a window from the beginning ) .groupBy(window($"timestamp", "1 day"), $"group") .agg(functions.last("cumsum").as("cumsum_by_day")) // display the last value for each day computed_df.show(truncate = false) and the output is +------------------------------------------+------+-------------+ |window |group |cumsum_by_day| +------------------------------------------+------+-------------+ |{2020-01-01 01:00:00, 2020-01-02 01:00:00}|Group1| 1 | |{2020-01-02 01:00:00, 2020-01-03 01:00:00}|Group1| 6 | |{2020-01-01 01:00:00, 2020-01-02 01:00:00}|Group2|-1 | |{2020-01-02 01:00:00, 2020-01-03 01:00:00}|Group2|-6 | +------------------------------------------+------+-------------+ The result is perfectly fine. However, in my case, the data source is not an existing dataset but a stream and I didn't find any solution to apply the aggregation from the beginning of the stream, not on a sliding window. 
The closest code I can do is: // MemoryStream to reproduce locally the issue implicit val sqlCtx: SQLContext = spark.sqlContext val memoryStream = MemoryStream[(Instant, String, Int)] memoryStream.addData(data) val df = memoryStream .toDF() .toDF(columns: _*) val computed_df = df .groupBy(window($"timestamp", "1 day"), $"group") .agg(functions.sum('value).as("agg")) computed_df.writeStream .option("truncate", value = false) .format("console") .outputMode("complete") .start() .processAllAvailable() } It produces an aggregation for each day but not from the beginning of the stream. If I try to add something like .over(event_window) (like in batch), it compiles but fails at runtime. How can we apply an aggregation function from the beginning of a stream? Here a GitHub repository with all the context to run that code. A: I didn't find any solution using the high-level functions. For example, it is not possible to add another groupBy over the main aggregation agg(functions.sum('value).as("agg"), functions.last('timestamp).as("ts") to get the daily report. After many experiments, I switched to the low level functions. The most polyvalent function seems to be flatMapGroupsWithState. // same `events` Dataframe as before // Accumulate value by group and report every day val computed_df = events .withWatermark("timestamp", "0 second") // watermarking required to use GroupStateTimeout .as[(Instant, String, Int)] .groupByKey(event => event._2) .flatMapGroupsWithState[IntermediateState, AggResult]( OutputMode.Append(), GroupStateTimeout.EventTimeTimeout )(processEventGroup) processEventGroup is the key function which contains all the technical stuff: cumulative aggregative and output after each day. 
def processEventGroup( group: String, events: Iterator[(Instant, String, Int)], state: GroupState[IntermediateState] ) = { def mergeState(events: List[Event]): Iterator[AggResult] = { // Initialize the aggregation without previous state or a new one var (acc_value, acc_timestamp) = state.getOption .map(s => (s.agg_value, s.last_timestamp)) .getOrElse((0, Instant.EPOCH)) val agg_results = events.flatMap { e => // create an daily report if the new event occurs on another day val intermediate_day_result = if ( // not same day acc_timestamp != Instant.EPOCH && truncateDay(e.timestamp) > truncateDay(acc_timestamp) ) { Seq(AggResult(truncateDay(acc_timestamp), group, acc_value)) } else { Seq.empty } // apply the aggregation as usual (`sum` on value, `last` on timestamp) acc_value += e.value acc_timestamp = e.timestamp intermediate_day_result } // if a timeout occurs before next events data in the same group, // a daily report will be generated state.setTimeoutTimestamp(state.getCurrentWatermarkMs, "1 day") // save the current aggregated value as state storage state.update(IntermediateState(acc_timestamp, group, acc_value)) agg_results.iterator } if (state.hasTimedOut && events.isEmpty) { // generate a daily report on timeout state.getOption .map(agg_result => AggResult(truncateDay(agg_result.last_timestamp), group, agg_result.agg_value) ) .iterator } else { // a list daily report may be generated while processing the new events mergeState(events.map { case (timestamp, group, value) => Event(timestamp, group, value) }.toList) } } processEventGroup will be called at each batch once per group. The state is managed by GroupState (the state should just be serializable). 
For completness, here the missing elements: def truncateDay(ts: Instant): Instant = { ts.truncatedTo(ChronoUnit.DAYS) } case class Event(timestamp: Instant, group: String, value: Int) case class IntermediateState(last_timestamp: Instant, group: String, agg_value: Int) case class AggResult(day_start: Instant, group: String, cumsum_by_day: Int) (code available here)
Cumulative sum from the beginning of a stream in Spark
I have to compute a cumulative sum on a value column by group from the beginning of the time series with a daily output. If I do with a batch, it should be something like this: val columns = Seq("timestamp", "group", "value") val data = List( (Instant.parse("2020-01-01T00:00:00Z"), "Group1", 0), (Instant.parse("2020-01-01T00:00:00Z"), "Group2", 0), (Instant.parse("2020-01-01T12:00:00Z"), "Group1", 1), (Instant.parse("2020-01-01T12:00:00Z"), "Group2", -1), (Instant.parse("2020-01-02T00:00:00Z"), "Group1", 2), (Instant.parse("2020-01-02T00:00:00Z"), "Group2", -2), (Instant.parse("2020-01-02T12:00:00Z"), "Group1", 3), (Instant.parse("2020-01-02T12:00:00Z"), "Group2", -3), ) val df = spark .createDataFrame(data) .toDF(columns: _*) // defines a window from the beginning by `group` val event_window = Window .partitionBy(col("group")) .orderBy(col("timestamp")) .rowsBetween(Window.unboundedPreceding, Window.currentRow) val computed_df = df .withColumn( "cumsum", functions .sum('value) .over(event_window) // apply the aggregation on a window from the beginning ) .groupBy(window($"timestamp", "1 day"), $"group") .agg(functions.last("cumsum").as("cumsum_by_day")) // display the last value for each day computed_df.show(truncate = false) and the output is +------------------------------------------+------+-------------+ |window |group |cumsum_by_day| +------------------------------------------+------+-------------+ |{2020-01-01 01:00:00, 2020-01-02 01:00:00}|Group1| 1 | |{2020-01-02 01:00:00, 2020-01-03 01:00:00}|Group1| 6 | |{2020-01-01 01:00:00, 2020-01-02 01:00:00}|Group2|-1 | |{2020-01-02 01:00:00, 2020-01-03 01:00:00}|Group2|-6 | +------------------------------------------+------+-------------+ The result is perfectly fine. However, in my case, the data source is not an existing dataset but a stream and I didn't find any solution to apply the aggregation from the beginning of the stream, not on a sliding window. 
The closest code I can do is: // MemoryStream to reproduce locally the issue implicit val sqlCtx: SQLContext = spark.sqlContext val memoryStream = MemoryStream[(Instant, String, Int)] memoryStream.addData(data) val df = memoryStream .toDF() .toDF(columns: _*) val computed_df = df .groupBy(window($"timestamp", "1 day"), $"group") .agg(functions.sum('value).as("agg")) computed_df.writeStream .option("truncate", value = false) .format("console") .outputMode("complete") .start() .processAllAvailable() } It produces an aggregation for each day but not from the beginning of the stream. If I try to add something like .over(event_window) (like in batch), it compiles but fails at runtime. How can we apply an aggregation function from the beginning of a stream? Here a GitHub repository with all the context to run that code.
[ "I didn't find any solution using the high-level functions. For example, it is not possible to add another groupBy over the main aggregation agg(functions.sum('value).as(\"agg\"), functions.last('timestamp).as(\"ts\") to get the daily report.\nAfter many experiments, I switched to the low level functions. The most polyvalent function seems to be flatMapGroupsWithState.\n// same `events` Dataframe as before \n\n// Accumulate value by group and report every day\nval computed_df = events\n .withWatermark(\"timestamp\", \"0 second\") // watermarking required to use GroupStateTimeout\n .as[(Instant, String, Int)]\n .groupByKey(event => event._2)\n .flatMapGroupsWithState[IntermediateState, AggResult](\n OutputMode.Append(),\n GroupStateTimeout.EventTimeTimeout\n )(processEventGroup)\n\nprocessEventGroup is the key function which contains all the technical stuff: cumulative aggregative and output after each day.\ndef processEventGroup(\n group: String,\n events: Iterator[(Instant, String, Int)],\n state: GroupState[IntermediateState]\n) = {\n def mergeState(events: List[Event]): Iterator[AggResult] = {\n // Initialize the aggregation without previous state or a new one\n var (acc_value, acc_timestamp) = state.getOption\n .map(s => (s.agg_value, s.last_timestamp))\n .getOrElse((0, Instant.EPOCH))\n\n val agg_results = events.flatMap { e =>\n // create an daily report if the new event occurs on another day\n val intermediate_day_result =\n if ( // not same day\n acc_timestamp != Instant.EPOCH &&\n truncateDay(e.timestamp) > truncateDay(acc_timestamp)\n ) {\n Seq(AggResult(truncateDay(acc_timestamp), group, acc_value))\n } else {\n Seq.empty\n }\n // apply the aggregation as usual (`sum` on value, `last` on timestamp)\n acc_value += e.value\n acc_timestamp = e.timestamp\n intermediate_day_result\n }\n\n // if a timeout occurs before next events data in the same group, \n // a daily report will be generated\n state.setTimeoutTimestamp(state.getCurrentWatermarkMs, \"1 
day\")\n // save the current aggregated value as state storage\n state.update(IntermediateState(acc_timestamp, group, acc_value))\n agg_results.iterator\n }\n\n if (state.hasTimedOut && events.isEmpty) {\n // generate a daily report on timeout\n state.getOption\n .map(agg_result =>\n AggResult(truncateDay(agg_result.last_timestamp), group, agg_result.agg_value)\n )\n .iterator\n } else {\n // a list daily report may be generated while processing the new events\n mergeState(events.map { case (timestamp, group, value) =>\n Event(timestamp, group, value)\n }.toList)\n }\n}\n\nprocessEventGroup will be called at each batch once per group.\nThe state is managed by GroupState (the state should just be serializable).\nFor completness, here the missing elements:\ndef truncateDay(ts: Instant): Instant = {\n ts.truncatedTo(ChronoUnit.DAYS)\n}\n\ncase class Event(timestamp: Instant, group: String, value: Int)\n\ncase class IntermediateState(last_timestamp: Instant, group: String, agg_value: Int)\n\ncase class AggResult(day_start: Instant, group: String, cumsum_by_day: Int)\n\n(code available here)\n" ]
[ 0 ]
[]
[]
[ "apache_spark", "apache_spark_sql", "scala", "spark_streaming" ]
stackoverflow_0074648128_apache_spark_apache_spark_sql_scala_spark_streaming.txt
Q: How to target latest minor Azure Function Runtime Version locally? What do I have to do to get the latest Function Runtime Version running locally? I'm developing isolated v4 app with .NET 6. Version 4.15.0 was released a week ago. Source. But when I run the project, it is using v4.13.0.19486. I've tried to explicitly set the version in app settings with the FUNCTIONS_EXTENSION_VERSION setting with no success. Updated Azure Functions Core Tools and VS to the latest version. Not sure what more I have to do. A: The local version would be the one that is part of the local version of Azure Functions Tools. The best way to test a particular minor version would be to use the docker container for the same. Here are the tags available for you to choose from.
How to target latest minor Azure Function Runtime Version locally?
What do I have to do to get the latest Function Runtime Version running locally? I'm developing isolated v4 app with .NET 6. Version 4.15.0 was released a week ago. Source. But when I run the project, it is using v4.13.0.19486. I've tried to explicitly set the version in app settings with the FUNCTIONS_EXTENSION_VERSION setting with no success. Updated Azure Functions Core Tools and VS to the latest version. Not sure what more I have to do.
[ "The local version would be the one that is part of the local version of Azure Functions Tools. The best way to test a particular minor version would be to use the docker container for the same.\nHere are the tags available for you to choose from.\n" ]
[ 0 ]
[]
[]
[ "azure", "azure_functions", "azure_functions_core_tools" ]
stackoverflow_0074616912_azure_azure_functions_azure_functions_core_tools.txt
Q: How do you combine the data from two tables create table table1 ( header bit, [type] char(1), Intype varchar(3), agrid int, affid int, inno varchar(10), amount int ); create table table2 ( header bit, [type] char(1), Intype varchar(3), agrid int, affid int, inno varchar(10), amount int ); Scenario 1: insert into table1 (header , [type] , Intype ,agrid , affid, inno,amount) values (0, 'D','001',18,84,'001',null), (0, 'N', '001', 18,84,'001',null); insert into table2 (header , [type] , Intype ,agrid , affid, inno,amount) values (1, null, null,18,84, '001', 90), (1, null, null,18,84, '001', 60), (1, null, null,18,84, '001', 84); For every header trailer 0 record, i need to show the related trailer 1 record joining on inno , affid, agrid. Please let m eknow how can i achieve this I need: header , [type] , Intype ,agrid , affid, inno,amount 0 , 'D' , '001' , 18 , 84 , '001' , null ----table 1 record for type D 1 , null, null, 18, 84, 001, 90 1, null, null,18,84, 001, 60 1, null, null,18,84, 001, 84 0, 'N', '001', 18,84,'001',null ----table 1 record for type N 1 , null, null, 18, 84, 001, 90 1, null, null,18,84, 001, 60 1, null, null,18,84, 001, 84 0, 'N', '001', 18,84,'001',null Scenario 2: insert into table1 (header , [type] , Intype ,agrid , affid, inno,amount) values (0, 'D','001',14,95,'001',null), (0, 'D', '001', 14,95,'008',null), (0, 'N', '001', 14,95,'008',null); insert into table2 (header , [type] , Intype ,agrid , affid, inno,amount) values (1, null, null,14,95, '001', 11), (1, null, null,14,95, '008', 23); I need: header , [type] , Intype ,agrid , affid, inno,amount 0, 'D','001',14,95,'001',null ----table 1 record for type D 1, null, null,14,95, 001, 11 0, 'D', '001', 14,95,'008',null ---table 1 record for type D 1, null, null,14,95, 008, 23 0, 'N', '001', 14,95,'008',null ----table 1 record for type N 1, null, null,14,95, 008, 23 I tried with some joins, it didn't work. 
A: You can try to sort then, by adding more columns the main part is that both get a row_number that can join table1 and table1, the constant sorter is necessary as header is binary and can only handle two different tables, if you have more, it will be useful To get the right order for amount, you need an aditional sorting column, like an indetidy Tables are by nature unsorted, so the result can differ SELECT header , [type] , Intype ,agrid , affid, inno,amount, ROW_NUMBER() OVER(PARTITION BY agrid , affid, inno ORDER BY agrid) rn,1 sorter FROM table1 UNION ALL SELECT t2.header , t2.[type] , t2.Intype ,t2.agrid , t2.affid, t2.inno,t2.amount, t1.rn,2 FROM table2 t2 JOIN (SELECT header , [type] , Intype ,agrid , affid, inno,amount, ROW_NUMBER() OVER(PARTITION BY agrid , affid, inno ORDER BY agrid) rn FROM table1) t1 ON t1.agrid = t2.agrid AND t1.affid = t2.affid AND t1.inno = t2.inno ORDER BY agrid , affid, inno,rn,sorter header type Intype agrid affid inno amount rn sorter False D 001 18 84 001 null 1 1 True null null 18 84 001 90 1 2 True null null 18 84 001 60 1 2 True null null 18 84 001 84 1 2 False N 001 18 84 001 null 2 1 True null null 18 84 001 90 2 2 True null null 18 84 001 84 2 2 True null null 18 84 001 60 2 2 truncate table table1 truncate table table2 insert into table1 (header , [type] , Intype ,agrid , affid, inno,amount) values (0, 'D','001',14,95,'001',null), (0, 'D', '001', 14,95,'008',null), (0, 'N', '001', 14,95,'008',null); insert into table2 (header , [type] , Intype ,agrid , affid, inno,amount) values (1, null, null,14,95, '001', 11), (1, null, null,14,95, '008', 23); 5 rows affected SELECT header , [type] , Intype ,agrid , affid, inno,amount, ROW_NUMBER() OVER(PARTITION BY agrid , affid, inno ORDER BY agrid) rn,1 sorter FROM table1 UNION ALL SELECT t2.header , t2.[type] , t2.Intype ,t2.agrid , t2.affid, t2.inno,t2.amount, t1.rn,2 FROM table2 t2 JOIN (SELECT header , [type] , Intype ,agrid , affid, inno,amount, ROW_NUMBER() OVER(PARTITION BY 
agrid , affid, inno ORDER BY agrid) rn FROM table1) t1 ON t1.agrid = t2.agrid AND t1.affid = t2.affid AND t1.inno = t2.inno ORDER BY agrid , affid, inno,rn,sorter header type Intype agrid affid inno amount rn sorter False D 001 14 95 001 null 1 1 True null null 14 95 001 11 1 2 False D 001 14 95 008 null 1 1 True null null 14 95 008 23 1 2 False N 001 14 95 008 null 2 1 True null null 14 95 008 23 2 2 fiddle
How do you combine the data from two tables
create table table1 ( header bit, [type] char(1), Intype varchar(3), agrid int, affid int, inno varchar(10), amount int ); create table table2 ( header bit, [type] char(1), Intype varchar(3), agrid int, affid int, inno varchar(10), amount int ); Scenario 1: insert into table1 (header , [type] , Intype ,agrid , affid, inno,amount) values (0, 'D','001',18,84,'001',null), (0, 'N', '001', 18,84,'001',null); insert into table2 (header , [type] , Intype ,agrid , affid, inno,amount) values (1, null, null,18,84, '001', 90), (1, null, null,18,84, '001', 60), (1, null, null,18,84, '001', 84); For every header trailer 0 record, i need to show the related trailer 1 record joining on inno , affid, agrid. Please let m eknow how can i achieve this I need: header , [type] , Intype ,agrid , affid, inno,amount 0 , 'D' , '001' , 18 , 84 , '001' , null ----table 1 record for type D 1 , null, null, 18, 84, 001, 90 1, null, null,18,84, 001, 60 1, null, null,18,84, 001, 84 0, 'N', '001', 18,84,'001',null ----table 1 record for type N 1 , null, null, 18, 84, 001, 90 1, null, null,18,84, 001, 60 1, null, null,18,84, 001, 84 0, 'N', '001', 18,84,'001',null Scenario 2: insert into table1 (header , [type] , Intype ,agrid , affid, inno,amount) values (0, 'D','001',14,95,'001',null), (0, 'D', '001', 14,95,'008',null), (0, 'N', '001', 14,95,'008',null); insert into table2 (header , [type] , Intype ,agrid , affid, inno,amount) values (1, null, null,14,95, '001', 11), (1, null, null,14,95, '008', 23); I need: header , [type] , Intype ,agrid , affid, inno,amount 0, 'D','001',14,95,'001',null ----table 1 record for type D 1, null, null,14,95, 001, 11 0, 'D', '001', 14,95,'008',null ---table 1 record for type D 1, null, null,14,95, 008, 23 0, 'N', '001', 14,95,'008',null ----table 1 record for type N 1, null, null,14,95, 008, 23 I tried with some joins, it didn't work.
[ "You can try to sort then, by adding more columns\nthe main part is that both get a row_number that can join table1 and table1, the constant sorter is necessary as header is binary and can only handle two different tables, if you have more, it will be useful\nTo get the right order for amount, you need an aditional sorting column, like an indetidy\nTables are by nature unsorted, so the result can differ\nSELECT \nheader , [type] , Intype ,agrid , affid, inno,amount,\nROW_NUMBER() OVER(PARTITION BY agrid , affid, inno ORDER BY agrid) rn,1 sorter\nFROM table1\nUNION ALL\nSELECT \nt2.header , t2.[type] , t2.Intype ,t2.agrid , t2.affid, t2.inno,t2.amount,\nt1.rn,2\nFROM table2 t2 JOIN (SELECT \nheader , [type] , Intype ,agrid , affid, inno,amount,\nROW_NUMBER() OVER(PARTITION BY agrid , affid, inno ORDER BY agrid) rn\nFROM table1)\n t1 ON t1.agrid = t2.agrid \n AND t1.affid = t2.affid AND t1.inno = t2.inno\nORDER BY agrid , affid, inno,rn,sorter\n\n\n\n\n\nheader\ntype\nIntype\nagrid\naffid\ninno\namount\nrn\nsorter\n\n\n\n\nFalse\nD\n001\n18\n84\n001\nnull\n1\n1\n\n\nTrue\nnull\nnull\n18\n84\n001\n90\n1\n2\n\n\nTrue\nnull\nnull\n18\n84\n001\n60\n1\n2\n\n\nTrue\nnull\nnull\n18\n84\n001\n84\n1\n2\n\n\nFalse\nN\n001\n18\n84\n001\nnull\n2\n1\n\n\nTrue\nnull\nnull\n18\n84\n001\n90\n2\n2\n\n\nTrue\nnull\nnull\n18\n84\n001\n84\n2\n2\n\n\nTrue\nnull\nnull\n18\n84\n001\n60\n2\n2\n\n\n\n\ntruncate table table1\n\ntruncate table table2\n\ninsert into table1 (header , [type] , Intype ,agrid , affid, inno,amount)\nvalues\n(0, 'D','001',14,95,'001',null),\n(0, 'D', '001', 14,95,'008',null),\n(0, 'N', '001', 14,95,'008',null);\n\ninsert into table2 (header , [type] , Intype ,agrid , affid, inno,amount)\nvalues\n(1, null, null,14,95, '001', 11),\n(1, null, null,14,95, '008', 23);\n\n5 rows affected\n\nSELECT \nheader , [type] , Intype ,agrid , affid, inno,amount,\nROW_NUMBER() OVER(PARTITION BY agrid , affid, inno ORDER BY agrid) rn,1 sorter\nFROM table1\nUNION ALL\nSELECT 
\nt2.header , t2.[type] , t2.Intype ,t2.agrid , t2.affid, t2.inno,t2.amount,\nt1.rn,2\nFROM table2 t2 JOIN (SELECT \nheader , [type] , Intype ,agrid , affid, inno,amount,\nROW_NUMBER() OVER(PARTITION BY agrid , affid, inno ORDER BY agrid) rn\nFROM table1)\n t1 ON t1.agrid = t2.agrid \n AND t1.affid = t2.affid AND t1.inno = t2.inno\nORDER BY agrid , affid, inno,rn,sorter\n\n\n\n\n\nheader\ntype\nIntype\nagrid\naffid\ninno\namount\nrn\nsorter\n\n\n\n\nFalse\nD\n001\n14\n95\n001\nnull\n1\n1\n\n\nTrue\nnull\nnull\n14\n95\n001\n11\n1\n2\n\n\nFalse\nD\n001\n14\n95\n008\nnull\n1\n1\n\n\nTrue\nnull\nnull\n14\n95\n008\n23\n1\n2\n\n\nFalse\nN\n001\n14\n95\n008\nnull\n2\n1\n\n\nTrue\nnull\nnull\n14\n95\n008\n23\n2\n2\n\n\n\n\nfiddle\n" ]
[ 0 ]
[]
[]
[ "sql", "sql_server", "tsql" ]
stackoverflow_0074680993_sql_sql_server_tsql.txt
Q: connect to a database from a prestashop module the module can't connect with the database. I have a prestashop module that has to do a task with a database when this hook is launched: hookActionPaymentConfirmation. But I can't do anything, I don't know if it's a problem with the connection or with the query. This is the code of the function of the hookActionPaymentConfirmation, do you see any error? The task is to update the stock of products in a table of the same database. I want to do it with the name of the database and the name of the server, because I'm planning to implement that table in an external database. ` public function hookActionPaymentConfirmation($params) { //mail("luilli.guillan@gmail.com", "yiha", "hola"); $database=Configuration::get('MIMODULOMISMADB_ACCOUNT_NOMBREDB', null); $user=Configuration::get('MIMODULOMISMADB_ACCOUNT_USUARIO', null); $password=Configuration::get('MIMODULOMISMADB_ACCOUNT_PASSWORD', null); // $db = new DbMySQLi("localhost",$user,$password,$database,true); //$products = $params['cart']->getProducts(true);//en los nuevos ps ya no va y hay que hacerlo con las dos ordenes siguientes $order = new Order($params['id_order']); $products = $order->getCartProducts(); foreach ($products as $product) { $id_product = $product['id_product']; $cantidad = $product['cart_quantity']; $referencia = $product['reference']; $product_attribute_id = $product['product_attribute_id']; $newProduct = new Product($id_product); if($newProduct->hasCombinations()) { $sql = 'select * from product_attribute where id_product_attribute = ' . (string) $product_attribute_id . ';'; //$rec = DB::getInstance()->getRow($sql); $rec = $db->getValue($sql); $referencia = $rec["reference"]; //mail("luilli.guillan@gmail.com", "has combinations", $id_product." ".$referencia." ".$cantidad." p.a: ".$product_attribute); } $unidades = $db->getValue('SELECT unidades FROM productos WHERE '.$campoid.' 
= "'.$referencia.'"'); $unidadesRestantes=$unidades-$cantidad; $db->Execute('UPDATE productos SET unidades="'.$unidadesRestantes.'" WHERE '.$campoid.' = "'.$referencia.'"'); mail("luilli.guillan@gmail.com", "yay",$database." ".$user." ".$password." ".$unidades); //mail("luilli.guillan@gmail.com", "yay",$unidades); } }` A: i've already solved.It was this line missing: $campoid = Configuration::get('CAMPOID', null);
connect to a database from a prestashop module
the module can't connect with the database. I have a prestashop module that has to do a task with a database when this hook is launched: hookActionPaymentConfirmation. But I can't do anything, I don't know if it's a problem with the connection or with the query. This is the code of the function of the hookActionPaymentConfirmation, do you see any error? The task is to update the stock of products in a table of the same database. I want to do it with the name of the database and the name of the server, because I'm planning to implement that table in an external database. ` public function hookActionPaymentConfirmation($params) { //mail("luilli.guillan@gmail.com", "yiha", "hola"); $database=Configuration::get('MIMODULOMISMADB_ACCOUNT_NOMBREDB', null); $user=Configuration::get('MIMODULOMISMADB_ACCOUNT_USUARIO', null); $password=Configuration::get('MIMODULOMISMADB_ACCOUNT_PASSWORD', null); // $db = new DbMySQLi("localhost",$user,$password,$database,true); //$products = $params['cart']->getProducts(true);//en los nuevos ps ya no va y hay que hacerlo con las dos ordenes siguientes $order = new Order($params['id_order']); $products = $order->getCartProducts(); foreach ($products as $product) { $id_product = $product['id_product']; $cantidad = $product['cart_quantity']; $referencia = $product['reference']; $product_attribute_id = $product['product_attribute_id']; $newProduct = new Product($id_product); if($newProduct->hasCombinations()) { $sql = 'select * from product_attribute where id_product_attribute = ' . (string) $product_attribute_id . ';'; //$rec = DB::getInstance()->getRow($sql); $rec = $db->getValue($sql); $referencia = $rec["reference"]; //mail("luilli.guillan@gmail.com", "has combinations", $id_product." ".$referencia." ".$cantidad." p.a: ".$product_attribute); } $unidades = $db->getValue('SELECT unidades FROM productos WHERE '.$campoid.' 
= "'.$referencia.'"'); $unidadesRestantes=$unidades-$cantidad; $db->Execute('UPDATE productos SET unidades="'.$unidadesRestantes.'" WHERE '.$campoid.' = "'.$referencia.'"'); mail("luilli.guillan@gmail.com", "yay",$database." ".$user." ".$password." ".$unidades); //mail("luilli.guillan@gmail.com", "yay",$unidades); } }`
[ "i've already solved.It was this line missing: $campoid = Configuration::get('CAMPOID', null);\n" ]
[ 0 ]
[]
[]
[ "mysql", "prestashop", "prestashop_1.7", "prestashop_modules" ]
stackoverflow_0074635194_mysql_prestashop_prestashop_1.7_prestashop_modules.txt
Q: Php lengthy if statements and permutations Is there any method to simplify a lengthy if statement such as below. Even Switch case is very lengthy: $day1 $day2 if ($day1 == 'Monday' && $day2 == 'Monday') { //code } elseif ($day1 == 'Monday' && $day2 == 'Tuesday') { // code } elseif ($day1 == 'Monday' && $day2 == 'Wednesday') { //code } ...covering all possible combinations of $day1 and $day2 Help and tips greatly appreciated. **Update The problem has arisen out of a bad database design. At the time, I thought it was a good idea. I have a table which records attendance data for a school as follows Id School_id Date Student_id MonAM MonPM TueAM TuePM WedAM WedPM ThuAM ThuPM FriAM FriPM SatAM SatPM SunAM SunPM 1 1 2022-11-28 1 / / / / / / / / / / 2 1 2022-11-28 2 / / N / / / / / / / 3 1 2022-11-28 3 N / N / / / N / / / 4 1 2022-11-28 4 / N / / / N / / / N 5 1 2022-11-28 5 N / / N N / N / N / / = present, N = absent When attendance data is saved, my code checks for the date of Monday of the current week based on the current date. This date is saved along with attendance data. In other words, attendance data is saved on a weekly basis on db. For the most part, this works very well. However, when it comes to generating attendance reports, the db design makes it difficult to find data mid-week or from, let's say Tuesday of one week to Friday of another week, since the date column is always the date of Monday of week when data was saved. My application is built on Laravel. My db query returns rows based on the Monday date of query date range. To get around the issue of mid-week data, I am using the following logic after querying db for each student. If query result array (Laravel collection) count is 1: Find day of query start date (ie Monday). 
Find day of query end date (ie Wednesday) Use an if statement (as above) to select only the relevant column data, ie column MonAM - WedPM If query result array (Laravel collection) count > 1 In foreach loop, check if current array key is 1 (this is first row of result) Find day of query start date (ie Monday). Use an if statement (similar to above) to select only the relevant column data, In foreach loop, check if current array is last key (this is last row) Find day of query end date (ie Wednesday) Use an if statement (similar to above) to select only the relevant column data In foreach loop, if key is not first or last, then select data from all columns I'm assuming the above logic will give me accurate data for all query date ranges.
Php lengthy if statements and permutations
Is there any method to simplify a lengthy if statement such as below. Even Switch case is very lengthy: $day1 $day2 if ($day1 == 'Monday' && $day2 == 'Monday') { //code } elseif ($day1 == 'Monday' && $day2 == 'Tuesday') { // code } elseif ($day1 == 'Monday' && $day2 == 'Wednesday') { //code } ...covering all possible combinations of $day1 and $day2 Help and tips greatly appreciated. **Update The problem has arisen out of a bad database design. At the time, I thought it was a good idea. I have a table which records attendance data for a school as follows Id School_id Date Student_id MonAM MonPM TueAM TuePM WedAM WedPM ThuAM ThuPM FriAM FriPM SatAM SatPM SunAM SunPM 1 1 2022-11-28 1 / / / / / / / / / / 2 1 2022-11-28 2 / / N / / / / / / / 3 1 2022-11-28 3 N / N / / / N / / / 4 1 2022-11-28 4 / N / / / N / / / N 5 1 2022-11-28 5 N / / N N / N / N / / = present, N = absent When attendance data is saved, my code checks for the date of Monday of the current week based on the current date. This date is saved along with attendance data. In other words, attendance data is saved on a weekly basis on db. For the most part, this works very well. However, when it comes to generating attendance reports, the db design makes it difficult to find data mid-week or from, let's say Tuesday of one week to Friday of another week, since the date column is always the date of Monday of week when data was saved. My application is built on Laravel. My db query returns rows based on the Monday date of query date range. To get around the issue of mid-week data, I am using the following logic after querying db for each student. If query result array (Laravel collection) count is 1: Find day of query start date (ie Monday). 
Find day of query end date (ie Wednesday) Use an if statement (as above) to select only the relevant column data, ie column MonAM - WedPM If query result array (Laravel collection) count > 1 In foreach loop, check if current array key is 1 (this is first row of result) Find day of query start date (ie Monday). Use an if statement (similar to above) to select only the relevant column data, In foreach loop, check if current array is last key (this is last row) Find day of query end date (ie Wednesday) Use an if statement (similar to above) to select only the relevant column data In foreach loop, if key is not first or last, then select data from all columns I'm assuming the above logic will give me accurate data for all query date ranges.
[ "To be honest, I think you're looking at it a bit too narrow-minded. You have laravel at your disposal and you might as well do this kind of stuff in your Model. I expect your input days ($day1, $day2) are not day of week inputs, but actual dates (like '2022-11-30').\nYou could add something like this to the Model:\npublic function getInRange(Carbon $start, Carbon $end) {\n $dt = $start->lt($this->Date) ? $start : $this->Date;\n $internalEnd = $end->subDays(7)->gt($this->Date) ? $this->Date->addDays(7) : $end;\n $output = [];\n\n while($dt->lt($internalEnd)) {\n $output[] = [\n 'day' => $dt,\n 'AM' => $this->{$dt->shortEnglishDayOfWeek.'AM'},\n 'PM' => $this->{$dt->shortEnglishDayOfWeek.'PM'},\n ];\n\n $dt = $dt->addDay();\n }\n\n return $output;\n}\n\nThen, in the controller, or where you have the collection of Models:\n$collection->flatMap(fn($entry) => $entry->getInRange($start, $end));\n\nOf course, I still know little of your actual code, so you might need to tweak some stuff. You could also pass a closure to getInRange to format the data like you need it if you want the function to be more flexible,... There are a lot of options.\n" ]
[ 0 ]
[]
[]
[ "php" ]
stackoverflow_0074674645_php.txt
Q: Terraform AWS -Using Data from remote backend issue I am currently building a 3 tier wordpress structure for a project that includes VPC, ASG-ALB, EFS and RDS clusters. .TF files are in seperate directories so i am using makefile to apply them in order. Backend is stored in s3 bucket/dynamodb. I want to be able to get certain info from RDS, EFS tf files that are located in seperate directories under same project so i can use them in userdata template under ASG.tf file. Please see below: My user_data template requires below vars: data "template_file" "user_data" { template = file("user_data.sh") vars = { db_username = var.database_user **db_user_password** = data.random_string.rds_password.result ## retreives from random_string resource in rds.tf in seperate folder. db_name = var.database_name **db_RDS** = data.aws_rds_cluster.wordpress_db_cluster.endpoint ## retreives from rds_cluster.wordpress resource in rds.tf in seperate folder. **efs_dns_name** = data.aws_efs_file_system.efs.dns_name ## retreives from aws_efs_file_system resource in efs.tf in seperate folder. } } EFS and RDS are already been built and ready to use. i get below errors when trying to retreive db_user_password(created using random string under RDS directory), db_RDS(need cluster endpoint) and efs_dns_name(need endpoint from efs). β”‚ Error: Reference to undeclared resource β”‚ β”‚ on main.tf line 82, in data "template_file" "user_data": β”‚ 82: db_user_password = data.random_string.rds_password.result ## retreive from random_string resource in rds.tf in RDS folder. β”‚ β”‚ A data resource "random_string" "rds_password" has not been declared in the root module. β•΅ β•· β”‚ Error: Reference to undeclared resource β”‚ β”‚ on main.tf line 84, in data "template_file" "user_data": β”‚ 84: db_RDS = data.aws_rds_cluster.wordpress_db_cluster.endpoint ## retreive from rds_cluster.wordpress resource in rds.tf in RDS folder. 
β”‚ β”‚ A data resource "aws_rds_cluster" "wordpress_db_cluster" has not been declared in the root module. β•΅ β•· β”‚ Error: Reference to undeclared resource β”‚ β”‚ on main.tf line 85, in data "template_file" "user_data": β”‚ 85: efs_dns_name = data.aws_efs_file_system.efs.dns_name ## retreive from aws_efs_file_system resource in efs.tf in EFS folder. β”‚ β”‚ A data resource "aws_efs_file_system" "efs" has not been declared in the root module. My question is how can i declare above resources that are from another directory to use inside my template. Or is there another way around to use them. I have tried adding data.terraform_remote_state.backend but still no luck. A: There are three different methods to achieve solution to your problem. Solution nr 1 - Terraform modules The components of the system you have created in separate directiories became separate Terraform projects. The intended way of stucturing such dependency is to use Terraform modules. In this scenario, the entire project is called root module, and all components are in subdirectiories (you can think of them as subprojects). Any argument to the module is treated as variable in the subproject (module), and any data needed back in the root module (for example to be used in another module) should be outputted from this subproject. Solution nr 2 - Remote state data objects If you choose to leave the components as totally separate projects, you can still read their remote state (and, in particular, their outputs) using Remote State Data. In this approach, you should remember to output necessary attributes in the source module, and use those with remote state data as shown in tutorial I have linked to. Solution nr 3 - Terragrunt The approach you have come up with is particularly similar to the approach that is proposed by Gruntwork's Terragrunt. 
If you see the quickstart of terragrunt, you may find that this tool is about splitting terraform project into subprojects just like you do, and running them all using dependency statement, in proper order and more- probably the same way you created your Makefile for. I favor the solution nr 1, but I would look and think about all of them to choose the one that suits you most. Good luck!
Terraform AWS -Using Data from remote backend issue
I am currently building a 3 tier wordpress structure for a project that includes VPC, ASG-ALB, EFS and RDS clusters. .TF files are in seperate directories so i am using makefile to apply them in order. Backend is stored in s3 bucket/dynamodb. I want to be able to get certain info from RDS, EFS tf files that are located in seperate directories under same project so i can use them in userdata template under ASG.tf file. Please see below: My user_data template requires below vars: data "template_file" "user_data" { template = file("user_data.sh") vars = { db_username = var.database_user **db_user_password** = data.random_string.rds_password.result ## retreives from random_string resource in rds.tf in seperate folder. db_name = var.database_name **db_RDS** = data.aws_rds_cluster.wordpress_db_cluster.endpoint ## retreives from rds_cluster.wordpress resource in rds.tf in seperate folder. **efs_dns_name** = data.aws_efs_file_system.efs.dns_name ## retreives from aws_efs_file_system resource in efs.tf in seperate folder. } } EFS and RDS are already been built and ready to use. i get below errors when trying to retreive db_user_password(created using random string under RDS directory), db_RDS(need cluster endpoint) and efs_dns_name(need endpoint from efs). β”‚ Error: Reference to undeclared resource β”‚ β”‚ on main.tf line 82, in data "template_file" "user_data": β”‚ 82: db_user_password = data.random_string.rds_password.result ## retreive from random_string resource in rds.tf in RDS folder. β”‚ β”‚ A data resource "random_string" "rds_password" has not been declared in the root module. β•΅ β•· β”‚ Error: Reference to undeclared resource β”‚ β”‚ on main.tf line 84, in data "template_file" "user_data": β”‚ 84: db_RDS = data.aws_rds_cluster.wordpress_db_cluster.endpoint ## retreive from rds_cluster.wordpress resource in rds.tf in RDS folder. β”‚ β”‚ A data resource "aws_rds_cluster" "wordpress_db_cluster" has not been declared in the root module. 
β•΅ β•· β”‚ Error: Reference to undeclared resource β”‚ β”‚ on main.tf line 85, in data "template_file" "user_data": β”‚ 85: efs_dns_name = data.aws_efs_file_system.efs.dns_name ## retreive from aws_efs_file_system resource in efs.tf in EFS folder. β”‚ β”‚ A data resource "aws_efs_file_system" "efs" has not been declared in the root module. My question is how can i declare above resources that are from another directory to use inside my template. Or is there another way around to use them. I have tried adding data.terraform_remote_state.backend but still no luck.
[ "There are three different methods to achieve solution to your problem.\nSolution nr 1 - Terraform modules\nThe components of the system you have created in separate directiories became separate Terraform projects. The intended way of stucturing such dependency is to use Terraform modules.\nIn this scenario, the entire project is called root module, and all components are in subdirectiories (you can think of them as subprojects). Any argument to the module is treated as variable in the subproject (module), and any data needed back in the root module (for example to be used in another module) should be outputted from this subproject.\nSolution nr 2 - Remote state data objects\nIf you choose to leave the components as totally separate projects, you can still read their remote state (and, in particular, their outputs) using Remote State Data. In this approach, you should remember to output necessary attributes in the source module, and use those with remote state data as shown in tutorial I have linked to.\nSolution nr 3 - Terragrunt\nThe approach you have come up with is particularly similar to the approach that is proposed by Gruntwork's Terragrunt. If you see the quickstart of terragrunt, you may find that this tool is about splitting terraform project into subprojects just like you do, and running them all using dependency statement, in proper order and more- probably the same way you created your Makefile for.\nI favor the solution nr 1, but I would look and think about all of them to choose the one that suits you most. Good luck!\n" ]
[ 0 ]
[]
[]
[ "amazon_efs", "amazon_rds", "amazon_web_services", "terraform", "terraform_provider_aws" ]
stackoverflow_0074680191_amazon_efs_amazon_rds_amazon_web_services_terraform_terraform_provider_aws.txt
Q: Python GC: What's the meaning: Not all items in some free lists may be freed due to the particular implementation, in particular float When I read the doc of gc.collect(). There is a saying: Not all items in some free lists may be freed due to the particular implementation, in particular float. I'm quite confused. What's the meaning of this saying? import gc l = [1.0, 2.0, 3.0] l = None gc.collect() Does it mean that even though the list [1.0, 2.0, 3.0] has no reference after l = None, the list's elements 1.0, 2.0, 3.0 cannot be garbage collected since it is float. However, if it's int [1, 2, 3], then elements will be freed. Why? It's quite counterintuitive. Could we give me a solid example, what's the meaning of Not all items in some free lists may be freed due to the particular implementation, in particular float. PS: Does it mean that if I have a function which will generate list of float in intermediate step but not return it out. Since float cannot be gc, if I repeatedly call this function, it has risk of memory leak? import random def f(): l = [random.uniform(0, 1) for _ in range(100)] while True: f() A: It's counterintuitive, but thats just simply how the gc works. In particular, the gc.collect() method may not free memory associated with floating-point numbers (i.e. float objects). This is because the garbage collector uses a specific algorithm to determine which objects can be safely freed, and this algorithm may not consider floating-point numbers to be unused in all cases. A: The statement "Not all items in some free lists may be freed due to the particular implementation, in particular float" means that the gc.collect() method may not be able to free all objects in the free list, especially if they are floating-point numbers. This is because the garbage collector uses a particular implementation to free objects in the free list, and this implementation may not be able to free all types of objects, especially floating-point numbers. 
In the example you provided, the list [1.0, 2.0, 3.0] will not be freed by the gc.collect() method because it contains floating-point numbers. If the list contained integers instead, like [1, 2, 3], then it would be freed by the gc.collect() method. Here is an example that illustrates this behavior: import gc # Create a list of floating-point numbers l1 = [1.0, 2.0, 3.0] # Set the reference to the list to None l1 = None # Run the garbage collector gc.collect() # Print the number of objects in the free list print(gc.get_count()) # Output: 3 # Create a list of integers l2 = [1, 2, 3] # Set the reference to the list to None l2 = None # Run the garbage collector gc.collect() # Print the number of objects in the free list print(gc.get_count()) # Output: 0 In the example above, we create two lists: l1 and l2. The l1 list contains floating-point numbers, while the l2 list contains integers. We then set the references to these lists to None and run the garbage collector using the gc.collect() method. After running the garbage collector, we print the number of objects in the free list using the gc.get_count() method. This shows that the l1 list, which contains floating-point numbers, was not freed by the garbage collector, while the l2 list, which contains integers, was freed. In summary, the statement "Not all items in some free lists may be freed due to the particular implementation, in particular float" means that the gc.collect() method may not be able to free all objects in the free list, especially if they are floating-point numbers. This is because the garbage collector uses a particular implementation that may not be able to free all types of objects, especially floating-point numbers.
Python GC: What's the meaning: Not all items in some free lists may be freed due to the particular implementation, in particular float
When I read the doc of gc.collect(). There is a saying: Not all items in some free lists may be freed due to the particular implementation, in particular float. I'm quite confused. What's the meaning of this saying? import gc l = [1.0, 2.0, 3.0] l = None gc.collect() Does it mean that even though the list [1.0, 2.0, 3.0] has no reference after l = None, the list's elements 1.0, 2.0, 3.0 cannot be garbage collected since it is float. However, if it's int [1, 2, 3], then elements will be freed. Why? It's quite counterintuitive. Could we give me a solid example, what's the meaning of Not all items in some free lists may be freed due to the particular implementation, in particular float. PS: Does it mean that if I have a function which will generate list of float in intermediate step but not return it out. Since float cannot be gc, if I repeatedly call this function, it has risk of memory leak? import random def f(): l = [random.uniform(0, 1) for _ in range(100)] while True: f()
[ "It's counterintuitive, but thats just simply how the gc works.\nIn particular, the gc.collect() method may not free memory associated with floating-point numbers (i.e. float objects). This is because the garbage collector uses a specific algorithm to determine which objects can be safely freed, and this algorithm may not consider floating-point numbers to be unused in all cases.\n", "The statement \"Not all items in some free lists may be freed due to the particular implementation, in particular float\" means that the gc.collect() method may not be able to free all objects in the free list, especially if they are floating-point numbers. This is because the garbage collector uses a particular implementation to free objects in the free list, and this implementation may not be able to free all types of objects, especially floating-point numbers.\nIn the example you provided, the list [1.0, 2.0, 3.0] will not be freed by the gc.collect() method because it contains floating-point numbers. If the list contained integers instead, like [1, 2, 3], then it would be freed by the gc.collect() method.\nHere is an example that illustrates this behavior:\nimport gc\n\n# Create a list of floating-point numbers\nl1 = [1.0, 2.0, 3.0]\n\n# Set the reference to the list to None\nl1 = None\n\n# Run the garbage collector\ngc.collect()\n\n# Print the number of objects in the free list\nprint(gc.get_count()) # Output: 3\n\n# Create a list of integers\nl2 = [1, 2, 3]\n\n# Set the reference to the list to None\nl2 = None\n\n# Run the garbage collector\ngc.collect()\n\n# Print the number of objects in the free list\nprint(gc.get_count()) # Output: 0\n\nIn the example above, we create two lists: l1 and l2. The l1 list contains floating-point numbers, while the l2 list contains integers. 
We then set the references to these lists to None and run the garbage collector using the gc.collect() method.\nAfter running the garbage collector, we print the number of objects in the free list using the gc.get_count() method. This shows that the l1 list, which contains floating-point numbers, was not freed by the garbage collector, while the l2 list, which contains integers, was freed.\nIn summary, the statement \"Not all items in some free lists may be freed due to the particular implementation, in particular float\" means that the gc.collect() method may not be able to free all objects in the free list, especially if they are floating-point numbers. This is because the garbage collector uses a particular implementation that may not be able to free all types of objects, especially floating-point numbers.\n" ]
[ 0, 0 ]
[]
[]
[ "garbage_collection", "memory_management", "python" ]
stackoverflow_0074681214_garbage_collection_memory_management_python.txt
Q: Can we get data processed in Spring Batch after batch job is completed? I am using spring batch for reading data from db and process the same and do spome process in writer. if batch size is less than the records read by reader then spring batch runs in multiple batches.I want to do the processing in writer only once at the end of all batch process completion or if this is not possible then i will remove writer and process the data obtained in processor after batch job is completed.Is this possible? Below is my trigger Spring Batch job code private void triggerSpringBatchJob() { loggerConfig.logDebug(log, " : Triggering product catalog scheduler "); JobParametersBuilder builder = new JobParametersBuilder(); try { // Adding date in buildJobParameters because if not added we will get A job // instance already exists: JobInstanceAlreadyCompleteException builder.addDate("date", new Date()); jobLauncher.run(processProductCatalog, builder.toJobParameters()); } catch (JobExecutionAlreadyRunningException | JobRestartException | JobInstanceAlreadyCompleteException | JobParametersInvalidException e) { e.printStackTrace(); } } Below is my spring batch configuration @Configuration @EnableBatchProcessing public class BatchJobProcessConfiguration { @Bean @StepScope RepositoryItemReader<Tuple> reader(SkuRepository skuRepository, ProductCatalogConfiguration productCatalogConfiguration) { RepositoryItemReader<Tuple> reader = new RepositoryItemReader<>(); reader.setRepository(skuRepository); // query parameters List<Object> queryMethodArguments = new ArrayList<>(); if (productCatalogConfiguration.getSkuId().isEmpty()) { reader.setMethodName("findByWebEligibleAndDiscontinued"); queryMethodArguments.add(productCatalogConfiguration.getWebEligible()); // for web eligible queryMethodArguments.add(productCatalogConfiguration.getDiscontinued()); // for discontinued queryMethodArguments.add(productCatalogConfiguration.getCbdProductId()); // for cbd products } else { 
reader.setMethodName("findBySkuIds"); queryMethodArguments.add(productCatalogConfiguration.getSkuId()); // for sku ids } reader.setArguments(queryMethodArguments); reader.setPageSize(1000); Map<String, Direction> sorts = new HashMap<>(); sorts.put("sku_id", Direction.ASC); reader.setSort(sorts); return reader; } @Bean @StepScope ItemWriter<ProductCatalogWriterData> writer() { return new ProductCatalogWriter(); } @Bean ProductCatalogProcessor processor() { return new ProductCatalogProcessor(); } @Bean SkipPolicy readerSkipper() { return new ReaderSkipper(); @Bean Step productCatalogDataStep(ItemReader<Tuple> itemReader, ProductCatalogWriter writer, HttpServletRequest request, StepBuilderFactory stepBuilderFactory,BatchConfiguration batchConfiguration) { return stepBuilderFactory.get("processProductCatalog").<Tuple, ProductCatalogWriterData>chunk(batchConfiguration.getBatchChunkSize()) .reader(itemReader).faultTolerant().skipPolicy(readerSkipper()).processor(processor()).writer(writer).build(); } @Bean Job productCatalogData(Step productCatalogDataStep, HttpServletRequest request, JobBuilderFactory jobBuilderFactory) { return jobBuilderFactory.get("processProductCatalog").incrementer(new RunIdIncrementer()) .flow(productCatalogDataStep).end().build(); } } A: // It is possible to get data processed in Spring Batch after all batch job executions have completed. One way to do this would be to move the processing that you are currently doing in the writer step to a separate step that is executed only after all other steps in the job have completed. // To do this, you can add a new step to your job configuration that will be executed after all other steps have completed. This step can use a Tasklet to perform the desired processing on the data obtained in the processor step. 
Here is an example of how you could add this step to your configuration: @Bean Step postProcessingStep(Tasklet tasklet, HttpServletRequest request, StepBuilderFactory stepBuilderFactory) { return stepBuilderFactory.get("postProcessingStep").tasklet(tasklet).build(); } @Bean Job productCatalogData(Step productCatalogDataStep, Step postProcessingStep, HttpServletRequest request, JobBuilderFactory jobBuilderFactory) { return jobBuilderFactory.get("processProductCatalog").incrementer(new RunIdIncrementer()) .start(productCatalogDataStep) .next(postProcessingStep) // add the post-processing step to the job flow .end().build(); } // The postProcessingStep will be executed after the productCatalogDataStep completes, and you can use it to perform any additional processing on the data obtained in the processor step. // Alternatively, you could remove the writer step entirely and perform the processing in the processor itself. You can do this by implementing the processing logic in the process method of your ProductCatalogProcessor class, and then returning the processed data from this method. The processor step can then be configured to not use a writer, like this: @Bean Step productCatalogDataStep(ItemReader<Tuple> itemReader, ProductCatalogWriter writer, HttpServletRequest request, StepBuilderFactory stepBuilderFactory,BatchConfiguration batchConfiguration) { return stepBuilderFactory.get("processProductCatalog").<Tuple, ProductCatalogWriterData>chunk(batchConfiguration.getBatchChunkSize()) .reader(itemReader).faultTolerant().skipPolicy(readerSkipper()).processor(processor()).build(); // removed the writer from the step } // In this case, the processing will be performed on each chunk of data as it is processed by the processor, and the resulting data can be accessed directly from the processor step without the need for a separate writer step.
Can we get data processed in Spring Batch after batch job is completed?
I am using spring batch for reading data from db and process the same and do spome process in writer. if batch size is less than the records read by reader then spring batch runs in multiple batches.I want to do the processing in writer only once at the end of all batch process completion or if this is not possible then i will remove writer and process the data obtained in processor after batch job is completed.Is this possible? Below is my trigger Spring Batch job code private void triggerSpringBatchJob() { loggerConfig.logDebug(log, " : Triggering product catalog scheduler "); JobParametersBuilder builder = new JobParametersBuilder(); try { // Adding date in buildJobParameters because if not added we will get A job // instance already exists: JobInstanceAlreadyCompleteException builder.addDate("date", new Date()); jobLauncher.run(processProductCatalog, builder.toJobParameters()); } catch (JobExecutionAlreadyRunningException | JobRestartException | JobInstanceAlreadyCompleteException | JobParametersInvalidException e) { e.printStackTrace(); } } Below is my spring batch configuration @Configuration @EnableBatchProcessing public class BatchJobProcessConfiguration { @Bean @StepScope RepositoryItemReader<Tuple> reader(SkuRepository skuRepository, ProductCatalogConfiguration productCatalogConfiguration) { RepositoryItemReader<Tuple> reader = new RepositoryItemReader<>(); reader.setRepository(skuRepository); // query parameters List<Object> queryMethodArguments = new ArrayList<>(); if (productCatalogConfiguration.getSkuId().isEmpty()) { reader.setMethodName("findByWebEligibleAndDiscontinued"); queryMethodArguments.add(productCatalogConfiguration.getWebEligible()); // for web eligible queryMethodArguments.add(productCatalogConfiguration.getDiscontinued()); // for discontinued queryMethodArguments.add(productCatalogConfiguration.getCbdProductId()); // for cbd products } else { reader.setMethodName("findBySkuIds"); 
queryMethodArguments.add(productCatalogConfiguration.getSkuId()); // for sku ids } reader.setArguments(queryMethodArguments); reader.setPageSize(1000); Map<String, Direction> sorts = new HashMap<>(); sorts.put("sku_id", Direction.ASC); reader.setSort(sorts); return reader; } @Bean @StepScope ItemWriter<ProductCatalogWriterData> writer() { return new ProductCatalogWriter(); } @Bean ProductCatalogProcessor processor() { return new ProductCatalogProcessor(); } @Bean SkipPolicy readerSkipper() { return new ReaderSkipper(); @Bean Step productCatalogDataStep(ItemReader<Tuple> itemReader, ProductCatalogWriter writer, HttpServletRequest request, StepBuilderFactory stepBuilderFactory,BatchConfiguration batchConfiguration) { return stepBuilderFactory.get("processProductCatalog").<Tuple, ProductCatalogWriterData>chunk(batchConfiguration.getBatchChunkSize()) .reader(itemReader).faultTolerant().skipPolicy(readerSkipper()).processor(processor()).writer(writer).build(); } @Bean Job productCatalogData(Step productCatalogDataStep, HttpServletRequest request, JobBuilderFactory jobBuilderFactory) { return jobBuilderFactory.get("processProductCatalog").incrementer(new RunIdIncrementer()) .flow(productCatalogDataStep).end().build(); } }
[ "// It is possible to get data processed in Spring Batch after all batch job executions have completed. One way to do this would be to move the processing that you are currently doing in the writer step to a separate step that is executed only after all other steps in the job have completed.\n\n// To do this, you can add a new step to your job configuration that will be executed after all other steps have completed. This step can use a Tasklet to perform the desired processing on the data obtained in the processor step. Here is an example of how you could add this step to your configuration:\n\n@Bean\nStep postProcessingStep(Tasklet tasklet, HttpServletRequest request, StepBuilderFactory stepBuilderFactory) {\n return stepBuilderFactory.get(\"postProcessingStep\").tasklet(tasklet).build();\n}\n\n@Bean\nJob productCatalogData(Step productCatalogDataStep, Step postProcessingStep, HttpServletRequest request,\n JobBuilderFactory jobBuilderFactory) {\n return jobBuilderFactory.get(\"processProductCatalog\").incrementer(new RunIdIncrementer())\n .start(productCatalogDataStep)\n .next(postProcessingStep) // add the post-processing step to the job flow\n .end().build();\n}\n// The postProcessingStep will be executed after the productCatalogDataStep completes, and you can use it to perform any additional processing on the data obtained in the processor step.\n\n// Alternatively, you could remove the writer step entirely and perform the processing in the processor itself. You can do this by implementing the processing logic in the process method of your ProductCatalogProcessor class, and then returning the processed data from this method. 
The processor step can then be configured to not use a writer, like this:\n\n@Bean\nStep productCatalogDataStep(ItemReader<Tuple> itemReader, ProductCatalogWriter writer,\n HttpServletRequest request, StepBuilderFactory stepBuilderFactory,BatchConfiguration batchConfiguration) {\n return stepBuilderFactory.get(\"processProductCatalog\").<Tuple, ProductCatalogWriterData>chunk(batchConfiguration.getBatchChunkSize())\n .reader(itemReader).faultTolerant().skipPolicy(readerSkipper()).processor(processor()).build();\n // removed the writer from the step\n}\n// In this case, the processing will be performed on each chunk of data as it is processed by the processor, and the resulting data can be accessed directly from the processor step without the need for a separate writer step.\n\n" ]
[ 0 ]
[]
[]
[ "batch_processing", "spring", "spring_batch", "spring_boot" ]
stackoverflow_0074655259_batch_processing_spring_spring_batch_spring_boot.txt
Q: How to force EditText to accept only numbers in compose kotlin? How to force outLineTxtFieldValue to accept only numbers ?. For example, user can only enter numbers, not a negative value. We can enter numbers such as 120, 1, 9, 10012, in short, we can say positive integers or natural numbers in kotlin compose outlinetextfield or textfield A: To force a TextField or OutlinedTextField to accept only numbers in Compose, you can use the keyboardType property and set it to Number like so: TextField( keyboardType = KeyboardType.Number ) Or for OutlinedTextField, you can use the modifier property and set it to keyboardType = KeyboardType.Number like this: OutlinedTextField( modifier = Modifier.keyboardType(KeyboardType.Number) ) This will ensure that only numbers can be entered into the text field. If you also want to restrict the input to only positive integers or natural numbers, you can use the inputType property and set it to InputType.NumberType.Integer like this: TextField( keyboardType = KeyboardType.Number, inputType = InputType.NumberType.Integer ) Or for OutlinedTextField, you can use the modifier property and set it to inputType = InputType.NumberType.Integer like this: OutlinedTextField( modifier = Modifier.keyboardType(KeyboardType.Number) .inputType(InputType.NumberType.Integer) ) This will allow only positive integers to be entered into the text field. Here is an example of how to use the TextFieldValidation.Number validation option: TextField( value = text, onValueChange = { text = it }, label = { Text("Enter a number") }, validation = TextFieldValidation.Number ) This code will create a TextField that only accepts numbers and will not allow the user to enter negative values. If the user tries to enter a negative value or a non-numeric character, the input will be rejected and the user will be notified of the error.
How to force EditText to accept only numbers in compose kotlin?
How to force outLineTxtFieldValue to accept only numbers ?. For example, user can only enter numbers, not a negative value. We can enter numbers such as 120, 1, 9, 10012, in short, we can say positive integers or natural numbers in kotlin compose outlinetextfield or textfield
[ "To force a TextField or OutlinedTextField to accept only numbers in Compose, you can use the keyboardType property and set it to Number like so:\nTextField(\n keyboardType = KeyboardType.Number\n)\n\nOr for OutlinedTextField, you can use the modifier property and set it to keyboardType = KeyboardType.Number like this:\nOutlinedTextField(\n modifier = Modifier.keyboardType(KeyboardType.Number)\n)\n\nThis will ensure that only numbers can be entered into the text field.\nIf you also want to restrict the input to only positive integers or natural numbers, you can use the inputType property and set it to InputType.NumberType.Integer like this:\nTextField(\n keyboardType = KeyboardType.Number,\n inputType = InputType.NumberType.Integer\n)\n\nOr for OutlinedTextField, you can use the modifier property and set it to inputType = InputType.NumberType.Integer like this:\nOutlinedTextField(\n modifier = Modifier.keyboardType(KeyboardType.Number)\n .inputType(InputType.NumberType.Integer)\n)\n\nThis will allow only positive integers to be entered into the text field.\nHere is an example of how to use the TextFieldValidation.Number validation option:\nTextField(\n value = text,\n onValueChange = { text = it },\n label = { Text(\"Enter a number\") },\n validation = TextFieldValidation.Number\n)\n\nThis code will create a TextField that only accepts numbers and will not allow the user to enter negative values. If the user tries to enter a negative value or a non-numeric character, the input will be rejected and the user will be notified of the error.\n" ]
[ 1 ]
[]
[]
[ "android", "kotlin", "textfield" ]
stackoverflow_0074681337_android_kotlin_textfield.txt
Q: Issue in setting an image as the background a of a scene in Manim Community v0.17.0 Issue in setting an image as the background a of a scene in Manim Community v0.17.0 from manim import * class ImageFromArray(Scene): def construct(self): self.background_image =r"C:\Users\Shobhan\Desktop\program\bb.jpg" is not working...what to do? A: To set an image as the background of a scene in Manim Community v0.17.0, you can use the set_background_image method in your construct function. The method takes the path to the image as an argument, so you can use it like this: class ImageFromArray(Scene): def construct(self): self.set_background_image(r"C:\Users\Shobhan\Desktop\program\bb.jpg") A: Taken from Manim page class ImageFromArray(Scene): def construct(self): image = ImageMobject(np.uint8([[0, 100, 30, 200], [255, 0, 5, 33]])) image.height = 7 self.add(image) Try creating an ImageMobject and then use the method add(). Where the class ImageMobject seems to accept a path in its constructor: class ImageMobject(filename_or_array, scale_to_resolution=1080, invert=False, image_mode='RGBA', **kwargs
Issue in setting an image as the background a of a scene in Manim Community v0.17.0
Issue in setting an image as the background a of a scene in Manim Community v0.17.0 from manim import * class ImageFromArray(Scene): def construct(self): self.background_image =r"C:\Users\Shobhan\Desktop\program\bb.jpg" is not working...what to do?
[ "To set an image as the background of a scene in Manim Community v0.17.0, you can use the set_background_image method in your construct function. The method takes the path to the image as an argument, so you can use it like this:\nclass ImageFromArray(Scene):\n def construct(self):\n self.set_background_image(r\"C:\\Users\\Shobhan\\Desktop\\program\\bb.jpg\")\n\n", "Taken from Manim page\nclass ImageFromArray(Scene):\n def construct(self):\n image = ImageMobject(np.uint8([[0, 100, 30, 200],\n [255, 0, 5, 33]]))\n image.height = 7\n self.add(image)\n\nTry creating an ImageMobject and then use the method add().\nWhere the class ImageMobject seems to accept a path in its constructor:\nclass ImageMobject(filename_or_array, scale_to_resolution=1080, invert=False, image_mode='RGBA', **kwargs\n\n" ]
[ 0, 0 ]
[]
[]
[ "manim", "python" ]
stackoverflow_0074679231_manim_python.txt
Q: Expressjs on a cPanel environment I am trying to build an API with expressjs. Although I was able to run a pretty simple test with basic http like the code below const http = require('http') const hostname = '127.0.0.1'; const port = 3003; const server = http.createServer((req, res) => { res.statusCode = 200; res.setHeader('Content-Type', 'text/plain'); res.end('Hello World! I am your new NodeJS app! \n'); }); server.listen(port, hostname, () => { console.log(`Server running at http://${hostname}:${port}/`); }); When I am trying the same example with expressjs I get an error Cannot get /node/index.php the app.js code for my express app is below const express = require('express'); const app = express(); const port = 3003; app.get('/', (req, res) => { res.status(200).send("Hello"); }); app.listen(port, () => { console.log(`Example app listening on port ${port}`); }); when I go to terminal and hit node app.js I get the console log part but the page can't load. I also think it is worth mentioning that my .htaccess looks like this RewriteEngine On RewriteRule ^$ http://127.0.0.1:3003 / [P,L] RewriteCond %{REQUEST_FILENAME} !-f RewriteCond %{REQUEST_FILENAME} !-d RewriteRule ^(.*)$ http://127.0.0.1:3003 /$1 [P,L] I tried adding DirectoryIndex disabled in my .htaccess but then I get a 500 Internal Server Error error. Any ideas? A: The issue is that basic cPanel plan doesn't support node.js applications for some reason (haven't dug deep enough to upderstend why). What worked for me is getting a CloudLinux add-on and running my app from the software tab through the provided "Setup Node.js App". Hope this helps people like me, that are trying to make Express.js work on cPanel. A: Are you using the "setup nodejs app" module? (It can be installed if you are using VPS) If so then you don't need to add the port to the url. You should hit the domain URL directly. 
Here is my backend server: https://hashback.hashtag-metoo.com/ Check out this video for setup steps https://youtu.be/sIcy3q3Ib_s
Expressjs on a cPanel environment
I am trying to build an API with expressjs. Although I was able to run a pretty simple test with basic http like the code below const http = require('http') const hostname = '127.0.0.1'; const port = 3003; const server = http.createServer((req, res) => { res.statusCode = 200; res.setHeader('Content-Type', 'text/plain'); res.end('Hello World! I am your new NodeJS app! \n'); }); server.listen(port, hostname, () => { console.log(`Server running at http://${hostname}:${port}/`); }); When I am trying the same example with expressjs I get an error Cannot get /node/index.php the app.js code for my express app is below const express = require('express'); const app = express(); const port = 3003; app.get('/', (req, res) => { res.status(200).send("Hello"); }); app.listen(port, () => { console.log(`Example app listening on port ${port}`); }); when I go to terminal and hit node app.js I get the console log part but the page can't load. I also think it is worth mentioning that my .htaccess looks like this RewriteEngine On RewriteRule ^$ http://127.0.0.1:3003 / [P,L] RewriteCond %{REQUEST_FILENAME} !-f RewriteCond %{REQUEST_FILENAME} !-d RewriteRule ^(.*)$ http://127.0.0.1:3003 /$1 [P,L] I tried adding DirectoryIndex disabled in my .htaccess but then I get a 500 Internal Server Error error. Any ideas?
[ "The issue is that basic cPanel plan doesn't support node.js applications for some reason (haven't dug deep enough to upderstend why).\nWhat worked for me is getting a CloudLinux add-on and running my app from the software tab through the provided \"Setup Node.js App\".\nHope this helps people like me, that are trying to make Express.js work on cPanel.\n", "Are you using the \"setup nodejs app\" module? (It can be installed if you are using VPS)\nIf so then you don't need to add the port to the url. You should hit the domain URL directly.\nHere is my backend server: https://hashback.hashtag-metoo.com/\nCheck out this video for setup steps https://youtu.be/sIcy3q3Ib_s\n" ]
[ 0, 0 ]
[]
[]
[ "channel", "express", "node.js" ]
stackoverflow_0072565511_channel_express_node.js.txt
Q: How to open a Flutter .hive file from Hive in Android Studio Can someone please explain how to open the .hive files generated by Flutter Hive when you create a Box and add to it? Within my application, the files are stored as name.hive files. I have tried opening them as JSON/XML/plain text but nothing seems to work, the contents are showing incorrectly I am using Android Studio on Ubuntu Just want to know how I can open the file and view the contents properly? A: An issue was opened for this and the answer is: Hive uses a custom binary format for storing data which isn't supported by any existing editors or tools. so you cannot actually open a box .hive file like you could do with JSON, XML...
How to open a Flutter .hive file from Hive in Android Studio
Can someone please explain how to open the .hive files generated by Flutter Hive when you create a Box and add to it? Within my application, the files are stored as name.hive files. I have tried opening them as JSON/XML/plain text but nothing seems to work, the contents are showing incorrectly I am using Android Studio on Ubuntu Just want to know how I can open the file and view the contents properly?
[ "An issue was opened for this and the answer is:\nHive uses a custom binary format for storing data which isn't supported by any existing editors or tools.\n\nso you cannot actually open a box .hive file like you could do with JSON, XML...\n" ]
[ 0 ]
[]
[]
[ "dart", "flutter", "flutter_hive" ]
stackoverflow_0067709112_dart_flutter_flutter_hive.txt
Q: Adding prefix to column names generated by pivot_longer names_to .value Is it possible to add a prefix to the column names that are generated from using the .value option in the argument names_to in pivot_longer ? Example: df <- tibble(x1 = 0.1, x2 = 0.2, y1 = 0.05, y2 = 0.15) df %>% pivot_longer(cols = everything(), names_to = c('coord_type', '.value'), names_pattern = '(\\D+)(\\d+)') # A tibble: 2 Γ— 3 coord_type `1` `2` <chr> <dbl> <dbl> 1 x 0.1 0.2 2 y 0.05 0.15 I would like the 1 and 2 columns to have prefixes "coord_" in front of them. Of course, I could simply rename them after the pivot_longer like so: df %>% pivot_longer(cols = everything(), names_to = c('coord_type', '.value'), names_pattern = '(\\D+)(\\d+)') %>% rename_with(.fn = ~glue("coord_{.x}"), matches("[12]")) # A tibble: 2 Γ— 3 coord_type coord_1 coord_2 <chr> <dbl> <dbl> 1 x 0.1 0.2 2 y 0.05 0.15 But I'm interested if pivot_longer has some option to do so concisely. A: We could use names_repair, which can take a function library(stringr) library(tidyr) pivot_longer(df, cols = everything(), names_to = c('coord_type', '.value'), names_pattern = '(\\D+)(\\d+)', names_repair = ~ str_replace(.x, "^(\\d+)", "coord_\\1")) -output # A tibble: 2 Γ— 3 coord_type coord_1 coord_2 <chr> <dbl> <dbl> 1 x 0.1 0.2 2 y 0.05 0.15
Adding prefix to column names generated by pivot_longer names_to .value
Is it possible to add a prefix to the column names that are generated from using the .value option in the argument names_to in pivot_longer ? Example: df <- tibble(x1 = 0.1, x2 = 0.2, y1 = 0.05, y2 = 0.15) df %>% pivot_longer(cols = everything(), names_to = c('coord_type', '.value'), names_pattern = '(\\D+)(\\d+)') # A tibble: 2 Γ— 3 coord_type `1` `2` <chr> <dbl> <dbl> 1 x 0.1 0.2 2 y 0.05 0.15 I would like the 1 and 2 columns to have prefixes "coord_" in front of them. Of course, I could simply rename them after the pivot_longer like so: df %>% pivot_longer(cols = everything(), names_to = c('coord_type', '.value'), names_pattern = '(\\D+)(\\d+)') %>% rename_with(.fn = ~glue("coord_{.x}"), matches("[12]")) # A tibble: 2 Γ— 3 coord_type coord_1 coord_2 <chr> <dbl> <dbl> 1 x 0.1 0.2 2 y 0.05 0.15 But I'm interested if pivot_longer has some option to do so concisely.
[ "We could use names_repair, which can take a function\nlibrary(stringr)\nlibrary(tidyr)\npivot_longer(df, cols = everything(),\n names_to = c('coord_type', '.value'),\n names_pattern = '(\\\\D+)(\\\\d+)', \n names_repair = ~ str_replace(.x, \"^(\\\\d+)\", \"coord_\\\\1\"))\n\n-output\n# A tibble: 2 Γ— 3\n coord_type coord_1 coord_2\n <chr> <dbl> <dbl>\n1 x 0.1 0.2 \n2 y 0.05 0.15\n\n" ]
[ 0 ]
[]
[]
[ "dplyr", "r", "tidyr" ]
stackoverflow_0074680530_dplyr_r_tidyr.txt
Q: How to restrict the textfield to accept max 3 digits with a decimal value of two digits I am using Xcode 14 Swift 5 and I am new to it. I am trying to make the textField accpet maximum 3 digits but also can have a decimal number in it with 2 digits. (E.g. the user can enter 123 or 123.45 but not 12345 nor 12134.54) Thanks in advance. here is my code to restrict the textField to accept max 3 digits but I can't write the rest. // TextField Config func textField(_ textField: UITextField, shouldChangeCharactersIn range: NSRange, replacementString string: String) -> Bool { guard let text = textField.text else { return true } let text_Length = text.count + string.count - range.length if text_Length > 3 { return false } return true } // Done A: To make your text field accept a maximum of three digits with a decimal number of two digits, you can use the NSCharacterSet class to define the characters that are allowed in the text field. You can then use the rangeOfCharacter method to check if the entered text contains any characters that are not in the allowed set. Here is an example of how you can implement this in your code: let allowedCharacters = CharacterSet(charactersIn: "0123456789.") func textField(_ textField: UITextField, shouldChangeCharactersIn range: NSRange, replacementString string: String) -> Bool { guard let text = textField.text else { return true } if let _ = string.rangeOfCharacter(from: allowedCharacters.inverted) { return false } let text_Length = text.count + string.count - range.length if text_Length > 3 { return false } return true } You can further improve this code by checking if the entered text contains a decimal point and, if so, limiting the number of digits after the decimal point to two. You can also use regular expressions to validate the entered text, which can make the code more concise and readable.
How to restrict the textfield to accept max 3 digits with a decimal value of two digits
I am using Xcode 14 Swift 5 and I am new to it. I am trying to make the textField accpet maximum 3 digits but also can have a decimal number in it with 2 digits. (E.g. the user can enter 123 or 123.45 but not 12345 nor 12134.54) Thanks in advance. here is my code to restrict the textField to accept max 3 digits but I can't write the rest. // TextField Config func textField(_ textField: UITextField, shouldChangeCharactersIn range: NSRange, replacementString string: String) -> Bool { guard let text = textField.text else { return true } let text_Length = text.count + string.count - range.length if text_Length > 3 { return false } return true } // Done
[ "To make your text field accept a maximum of three digits with a decimal number of two digits, you can use the NSCharacterSet class to define the characters that are allowed in the text field. You can then use the rangeOfCharacter method to check if the entered text contains any characters that are not in the allowed set.\nHere is an example of how you can implement this in your code:\nlet allowedCharacters = CharacterSet(charactersIn: \"0123456789.\")\n\nfunc textField(_ textField: UITextField, shouldChangeCharactersIn range: NSRange, replacementString string: String) -> Bool {\n\n guard let text = textField.text else { return true }\n\n if let _ = string.rangeOfCharacter(from: allowedCharacters.inverted) {\n return false\n }\n\n let text_Length = text.count + string.count - range.length\n if text_Length > 3 {\n return false\n }\n\n return true\n}\n\nYou can further improve this code by checking if the entered text contains a decimal point and, if so, limiting the number of digits after the decimal point to two. You can also use regular expressions to validate the entered text, which can make the code more concise and readable.\n" ]
[ 0 ]
[]
[]
[ "ios", "swift", "swift5", "uitextfield", "xcode" ]
stackoverflow_0074680405_ios_swift_swift5_uitextfield_xcode.txt