Dataset schema:

  content             stringlengths (86 to 88.9k)
  title               stringlengths (0 to 150)
  question            stringlengths (1 to 35.8k)
  answers             sequence
  answers_scores      sequence
  non_answers         sequence
  non_answers_scores  sequence
  tags                sequence
  name                stringlengths (30 to 130)
Q: I want to add the class just in the clicked span

When I tried to use jQuery it changed every span with the same class, but I don't want to add an id for each span:

$(".pay-btn").click(function (e) { 
    e.preventDefault();
    $(".bay-btn").addClass('bay-btn-clicked');
});

I want to add the class to the clicked span only, not to the spans that haven't been clicked yet:

<div class="type1 types">
    <div class="img-con"><img src="/src/shop/imgs/black-sh.jfif" alt=""></div>
    <p>Lorem ipsum dolor sit amet consectetur adipisicing elit. Eius esse consequatur magnam exercitationem quisquam.</p>
    <span class="cost">10$</span>
    <span class="bay-btn">Bay</span>
</div>

<div class="type2 types">
    <div class="img-con"><img src="/src/shop/imgs/red-sh.jfif" alt=""></div>
    <p>Lorem ipsum dolor sit amet consectetur adipisicing elit. Eius esse consequatur magnam exercitationem quisquam.</p>
    <span class="cost">10$</span>
    <span class="bay-btn">Bay</span>
</div>

A: You can use the this keyword to target the clicked element. The this keyword refers to the element that triggered the event. Here's how you can use it in your code:

$(".pay-btn").click(function (e) { 
    e.preventDefault();
    // Use 'this' to target the clicked element
    $(this).addClass('bay-btn-clicked');
});

A: It seems that you misspelled the class name (".pay-btn" vs ".bay-btn") in jQuery, so the code would be:

$(".pay-btn").click(function (e) { 
    e.preventDefault();
    $(".pay-btn").addClass('pay-btn-clicked');
});

And don't forget to update the class names in the HTML file as well: bay-btn -> pay-btn
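For comparison, here is a minimal vanilla-JavaScript sketch of the same idea using event delegation instead of jQuery; it assumes the markup from the question (class bay-btn) and is an illustration rather than part of either answer:

// One listener on the document handles every ".bay-btn";
// event.target plays the role of jQuery's 'this'.
document.addEventListener('click', function (event) {
    // Ignore clicks that are not on a buy button
    if (!event.target.classList.contains('bay-btn')) return;
    event.preventDefault();
    // Mark only the span that was actually clicked
    event.target.classList.add('bay-btn-clicked');
});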
I want to add the class just in the clicked span
when I tried to use jQuery it changes every span with the same class but I don't want to add an id for each span $(".pay-btn").click(function (e) { e.preventDefault(); $(".bay-btn").addClass('bay-btn-clicked'); }); I want to add a class to the clicked span without the unclicked yet <div class="type1 types"> <div class="img-con"><img src="/src/shop/imgs/black-sh.jfif" alt=""></div> <p>Lorem ipsum dolor sit amet consectetur adipisicing elit. Eius esse consequatur magnam exercitationem quisquam.</p> <span class="cost">10$</span> <span class="bay-btn">Bay</span> </div> <div class="type2 types"> <div class="img-con"><img src="/src/shop/imgs/red-sh.jfif" alt=""></div> <p>Lorem ipsum dolor sit amet consectetur adipisicing elit. Eius esse consequatur magnam exercitationem quisquam.</p> <span class="cost">10$</span> <span class="bay-btn">Bay</span> </div>
[ "You can use the this keyword to target the clicked element. The this keyword refers to the element that triggered the event. Here's how you can use it in your code:\n$(\".pay-btn\").click(function (e) { \n e.preventDefault();\n // Use 'this' to target the clicked element\n $(this).addClass('bay-btn-clicked');\n});\n\n", "It seems that you misspelled the class name\n\".pay-btn\" and \".bay-btn\" in jQuery, so the code would be:\n$(\".pay-btn\").click(function (e) { \n e.preventDefault();\n $(\".pay-btn\").addClass('pay-btn-clicked');\n });\n\nAnd dont forget to update the class names in HTML file as well:\nbay-btn -> pay-btn\n" ]
[ 0, 0 ]
[]
[]
[ "css", "html", "javascript", "jquery" ]
stackoverflow_0074679679_css_html_javascript_jquery.txt
Q: having issues using face detection API from Clarifai API

I have added the face detection API from Clarifai to my project; however, whenever I copy an image to my project and click on detect, it actually shows the image but it is not detecting the face. See below App.js and FaceRecognition.js.

App.js:

import React, {Component} from 'react';
import Clarifai from 'clarifai';
import Navigation from './components/Navigation/Navigation';
import Logo from './components/Logo/Logo';
import ImageLinkForm from './components/ImageLinkForm/ImageLinkForm';
import FaceRecognition from './components/FaceRecognition/FaceRecognition';
import Rank from './components/Rank/Rank';
import './App.css';

const app = new Clarifai.App({
    apiKey: 'xxxxxxxxxxxx'
});

class App extends Component {
    constructor() {
        super();
        this.state = {
            input: '',
            imageUrl: '',
            box: {}
        }
    }

    calculateFaceLocation = (data) => {
        const clarifaiFace = data.outputs[0].data.regions[0].region_info.bounding_box;
        const image = document.getElementById('inputimage');
        const width = Number(image.width);
        const height = Number(image.height);
        return {
            leftCol: clarifaiFace.left_col * width,
            topRow: clarifaiFace.top_row * height,
            rightCol: width - (clarifaiFace.right_col * width),
            bottomRow: height - (clarifaiFace.bottom_row * height)
        }
    }

    displayFaceBox = (box) => {
        console.log(box)
        this.setState({box: box});
    }

    onInputChange = (event) => {
        this.setState({input: event.target.value})
    }

    onButtonSubmit = () => {
        this.setState({imageUrl: this.state.input})
        app.models.predict(
            Clarifai.FACE_DETECT_MODEL,
            this.state.input)
            .then(response => this.displayFaceBox(this.calculateFaceLocation(response)))
            .catch(err => console.log(err));
    }

    render() {
        return (
            <div className="App">
                <Navigation />
                <Logo />
                <Rank />
                <ImageLinkForm onInputChange={this.onInputChange} onButtonSubmit={this.onButtonSubmit} />
                <FaceRecognition box={this.state.box} imageUrl={this.state.imageUrl}/>
            </div>
        );
    }
}

export default App;

FaceRecognition.js:

import React from 'react';
import './FaceRecognition.css';

const FaceRecognition = ({imageUrl, box}) => {
    return (
        <div className='center ma'>
            <div className='absolute mt2'>
                <img id='inputimage' alt='' src={imageUrl} width='500px' height='auto' />
                <div className='bounding-box' style={{top: box.topRow, right: box.rightCol, bottom: box.bottomRow, left: box.leftCol}}></div>
            </div>
        </div>
    );
}

export default FaceRecognition;

FaceRecognition.css:

bounding-box {
    position: absolute;
    box-shadow: 0 0 0 3px #149df2 inset;
    display: flex;
    flex-wrap: wrap;
    justify-content: center;
    cursor: pointer;
}

What am I doing wrong? I tried copy-pasting from the actual Clarifai API code, but no luck; the bounding-box CSS is not even showing up in the console. Please help me.

A: First of all, please don't use this client: https://github.com/Clarifai/clarifai-javascript, it has been deprecated for a while and several things in this package are very old and broken.
If you're purely developing client-side, you can use the REST endpoints directly: https://docs.clarifai.com/api-guide/predict/images (see "Javascript (REST)" snippets throughout the docs).
I also recommend using a PAT instead of API keys. This will allow you access across all your Clarifai apps with a single token.

A: Clarifai has changed the way to use their API. On the Clarifai face detection model, click to use the model, then you can copy the code on how to use their API:
https://clarifai.com/clarifai/main/models/face-detection?utm_source=clarifai&utm_medium=referral&tab=versions
Then you can import your PAT and the other credentials requested in the code from the Clarifai portal.
Use this as a guide: https://help.clarifai.com/hc/en-us/articles/4408131744407-Integrating-Clarifai-in-your-React-Javascript-project
You are welcome.
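One detail neither answer mentions: in FaceRecognition.css the rule is written as a type selector, bounding-box { ... }, while the JSX assigns className='bounding-box'. A class selector needs a leading dot, which by itself would explain why the box never renders. A corrected rule, assuming the rest of the styling is as intended:

.bounding-box {
    position: absolute;
    box-shadow: 0 0 0 3px #149df2 inset;
    display: flex;
    flex-wrap: wrap;
    justify-content: center;
    cursor: pointer;
}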
having issues using face detection API from Clarifai API
I have added the face detection API from Clarifai APi to my project, however, whenever i copy an image to my project and click on detects, it actually shows the image but it is not detecting the face. see below App.js and FaceRecognition.js import React, {Component} from 'react'; import Clarifai from 'clarifai'; import Navigation from './components/Navigation/Navigation'; import Logo from './components/Logo/Logo'; import ImageLinkForm from './components/ImageLinkForm/ImageLinkForm'; import FaceRecognition from './components/FaceRecognition/FaceRecognition'; import Rank from './components/Rank/Rank'; import './App.css'; const app = new Clarifai.App({ apiKey: 'xxxxxxxxxxxx' }); class App extends Component { constructor() { super(); this.state = { input: '', imageUrl: '', box: {} } } calculateFaceLocation =(data) => { const clarifaiFace = data.outputs[0].data.regions[0].region_info.bounding_box; const image = document.getElementById('inputimage'); const width = Number(image.width); const height = Number(image.height); return { leftCol: clarifaiFace.left_col * width, topRow: clarifaiFace.top_row * height, rightCol: width - (clarifaiFace.right_col * width), bottomRow: height - (clarifaiFace.bottom_row * height) } } displayFaceBox = (box) => { console.log(box) this.setState({box: box}); } onInputChange = (event) => { this.setState({input: event.target.value}) } onButtonSubmit = () => { this.setState({imageUrl: this.state.input}) app.models.predict( Clarifai.FACE_DETECT_MODEL, this.state.input) .then( response => this.displayFaceBox(this.calculateFaceLocation(response))) .catch(err => console.log(err)); } render() { return ( <div className="App"> <Navigation /> <Logo /> <Rank /> <ImageLinkForm onInputChange={this.onInputChange} onButtonSubmit={this.onButtonSubmit} /> <FaceRecognition box={this.state.box} imageUrl={this.state.imageUrl}/> </div> ); } } export default App; FaceRecognition.js import React from 'react'; import './FaceRecognition.css'; const FaceRecognition = ({imageUrl, box}) => { return ( <div className='center ma'> <div className='absolute mt2'> <img id='inputimage' alt='' src={imageUrl} width='500px' height='auto' /> <div className='bounding-box' style= {{top: box.topRow, right: box.rightCol, bottom: box.bottomRow, left: box.leftCol}}></div> </div> </div> ); } export default FaceRecognition; FaceRecognition.css bounding-box { position: absolute; box-shadow: 0 0 0 3px #149df2 inset; display: flex; flex-wrap: wrap; justify-content: center; cursor: pointer; } what am i doing wrong? i tried copy paste from the actual Clarifai API code, but no luck the bounding-box css is not even showing up in the console. please help me
[ "First of all, please don't use this client: https://github.com/Clarifai/clarifai-javascript, it has been deprecated for a while and several things in this package are very old and broken.\nIf you're purely developing client-side, you can use the REST endpoints directly: https://docs.clarifai.com/api-guide/predict/images (see \"Javascript (REST)\" snippets throughout the docs)\nI also recommend to use PAT instead of API keys. This will allow you access across all your Clarifai apps with a single token.\n", "Clarifai has changed the way to use their Api. On Clarifai Face detect model , click to use model, then you can copy the code on how to use their Api.\nhttps://clarifai.com/clarifai/main/models/face-detection?utm_source=clarifai&utm_medium=referral&tab=versions\nThen you can import your PAT and other credentials requested for in the code from Clarifai portal.\nUse this as a guide https://help.clarifai.com/hc/en-us/articles/4408131744407-Integrating-Clarifai-in-your-React-Javascript-project\nYou are welcome \n" ]
[ 0, 0 ]
[]
[]
[ "clarifai", "css", "reactjs", "tachyons_css" ]
stackoverflow_0073459743_clarifai_css_reactjs_tachyons_css.txt
Q: How to change ruby version

Ruby 1.8 and 1.9 are installed on my Ubuntu machine. I've just installed Ruby 2.0.0 from ppa:brightbox/ruby-ng-experimental to install a bundle which requires 2.0.0. Now all of 1.8, 1.9 and 2.0 are installed, though I can't tell bundle to use 2.0:

$ bundle install
$ Your Ruby version is 1.9.3, but your Gemfile specified 2.0.0

RVM fails to change version:

$ rvm 2.0
$ ruby-2.0.0-p451 is not installed.
$ To install do: 'rvm install ruby-2.0.0-p451'

rbenv also does not recognize 2.0:

$ rbenv global 2.0.0-p451
$ rbenv: version `2.0.0-p451' not installed

A: There is lots of advice in the comments to your question, some of it is advanced-ish rbenv or rvm usage.
My advice: decide on how to manage multiple rubies. Either use your OS package manager (in your case the apt-get/PPA stuff) OR rvm OR rbenv.
For the OS package manager, there should be a way to call ruby with the version explicitly (e.g. /usr/bin/ruby1.9.3), or research and call update-alternatives. As bundler comes with a gem, you might get the interpreters confused here.
For rvm, change ruby version with rvm use 2.5.1 (once it is installed).
For rbenv I actually do not know, but it should be trivial, too (and people are happy with it; it just happens that I tried rvm first and it worked like a charm, never evaluated rbenv).
I usually install one "system" ruby (apt-get install ruby1.9.3) and use rvm afterwards. You can still switch to the packaged "production" ruby with rvm use system.
Update 2017: Most distros ship with a ruby version installed already, so you probably don't have to install it manually. Run ruby -v or which ruby to see if a ruby interpreter is already installed.
In your case I would probably deinstall all system rubys (apt-get purge ...), remove the PPAs, remove your ~/.rvm and rbenv and start from scratch (install packaged stable ruby, then rvm, and use rvm (e.g. rvm install 2.3.1) from there on).

A: Adding the repository
If you're using Ubuntu 14.04 (Trusty) or newer then you can add the package repository like this:

$ sudo apt-get install software-properties-common
$ sudo apt-add-repository ppa:brightbox/ruby-ng
$ sudo apt-get update

Install ruby-switch:

$ sudo apt-get install ruby-switch

Utility commands:

ruby -v
ruby-switch --list

Example:

$ sudo ruby-switch --set ruby2.1

Hope this helps you.
From: https://www.brightbox.com/docs/ruby/ubuntu/

A: export PATH=$PATH:~yourusername/.rbenv/shims/ruby

This will set ruby in your shell to the current rbenv ruby.
You can put this line in your .bashrc or other init file.
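The error messages in the question already name the missing piece: neither tool has the 2.0.0-p451 build installed yet. A short shell sketch of the install-then-switch step for each tool; the version string is taken from the question's own error output, and rbenv install additionally assumes the ruby-build plugin is present:

$ rvm install ruby-2.0.0-p451    # install the exact build rvm asked for
$ rvm use 2.0.0-p451             # switch the current shell to it
$ ruby -v                        # confirm the active interpreter

$ rbenv install 2.0.0-p451       # same idea with rbenv (needs ruby-build)
$ rbenv global 2.0.0-p451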
How to change ruby version
Ruby 1.8 and 1.9 are installed in my Ubuntu machine. I've just installed Ruby 2.0.0 from ppa:brightbox/ruby-ng-experimental to install a bundle which requires 2.0.0. Now all 1.8, 1.9 and 2.0 are installed though I can't tell bundle to use 2.0: $ bundle install $ Your Ruby version is 1.9.3, but your Gemfile specified 2.0.0 RVM fails to change version: $ rvm 2.0 $ ruby-2.0.0-p451 is not installed. $ To install do: 'rvm install ruby-2.0.0-p451' RBENV also does not recognize 2.0: $ rbenv global 2.0.0-p451 $ rbenv: version `2.0.0-p451' not installed
[ "There is lots of advise in the comments to your question, some of it is advanced-ish rbenv or rvm usage.\nMy advice: Decide on how to manage multiple rubies - either use your OS package manager (in your case the apt-get/PPA stuff) OR rvm OR rbenv.\nFor the OS package manager, there should be a way to call ruby with version explicitely (e.g. /usr/bin/ruby1.9.3), or research on and call update-alternative. As bundler comes with a gem, you might get the interpreters confused here.\nFor rvm, change ruby version with rvm use 2.5.1 (once it is installed).\nFor rbenv I actually do not know but it should be trivial, too (and people are happy with it; it just happens that I tried rvm first and it worked like a charm, never evaluated rbenv).\nI usually install one \"system\" ruby (apt-get install ruby1.9.3) and use rvm afterwards. You can still switch to the packaged \"production\" ruby with rvm use system.\nUpdate 2017: Most distros ship with a ruby version installed already, so you probably don't have to install it manually. Run ruby -v or which ruby to see if a ruby interpreter is already installed.\nIn your case I would probably deinstall all system rubys (apt-get purge ...), remove the PPAs, remove your ~/.rvm and rbenv and start from scratch (install packaged stable ruby, then rvm and use rvm (r.g. rvm install 2.3.1) from there on).\n", "Adding the repository\nIf you’re using Ubuntu 14.04 (Trusty) or newer then you can add the package repository like this:\n$ sudo apt-get install software-properties-common\n$ sudo apt-add-repository ppa:brightbox/ruby-ng\n$ sudo apt-get update\n\nInstall ruby switch\n$ sudo apt-get install ruby-switch\n\nCommands of utiliy\nruby -v\nruby-switch --list\n\nExample\n$ sudo ruby-switch --set ruby2.1\n\nHope this help you.\nFrom: https://www.brightbox.com/docs/ruby/ubuntu/\n", "export PATH=$PATH:~yourusername/.rbenv/shims/ruby\n\nThis will set ruby in your shell to the current rbenv ruby.\nYou can put this line in your .bashrc or other init file.\n" ]
[ 25, 5, 0 ]
[]
[]
[ "ruby" ]
stackoverflow_0022034498_ruby.txt
Q: Raid 5 algorithm in Python

I have been trying to create a simple RAID 5 algorithm with 3 lists in Python, with the first 2 lists acting as the main storage, and the third as a parity bit. I would also like to be able to completely restore any deleted lists with the correct contents. I seem to be unable to figure out how to actually write this program; I only understand the logic behind it. Thanks.

A: This code implements a RAID 5 system in Python by defining two functions: initialize_raid_system and restore_missing_list.

The initialize_raid_system function takes two lists as input and returns a tuple containing the two input lists and a third list that holds the parity bits for the elements in the input lists. The function first checks that the input lists are of equal length, and then initializes the parity bit list with zeros. It then computes the parity bit for each element in the input lists by XORing the values in the corresponding positions of the input lists. Finally, it returns a tuple containing the input lists and the computed parity bit list.

The restore_missing_list function takes a list and the parity bits as input, and returns the restored list. The function first checks that the input list and the parity bits have the same length, and then restores the missing list by XORing the values in the corresponding positions of the input list and the parity bits. Finally, it returns the restored list.

The code then uses these functions to initialize a RAID 5 system with the input lists [1, 2, 3, 4, 5] and [5, 4, 3, 2, 1], and then restores each of the input lists from the parity bits and the other input list. The resulting lists are printed to the console to verify that the restoration was successful.

from typing import List, Tuple

def initialize_raid_system(list1: List[int], list2: List[int]) -> Tuple[List[int], List[int], List[int]]:
    assert len(list1) == len(list2), "input lists must be same length"

    # Initialize the parity bit list with zeros
    parity_bits = [0 for _ in range(len(list1))]

    # Compute the parity bit for each element in the main storage lists
    for i in range(len(list1)):
        parity_bits[i] = list1[i] ^ list2[i]

    # Return the storage lists together with the parity bits
    return list1, list2, parity_bits

def restore_missing_list(listx: List[int], parity_bits: List[int]) -> List[int]:
    assert len(listx) == len(parity_bits), "there must be as many parity bits as list elements"

    # Restore the missing list by XORing the values in the corresponding positions of the given list and the parity bits
    restored_list = [listx[i] ^ parity_bits[i] for i in range(len(listx))]

    return restored_list

# Produce raid system tuple of three lists
raid_system: Tuple[List[int], List[int], List[int]] = initialize_raid_system([1,2,3,4,5], [5,4,3,2,1])

# Restore each list from the parity bits and the other list
list1: List[int] = restore_missing_list(raid_system[1], raid_system[2])
list2: List[int] = restore_missing_list(raid_system[0], raid_system[2])

# Verify the result
print(f"list1 = {list1}")
print(f"list2 = {list2}")
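The answer covers losing one of the data lists; RAID 5 also survives losing the parity list itself, because XOR parity is symmetric. A short sketch of that remaining case (the function name is illustrative, not from the answer):

# Rebuild the parity list from the two surviving data lists.
def restore_parity(list1, list2):
    assert len(list1) == len(list2), "input lists must be same length"
    return [a ^ b for a, b in zip(list1, list2)]

# Example usage with the lists from the answer above
print(restore_parity([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]))  # [4, 6, 0, 6, 4]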
Raid 5 algorithm in Python
I have been trying to create a simple RAID 5 algorithm with 3 lists in Python, with the first 2 lists acting as the main storage, and the third as a parity bit. I would also like to be able to completely restore any deleted lists with the correct contents. I seem to be unable to figure out how to actually write this program, I only understand the logic behind it. Thanks.
[ "This code implements a RAID 5 system in Python by defining two functions: initialize_raid_system and restore_missing_list.\nThe initialize_raid_system function takes two lists as input and returns a tuple containing the two input lists and a third list that holds the parity bits for the elements in the input lists. The function first checks that the input lists are of equal length, and then initializes the parity bit list with zeros. It then computes the parity bit for each element in the input lists by XORing the values in the corresponding positions of the input lists. Finally, it returns a tuple containing the input lists and the computed parity bit list.\nThe restore_missing_list function takes a list and the parity bits as input, and returns the restored list. The function first checks that the input list and the parity bits have the same length, and then restores the missing list by XORing the values in the corresponding positions of the input list and the parity bits. Finally, it returns the restored list.\nThe code then uses these functions to initialize a RAID 5 system with the input lists [1, 2, 3, 4, 5] and [5, 4, 3, 2, 1], and then restores each of the input lists from the parity bits and the other input list. The resulting lists are printed to the console to verify that the restoration was successful.\nfrom typing import List, Tuple\n\ndef initialize_raid_system(list1: List[int], list2: List[int]) -> Tuple[List[int], List[int], List[int]]:\n assert len(list1) == len(list2), \"input lists must be same length\"\n \n # Initialize the parity bit list with zeros\n parity_bits = [0 for _ in range(len(list1))]\n \n # Compute the parity bit for each element in the main storage lists\n for i in range(len(list1)):\n parity_bits[i] = list1[i] ^ list2[i]\n \n # Print the parity bit\n return list1, list2, parity_bits\n\ndef restore_missing_list(listx: List[int], parity_bits: List[int]) -> List[int]:\n assert len(listx) == len(parity_bits), \"there must be as many parity bits as list elements\"\n\n # Restore the missing list by XORing the values in the corresponding positions of the given list and the parity bits\n restored_list = [listx[i] ^ parity_bits[i] for i in range(len(listx))]\n\n return restored_list\n\n# Produce raid system tuple of three lists \nraid_system: Tuple[List[int], List[int], List[int]] = initialize_raid_system([1,2,3,4,5], [5,4,3,2,1])\n\n# Restore each list from the parity bits and the other list\nlist1: List[int] = restore_missing_list(raid_system[1], raid_system[2])\nlist2: List[int] = restore_missing_list(raid_system[0], raid_system[2])\n\n# Verify the result\nprint(f\"list1 = {list1}\")\nprint(f\"list2 = {list2}\")\n\n" ]
[ 1 ]
[]
[]
[ "python_3.x", "raid" ]
stackoverflow_0074679730_python_3.x_raid.txt
Q: Can we reduce the time complexity here?

I have an AoC problem where I have been given the data below:

data = """2-4,6-8
2-3,4-5
5-7,7-9
2-8,3-7
6-6,4-6
2-6,4-8"""

I need to find the number of pairs which fully contain another pair. For example, 2-8 fully contains 3-7, and 6-6 is fully contained by 4-6. I have solved it using the below code:

def aoc_part1(self, data):
    counter = 0
    for lines_data in data.splitlines():
        lines_data = lines_data.strip()
        first_range, second_range = self.__get_first_second_list_of_elements(lines_data)
        check_first_side_if_returns_true = all(item in first_range for item in second_range)
        check_second_side_if_returns_true = all(item in second_range for item in first_range)
        if check_first_side_if_returns_true or check_second_side_if_returns_true:
            counter += 1
    return counter

def __get_first_second_list_of_elements(self, data):
    first_elf, second_elf = data.split(",")[0], data.split(",")[1]
    first_range_start, first_range_end = map(int, first_elf.split("-"))
    second_range_start, second_range_end = map(int, second_elf.split("-"))
    first_range = list(range(first_range_start, first_range_end + 1))
    second_range = list(range(second_range_start, second_range_end + 1))
    return first_range, second_range

I was just wondering about the time complexity here. I think this amounts to brute force, because for every iteration all will run another loop. How can I optimize this solution in order to get linear time complexity? first_range and second_range are lists of ints. check_first_side_if_returns_true and check_second_side_if_returns_true are boolean variables that record whether one list is entirely contained in the other; based on that, each is True or False.

A: Your solution looks pretty complicated. Why not do something like:

data = """2-4,6-8
2-3,4-5
5-7,7-9
2-8,3-7
6-6,4-6
2-6,4-8
"""

def included(line):
    (a1, b1), (a2, b2) = (map(int, pair.split("-")) for pair in line.strip().split(","))
    return (a1 <= a2 and b2 <= b1) or (a2 <= a1 and b1 <= b2)

print(sum(included(line) for line in data.splitlines()))

I did some timing with my AoC input for day 4 (1,000 lines):

from timeit import timeit

# Extract the interval boundaries for the pairs
boundaries = [
    [tuple(map(int, pair.split("-"))) for pair in line.strip().split(",")]
    for line in data.splitlines()
]

# Version 1 with simple comparison of boundaries
def test1(boundaries):
    def included(pairs):
        (a1, b1), (a2, b2) = pairs
        return (a1 <= a2 and b2 <= b1) or (a2 <= a1 and b1 <= b2)

    return sum(included(pairs) for pairs in boundaries)

# Version 2 with range-subset test
def test2(boundaries):
    def included(pairs):
        (a1, b1), (a2, b2) = pairs
        numbers1, numbers2 = set(range(a1, b1 + 1)), set(range(a2, b2 + 1))
        return numbers1 <= numbers2 or numbers2 <= numbers1

    return sum(included(pairs) for pairs in boundaries)

# Test for identical result
print(test1(boundaries) == test2(boundaries))

# Timing
for i in 1, 2:
    t = timeit(f"test{i}(boundaries)", globals=globals(), number=1_000)
    print(f"Duration version {i}: {t:.1f} seconds")

Result here, on a mediocre machine (repl.it):

Duration version 1: 0.4 seconds
Duration version 2: 5.4 seconds

A: You're probably making it overcomplicated in the current approach. If you split the pairs into two sets (e.g. a and b) then you can easily use set operations to check whether there is overlap. That should be faster than yours. Something like this one-liner:

# some input reading, and split to a, b sets.
# count = 0

if set(range(a, b + 1)) & set(range(x, y + 1)):
    count += 1  # that's the part 1 answer.

# part 2
for line in open('04.in'):
    a, b, x, y = map(int, line.replace(",", "-").split("-"))
    if set(range(a, b + 1)) & set(range(x, y + 1)):
        ans += 1

There were questions earlier about the memory efficiency of this approach; I ran some profiling and this is the result to share. It confirms there should be NO problem given this puzzle's input size.

Filename: day04.py

Line #    Mem usage    Increment  Occurrences   Line Contents
=============================================================
    27   43.758 MiB   43.758 MiB           1   @profile
    28                                         def part2(file):
    29   43.762 MiB    0.004 MiB           1       ans = 0
    30
    31   43.770 MiB    0.000 MiB        1001       for line in open(file):
    32   43.770 MiB    0.004 MiB        1000           a, b, x, y = map(int, line.replace(",", "-").split("-"))
    33   43.770 MiB    0.000 MiB        1000           if set(range(a, b + 1)) & set(range(x, y + 1)):
    34   43.770 MiB    0.004 MiB         847               ans += 1
    35
    36   43.770 MiB    0.000 MiB           1       return ans
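A note on the second answer: the intersection test set(a) & set(b) detects any overlap, which is the part 2 condition; full containment (what this question asks, part 1) is a subset test instead. Both conditions can also be checked in constant time per line from the boundaries alone, as the first answer does. A small sketch of the two checks side by side, with variable names following the second answer:

# Constant-time checks per line, no sets needed.
a, b, x, y = 2, 8, 3, 7   # the "2-8,3-7" pair from the question

# Part 1: one range fully contains the other
fully_contains = (a <= x and y <= b) or (x <= a and b <= y)

# Part 2: the ranges overlap at all
overlaps = a <= y and x <= b

print(fully_contains, overlaps)   # True True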
Can we reduce the time complexity here?
I have an AoC problem where I have been given the data below: data = """2-4,6-8 2-3,4-5 5-7,7-9 2-8,3-7 6-6,4-6 2-6,4-8""" I need to find the number of pairs which fully contain another pair. For example, 2-8 fully contains 3-7, and 6-6 is fully contained by 4-6. I have solved it using the below code: def aoc_part1(self, data): counter = 0 for lines_data in data.splitlines(): lines_data = lines_data.strip() first_range, second_range = self.__get_first_second_list_of_elements(lines_data) check_first_side_if_returns_true = all(item in first_range for item in second_range) check_second_side_if_returns_true = all(item in second_range for item in first_range) if check_first_side_if_returns_true or check_second_side_if_returns_true: counter += 1 return counter def __get_first_second_list_of_elements(self, data): first_elf, second_elf = data.split(",")[0], data.split(",")[1] first_range_start, first_range_end = map(int, first_elf.split("-")) second_range_start, second_range_end = map(int, second_elf.split("-")) first_range = list(range(first_range_start, first_range_end + 1)) second_range = list(range(second_range_start, second_range_end + 1)) return first_range, second_range I was just wondering about the time complexity here. I think it should be a brute force here because for every iteration all will run another loop. How can I optimize this solution in order to get linear time complexity? first_range and second_range are of int types. check_first_side_if_returns_true and check_second_side_if_returns_true are the boolean variables that check if the list is entirely contained or not. Based on that, it returns True or False.
[ "Your solution looks pretty complicated. Why not do something like:\ndata = \"\"\"2-4,6-8\n2-3,4-5\n5-7,7-9\n2-8,3-7\n6-6,4-6\n2-6,4-8\n\"\"\"\n\ndef included(line):\n (a1, b1), (a2, b2) = (map(int, pair.split(\"-\")) for pair in line.strip().split(\",\"))\n return (a1 <= a2 and b2 <= b1) or (a2 <= a1 and b1 <= b2)\n\nprint(sum(included(line) for line in data.splitlines()))\n\nI did some timing with my AoC-input for day 4 (1,000 lines):\nfrom timeit import timeit\n\n# Extract the interval boundaries for the pairs\nboundaries = [\n [tuple(map(int, pair.split(\"-\"))) for pair in line.strip().split(\",\")]\n for line in data.splitlines()\n]\n\n# Version 1 with simple comparison of boundaries\ndef test1(boundaries):\n def included(pairs):\n (a1, b1), (a2, b2) = pairs\n return (a1 <= a2 and b2 <= b1) or (a2 <= a1 and b1 <= b2)\n \n return sum(included(pairs) for pairs in boundaries)\n\n# Version 2 with range-subset test\ndef test2(boundaries):\n def included(pairs):\n (a1, b1), (a2, b2) = pairs\n numbers1, numbers2 = set(range(a1, b1 + 1)), set(range(a2, b2 + 1))\n return numbers1 <= numbers2 or numbers2 <= numbers1\n \n return sum(included(pairs) for pairs in boundaries)\n\n# Test for identical result\nprint(test1(boundaries) == test2(boundaries))\n\n# Timing\nfor i in 1, 2:\n t = timeit(f\"test{i}(boundaries)\", globals=globals(), number=1_000)\n print(f\"Duration version {i}: {t:.1f} seconds\")\n\nResult here, on a mediocre machine (repl.it):\nDuration version 1: 0.4 seconds\nDuration version 2: 5.4 seconds\n\n", "You're prob. making it overcomplicated in the current approach. If you split the pairs to two sets - eg. a, and b then you could easily do a set ops. to check if there is overlapping. That should be faster than yours.\nSomething like this one-line:\n # some input reading, and split to a, b sets.\n # count = 0\n\n if set(range(a, b + 1)) & set(range(x, y + 1)):\n count += 1 # that's part1 answer.\n\n\n# part 2\nfor line in open('04.in'):\n a, b, x, y = map(int, line.replace(\",\", \"-\").split(\"-\"))\n if set(range(a, b + 1)) & set(range(x, y + 1)):\n ans += 1\n\nThere are questions about the memory efficiency about this approach earlier, I've run some profiling and this is the result to share - it's confirmed there should be NO problem given this puzzle's input size.\nFilename: day04.py\n\nLine # Mem usage Increment Occurrences Line Contents\n=============================================================\n 27 43.758 MiB 43.758 MiB 1 @profile\n 28 def part2(file):\n 29 43.762 MiB 0.004 MiB 1 ans = 0\n 30\n 31 43.770 MiB 0.000 MiB 1001 for line in open(file):\n 32 43.770 MiB 0.004 MiB 1000 a, b, x, y = map(int, line.replace(\",\", \"-\").split(\"-\"))\n 33 43.770 MiB 0.000 MiB 1000 if set(range(a, b + 1)) & set(range(x, y + 1)):\n 34 43.770 MiB 0.004 MiB 847 ans += 1\n 35\n 36 43.770 MiB 0.000 MiB 1 return ans\n\n" ]
[ 1, 0 ]
[]
[]
[ "python", "python_3.x" ]
stackoverflow_0074675785_python_python_3.x.txt
Q: I tried the problem Leetcode-724 (Pivot index), please help me correct my approach

Given an array of integers nums, calculate the pivot index of this array. The pivot index is the index where the sum of all the numbers strictly to the left of the index is equal to the sum of all the numbers strictly to the index's right. If the index is on the left edge of the array, then the left sum is 0 because there are no elements to the left. This also applies to the right edge of the array. Return the leftmost pivot index. If no such index exists, return -1.

class Solution {
    public int sumA(int a, int b, int[] s){
        int res = 0;
        for(int i = a; i < b; i++){
            res = res + s[i];
        }
        return res;
    }

    public int pivotIndex(int[] nums) {
        int sum = 0;
        int i = 0;
        int flag = 0;
        int x = nums.length;
        sum = sumA(0, x, nums);
        for(i = 1; i < nums.length; i++){
            if((sum - nums[i] - sumA(0, i-1, nums)) == (sumA(0, i-1, nums))){
                flag = 1;
                break;
            }
        }
        if(flag == 1)
            return i;
        else if((i == 0) || (i == x))
            return 0;
        else
            return -1;
    }
}

A: The given code computes the sum of all elements in the array and then checks for each index whether the sum of elements to the left of that index is equal to the sum of elements to the right of that index. If this is the case, it returns the index.

There are several issues with this approach:
The helper sumA recomputes a prefix sum from index 0 on every loop iteration, which makes the solution O(n^2); with running sums it can be removed entirely.
The method does not check if the pivot index is on the left or right edge of the array, so it can return incorrect results in these cases.
The method only checks for the first pivot index and returns it, but there can be multiple pivot indices in an array. It should return the leftmost pivot index.

To fix these issues, the following changes can be made to the code:
Initialize a variable leftSum to 0 and a variable rightSum to the sum of all elements in the array.
Iterate over the elements in the array from left to right.
For each index, first subtract the element at that index from rightSum, so that rightSum holds the strict sum to the right of the index.
If leftSum is now equal to rightSum, return the index; otherwise add the element to leftSum and continue.
If the end of the array is reached without finding a pivot index, return -1.

The updated code would look like this:

class Solution {
    public int pivotIndex(int[] nums) {
        // Initialize leftSum to 0 and rightSum to the sum of all elements in the array
        int leftSum = 0;
        int rightSum = 0;
        for (int num : nums) {
            rightSum += num;
        }

        // Iterate over the elements in the array from left to right
        for (int i = 0; i < nums.length; i++) {
            // Remove the current element from the right-hand sum first,
            // so rightSum holds the strict sum to the right of index i
            rightSum -= nums[i];

            // If the strict left sum equals the strict right sum, i is the pivot
            if (leftSum == rightSum) {
                return i;
            }

            // Only now does the current element join the left-hand sum
            leftSum += nums[i];
        }

        // If the end of the array is reached without finding a pivot index, return -1
        return -1;
    }
}
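A quick sanity check of the corrected version against the classic LeetCode examples (a hypothetical test harness, not part of the original answer):

public class PivotIndexDemo {
    public static void main(String[] args) {
        Solution s = new Solution();
        // Pivot is 3: 1 + 7 + 3 == 5 + 6 on either side of nums[3]
        System.out.println(s.pivotIndex(new int[]{1, 7, 3, 6, 5, 6})); // 3
        // No pivot exists
        System.out.println(s.pivotIndex(new int[]{1, 2, 3}));          // -1
        // Pivot at the left edge: left sum 0, right sum 1 + (-1) == 0
        System.out.println(s.pivotIndex(new int[]{2, 1, -1}));         // 0
    }
}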
I tried the problem Leetcode-724 (Pivot index), Please help me correct my approach
Given an array of integers nums, calculate the pivot index of this array. The pivot index is the index where the sum of all the numbers strictly to the left of the index is equal to the sum of all the numbers strictly to the index's right. If the index is on the left edge of the array, then the left sum is 0 because there are no elements to the left. This also applies to the right edge of the array. Return the leftmost pivot index. If no such index exists, return -1. class Solution { public int sumA(int a, int b, int[] s){ int res=0; for(int i = a; i<b; i++){ res= res + s[i]; } return res; } public int pivotIndex(int[] nums) { int sum = 0; int i=0; int flag = 0; int x = nums.length; sum= sumA(0, x, nums); for(i = 1; i < nums.length; i++){ if((sum - nums[i] - sumA(0, i-1, nums)) == (sumA(0, i-1, nums))){ flag=1; break; } } if(flag == 1) return i; else if((i==0) ||(i==x)) return 0; else return -1; } }
[ "The given code computes the sum of all elements in the array and then checks for each index if the sum of elements to the left of that index is equal to the sum of elements to the right of that index. If this is the case, it returns the index.\nThere are several issues with this approach:\nThe method sumA is not used, so it can be removed.\nThe method does not check if the pivot index is on the left or right edge of the array, so it can return incorrect results in these cases.\nThe method only checks for the first pivot index and returns it, but there can be multiple pivot indices in an array. It should return the leftmost pivot index.\nTo fix these issues, the following changes can be made to the code:\nInitialize a variable leftSum to 0 and a variable rightSum to the sum of all elements in the array.\nIterate over the elements in the array from left to right.\nFor each index, add the element at that index to leftSum and subtract it from rightSum.\nIf leftSum is equal to rightSum, return the index.\nIf the end of the array is reached without finding a pivot index, return -1.\nThe updated code would look like this:\nclass Solution {\n public int pivotIndex(int[] nums) {\n // Initialize leftSum to 0 and rightSum to the sum of all elements in the array\n int leftSum = 0;\n int rightSum = 0;\n for (int num : nums) {\n rightSum += num;\n }\n\n // Iterate over the elements in the array from left to right\n for (int i = 0; i < nums.length; i++) {\n // Add the element at the current index to leftSum and subtract it from rightSum\n leftSum += nums[i];\n rightSum -= nums[i];\n\n // If leftSum is equal to rightSum, return the index\n if (leftSum == rightSum) {\n return i;\n }\n }\n\n // If the end of the array is reached without finding a pivot index, return -1\n return -1;\n }\n}\n\n" ]
[ 0 ]
[]
[]
[ "java" ]
stackoverflow_0074679907_java.txt
Q: Automatically load newly inserted rows (SQL Server database) in ASP.NET webforms front-end?

I have a SQL Server database with one table and I am doing operations using Entity Framework 6 in C#. I have jQuery in my frontend which calls a method in the C# backend and loads the data from the database using Entity Framework. Now I want to invoke this method automatically as soon as my table in the database gets any new rows/data. I don't want to use SignalR; I want to keep it as simple as possible. I am still figuring out the best possible and easiest way to do this. If I add a trigger on my table in SQL Server, then how will it invoke my jQuery method or C# method?

A: One option to automatically load newly inserted rows from a SQL database on your front-end webforms is to use a timer control. You can set the timer control to periodically check the database for new rows, and if any are found, it can load them onto your webform.

To implement this, you would first need to add a Timer control to your webform. Then, in the code-behind file for your webform (e.g. in a .aspx.cs file), you can add an event handler for the Timer control's Tick event. This event will be raised periodically according to the interval that you set for the Timer control.

In the event handler for the Tick event, you can use Entity Framework to query the database for any new rows. If any are found, you can use jQuery to update the webform with the new data.

You may also need to add a trigger to your SQL database table that updates a timestamp column whenever a new row is inserted. This will allow you to only query for rows that have been inserted since the last time the Timer control's Tick event was raised.

Overall, using a Timer control to periodically check the database for new rows is a relatively simple approach that does not require using SignalR. However, keep in mind that this approach may not be suitable for high-traffic or real-time applications, as it may not be able to keep up with the rate of incoming data. In those cases, you may want to consider using a different approach such as SignalR.
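A minimal sketch of the polling handler the answer describes. Everything named here is an assumption for illustration: the Timer markup, the MyDbContext/MyRow types, and the CreatedAt column are placeholders, and the static lastCheck field is only a stand-in for real state tracking.

// Markup (inside an UpdatePanel):
// <asp:Timer ID="Timer1" runat="server" Interval="5000" OnTick="Timer1_Tick" />

private static DateTime lastCheck = DateTime.UtcNow;

protected void Timer1_Tick(object sender, EventArgs e)
{
    using (var db = new MyDbContext())
    {
        // Only fetch rows inserted since the previous tick
        var newRows = db.MyRows
                        .Where(r => r.CreatedAt > lastCheck)
                        .OrderBy(r => r.CreatedAt)
                        .ToList();

        if (newRows.Count > 0)
        {
            lastCheck = newRows[newRows.Count - 1].CreatedAt;
            // Rebind the grid (or return JSON to the jQuery caller) here
        }
    }
}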
Automatically load newly inserted rows (SQL Server database) in ASP.NET webforms front-end?
I have a SQL Server database with one table and I am doing operations using Entity Framework 6 in C#. I have a jQuery in my frontend which calls a method in the C# backend and loads the data from the database using Entity Framework. Now I want to invoke this method automatically as soon my table in database gets any new rows / data. I don't want to use SignalR, wants to keep it simple as possible. I am still figuring out the best possible and easiest way to do this. If I add a trigger on my table in SQL Server, then how it will invoke my jQuery method or C# method?
[ "One option to automatically load newly inserted rows from a SQL database on your front-end webforms is to use a timer control. You can set the timer control to periodically check the database for new rows, and if any are found, it can load them onto your webform.\nTo implement this, you would first need to add a Timer control to your webform. Then, in the code behind file for your webform (e.g. in a .aspx.cs file), you can add an event handler for the Timer control's Tick event. This event will be raised periodically according to the interval that you set for the Timer control.\nIn the event handler for the Tick event, you can use Entity Framework to query the database for any new rows. If any are found, you can use jQuery to update the webform with the new data.\nYou may also need to add a trigger to your SQL database table that updates a timestamp column whenever a new row is inserted. This will allow you to only query for rows that have been inserted since the last time the Timer control's Tick event was raised.\nOverall, using a Timer control to periodically check the database for new rows is a relatively simple approach that does not require using SignalR. However, keep in mind that this approach may not be suitable for high-traffic or real-time applications, as it may not be able to keep up with the rate of incoming data. In those cases, you may want to consider using a different approach such as SignalR.\n" ]
[ 0 ]
[]
[]
[ "c#", "entity_framework", "jquery", "sql_server", "triggers" ]
stackoverflow_0074679860_c#_entity_framework_jquery_sql_server_triggers.txt
Q: Not able to pull from nested array and query return sub-document using MongoTemplate

I am using MongoDB in Spring Boot. Here is a part of my data:

{
    "topic": [
        {
            "_topicId": "5e5e4d4bb431502946c15342",
            "name": "testName0",
            "username": "test0",
            "date": 1583238474961,
            "reply": [
                {
                    "_replyId": "38d29dcb-1a79-4788-b721-5fbe700cc99d",
                    "username": "test0",
                    "content": "reply0",
                    "date": 1583240780072
                },
                {
                    "_replyId": "07a0293a-22a1-45fb-9aa2-775fa24e9915",
                    "username": "test1",
                    "content": "reply1",
                    "date": 1583240955561
                }
            ]
        },
        {
            "_topicId": "5e5e4d4bb431502946c15343",
            "name": "testName1",
            "username": "test1",
            "date": 1583238475241,
            "reply": []
        }
    ]
}

I have two problems:

(1) I try to pull a reply (an object in Java) from a topic. I tried these queries:

Query query = Query.query(Criteria.where("_topicId").is(topicId));
Update update = new Update().pull("reply.$._replyId", topicReplyId);
mongoTemplate.updateFirst(query, update, "colletionName");

And I got an error: The positional operator did not find the match needed from the query

Query query = Query.query(Criteria.where("_topicId").is(topicId));
Update update = new Update().pull("reply._replyId", topicReplyId);
mongoTemplate.updateFirst(query, update, "colletionName");

And I got an error: Cannot use the part (_replyId) of (reply._replyId) to traverse the element

Then I decided to use a third way:

Query query = Query.query(Criteria.where("_topicId").is(topicId));
Update update = new Update().pull("reply", replyEntity);
mongoTemplate.updateFirst(query, update, "colletionName");

I tried to new a ReplyEntity replyEntity, and then I hit my second problem:

(2) How can I get the subdocument from a document?

Query query = Query.query(Criteria.where("_topicId").is(topicId).and("reply._replyId").is(replyId));
TopicEntity t = mongoTemplate.findOne(query, TopicEntity.class, "colletionName");

I used this query but I get the outer document (topic), including both replies from the example above for topic1. I just want the reply; how can I make that work? Thanks a lot.

A: (1) Update (pull) reply array element:
This code will update the document; that is, it removes the specific element (sub-document) from the reply array:

// Query criteria for topic and reply
String topicId = "5e5e4d4bb431502946c15342";
String topicReplyId = "07a0293a-22a1-45fb-9aa2-775fa24e9915";

MongoOperations mongoTemplate = new MongoTemplate(MongoClients.create(), "test");
Query query = Query.query(Criteria
    .where("topic._topicId").is(topicId)
    .and("topic.reply._replyId").is(topicReplyId));
Update update = new Update().pull("topic.$.reply", new Document("_replyId", topicReplyId));
mongoTemplate.updateFirst(query, update, "topics"); // "topics" is the collection name

[ EDIT ADD ]

(2) Aggregation query to get the reply document:

db.topics.aggregate( [
    { $unwind: "$topic" },
    { $match: { "topic._topicId": topicId } },
    { $unwind: "$topic.reply" },
    { $match: { "topic.reply._replyId": topicReplyId } },
    { $project: { _id: 0, reply: "$topic.reply" } }
] ).pretty()

This returns:

{
    "reply" : {
        "_replyId" : "07a0293a-22a1-45fb-9aa2-775fa24e9915",
        "username" : "test1",
        "content" : "reply1",
        "date" : 1583240955561
    }
}
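For reference, a mongo-shell equivalent of the answer's pull update can be handy for testing the query outside Spring first; the collection name "topics" and the IDs are taken from the answer's example:

db.topics.updateOne(
    { "topic._topicId": "5e5e4d4bb431502946c15342",
      "topic.reply._replyId": "07a0293a-22a1-45fb-9aa2-775fa24e9915" },
    { $pull: { "topic.$.reply": { _replyId: "07a0293a-22a1-45fb-9aa2-775fa24e9915" } } }
)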
Not able to pull from nested array and query return sub-document using MongoTemplate
I am using mongodb in springboot. And here is a part of my data: { "topic": [ { "_topicId": "5e5e4d4bb431502946c15342", "name": "testName0", "username": "test0", "date": 1583238474961, "reply": [ { "_replyId": "38d29dcb-1a79-4788-b721-5fbe700cc99d", "username": "test0", "content": "reply0", "date": 1583240780072 }, { "_replyId": "07a0293a-22a1-45fb-9aa2-775fa24e9915", "username": "test1", "content": "reply1", "date": 1583240955561 } ] }, { "_topicId": "5e5e4d4bb431502946c15343", "name": "testName1", "username": "test1", "date": 1583238475241, "reply": [] } ] } I have two problems: (1) I try to pull a reply(a object of in java) from a topic, I try these queries: Query query = Query.query(Criteria.where("_topicId").is(topicId)); Update update = new Update().pull("reply.$._replyId", topicReplyId); mongoTemplate.updateFirst(query, update, "colletionName"); And I got a error The positional operator did not find the match needed from the query Query query = Query.query(Criteria.where("_topicId").is(topicId)); Update update = new Update().pull("reply._replyId", topicReplyId); mongoTemplate.updateFirst(query, update, "colletionName"); And I got a error Cannot use the part (_replyId) of (reply._replyId) to traverse the element Then I decide to use the third way: Query query = Query.query(Criteria.where("_topicId").is(topicId)); Update update = new Update().pull("reply", replyEntity); mongoTemplate.updateFirst(query, update, "colletionName"); I try to new a ReplyEntity replyEntity, and I got my second problem: (2) How can I get the subdocument from a document? Query query = Query.query(Criteria.where("_topicId").is(topicId).and("reply._replyId").is(replyId)); TopicEntity t = mongoTemplate.findOne(query, TopicEntity.class, "colletionName"); I used the query but I get the outer-document(topic), include two reply on the example above of topic1. I just want the reply,how can make it? Thanks a lot.
[ "(1) Update (pull) reply array element:\nThis code will update the document; that is removes the specific element (sub-document) from the reply array:\n// Query criteria for topic and reply\nString topicId = \"5e5e4d4bb431502946c15342\";\nString topicReplyId = \"07a0293a-22a1-45fb-9aa2-775fa24e9915\";\n\nMongoOperations mongoTemplate = new MongoTemplate(MongoClients.create(), \"test\");\nQuery query = Query.query(Criteria\n .where(\"topic._topicId\").is(topicId)\n .and(\"topic.reply._replyId\").is(topicReplyId));\nUpdate update = new Update().pull(\"topic.$.reply\", new Document(\"_replyId\", topicReplyId));\nmongoTemplate.updateFirst(query, update, \"topics\"); // \"topics\" is the collection name\n\n\n\n[ EDIT ADD ]\n(2) Aggregation query to get the reply document:\ndb.topics.aggregate( [\n { $unwind: \"$topic\" },\n { $match: { \"topic._topicId\": topicId } },\n { $unwind: \"$topic.reply\" },\n { $match: { \"topic.reply._replyId\": topicReplyId } },\n { $project: { _id: 0, reply: \"$topic.reply\" } }\n] ).pretty()\n\nThis returns:\n{\n \"reply\" : {\n \"_replyId\" : \"07a0293a-22a1-45fb-9aa2-775fa24e9915\",\n \"username\" : \"test1\",\n \"content\" : \"reply1\",\n \"date\" : 1583240955561\n }\n}\n\n" ]
[ 2 ]
[ "Update update = new Update().pull(\"topic.$.reply\", new Document(\"_replyId\", topicReplyId));\n\nthere is one problem here,\nnew Document(\"_replyId\", topicReplyId), incase my key is \"id\", spring data mongo lib changes this value as \"_id\" while sending to mongo db.\n\ncan you pls tell how to fix this?\n" ]
[ -2 ]
[ "mongodb", "mongotemplate", "spring_data" ]
stackoverflow_0060510065_mongodb_mongotemplate_spring_data.txt
Q: How I can assign value to variable using a button?

I'm trying to write a graphic calculator using buttons. How can I assign a value to a variable using a button? I wrote this code:

from tkinter import *

a=0

def button_0a():
    a=0
    return 0

button0= Button(kalkulator, text="0", command=przycisk_0a)
button0.grid(row=1, column=0)

Of course, it is only a fragment of the code, but it is enough to describe my problem. It changes only variable a, but the next time I would like to change variable b using the same button.

A: przycisk_0a is the callback function bound to the button0 button, so a function with that name must be defined; you defined button_0a instead, so the name passed to command= does not match. It must be like this:

def przycisk_0a():

A: This changes the value of a and then changes the value of b the next time you press the button.

from tkinter import *

kalkulator = Tk()

a=0
b=0

def button_0b():
    global b
    b = 0
    return 0

def button_0a():
    global a
    a = 0
    # After the first click, the same button switches to changing b
    button0.configure(command = button_0b)
    return 0

button0= Button(kalkulator, text="0", command=button_0a)

button0.grid(row=1, column=0)

kalkulator.mainloop()
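A common alternative for calculators is to keep one handler and pass the value in, rather than swapping commands per click. A minimal sketch (the layout and names are illustrative only); the digit=d default argument freezes the value for each button:

import tkinter as tk

kalkulator = tk.Tk()
current = tk.StringVar(value="")

def press(digit):
    # Append the pressed digit to the running entry
    current.set(current.get() + str(digit))

tk.Label(kalkulator, textvariable=current).grid(row=0, column=0, columnspan=3)

# One loop builds all ten digit buttons sharing the same handler
for d in range(10):
    tk.Button(kalkulator, text=str(d),
              command=lambda digit=d: press(digit)).grid(row=1 + d // 3, column=d % 3)

kalkulator.mainloop()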
How I can assign value to variable using a button?
I'm trying to write a graphic calculator using buttons. How I can assign value to variable using a button? I wrote the code: from tkinter import * a=0 def button_0a(): a=0 return 0 button0= Button(kalkulator, text="0", command=przycisk_0a) button0.grid(row=1, column=0) Of course, it is only a fragment of the code, but it enough to describe my problem. It changes only variable a, but next time i would like to change variable b using the same button.
[ "przycisk_0a is your callback function bind to the button0 button, so you must define your function, but you have defined the button0 button which is nonsense. \nIt must be like this:\ndef przycisk_0a():\n\n", "This changes the value of a and then changes the value of b the next time you press the button.\nfrom tkinter import *\n\nkalkulator = Tk()\n\na=0\nb=0\n\ndef button_0a():\n b=0\n return 0\n\ndef button_0a():\n a=0\n button0.configure(command = button_0b)\n return 0\n\nbutton0= Button(kalkulator, text=\"0\", command=button_0a)\n\nbutton0.grid(row=1, column=0)\n\nkalkulator.mainloop()\n\n" ]
[ 0, 0 ]
[]
[]
[ "python", "python_3.x", "tkinter" ]
stackoverflow_0038443208_python_python_3.x_tkinter.txt
Q: fs::read_to_string(file_path) error 123: InvalidFilename works when hardcoded but fails when a variable with an equivalent string is passed in

I have some text files in my project's folder on the same level as my src folder. I have a run function that gets user input and makes a ReadFile struct from it:

pub fn run() {
    loop {
        let file = ReadFile::println_recieve_h("What is the filepath?");
        let query = ReadFile::println_recieve_h("What phrase do you want to find?");
        let readfile = ReadFile::new(&file, &query);
        ...

Here is the helper function that I made to reduce redundancy (I believe I have isolated the issue here):

fn println_recieve_h(print: &str) -> String {
    println!("{print}");
    let mut input = String::new();
    std::io::stdin().read_line(&mut input).unwrap();
    input
}

Here is the function that invokes the point of failure:

pub struct ReadFile {
    query: String,
    file_path: String,
    contents: String
}

impl ReadFile {
    // Builds a ReadFile struct with a path and phrase.
    fn new(file_path: &String, phrase: &String) -> ReadFile {
        use std::fs;
        ReadFile {
            query: phrase.clone(),
            file_path: file_path.clone(),
            contents: fs::read_to_string(file_path).expect("ERROR 003: FILE NOT FOUND"),
        }
    }

contents: fs::read_to_string(file_path).expect("ERROR 003: FILE NOT FOUND") fails in this use case but succeeds with a hardcoded string slice (I have tried converting the string reference to a slice). Example: contents: fs::read_to_string("longtxt1.txt").expect("ERROR 003: FILE NOT FOUND").

Does anyone see the problem that is causing this? I am new to Rust. I have tried converting the reference to a string slice, cloning the reference, etc. I made sure there are no spelling errors when I do my inputs in the console. When I hardcode file in the run function to String::from("longtxt1.txt"), everything works, which I believe isolates the issue to my usage of the helper function.

A: read_line includes a trailing newline in the returned string, which is not in your filename. You'll need to trim the returned string.
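Applied to the helper from the question, the fix is a one-line change. A sketch (on Windows, the stray "\r\n" left by read_line is what triggers OS error 123, InvalidFilename):

fn println_recieve_h(print: &str) -> String {
    println!("{print}");
    let mut input = String::new();
    std::io::stdin().read_line(&mut input).unwrap();
    // read_line keeps the trailing newline (and '\r' on Windows);
    // trim it off before using the text as a filename
    input.trim().to_string()
}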
fs::read_to_string(file_path) error 123: InvalidFilename works when hardcoded but fails when a variable with an equivalent string is passed in
I have some text files in my project's folder on the same level as my src folder. I have a run function that gets user input and makes a readfile struct from it. pub fn run() { loop { let file = ReadFile::println_recieve_h("What is the filepath?"); let query = ReadFile::println_recieve_h("What phrase do you want to find?"); let readfile = ReadFile::new(&file, &query); ... Here is the helper function that I made to reduce redundancy (I believe I have isolated the issue here): fn println_recieve_h(print: &str) -> String { println!("{print}"); let mut input = String::new(); std::io::stdin().read_line(&mut input).unwrap(); input } Here is the function that invokes the point of failure pub struct ReadFile{ query: String, file_path: String, contents: String } impl ReadFile { //Builds a readfile struct with a path and phrase. fn new(file_path: &String, phrase: &String) -> ReadFile{ use std::fs; ReadFile { query: phrase.clone(), file_path: file_path.clone(), contents: fs::read_to_string(file_path).expect("ERROR 003: FILE NOT FOUND"), } } contents: fs::read_to_string(file_path).expect("ERROR 003: FILE NOT FOUND") fails with the use case however succeeds with hardcoded string slice (I have tried converting the string reference to a slice). Example: contents: fs::read_to_string("longtxt1.txt").expect("ERROR 003: FILE NOT FOUND"). Does anyone see the problem that is causing this? I am new to Rust. I have tried converting the reference to a string slice, cloning the reference, etc. I made sure there are no spelling errors when I do my inputs in the console. I have hardcoded file in the run function to String::from("longtxt1.txt") which makes everything work. I believe this isolates the issue to my usage of the helper function.
[ "read_line includes a trailing newline in the returned string, which is not in your filename. You'll need to trim the returned string.\n" ]
[ 0 ]
[]
[]
[ "makefile", "rust" ]
stackoverflow_0074679915_makefile_rust.txt
Q: How can I get the maximum range that exists in an arbitrary list of integers? I'm new to prolog, I don't understand much of the language and I had already posted a question about Prolog before. Now I want to obtain, from a list of integers, the numbers in the interval that contains the largest amount of numbers in that list, in other words the maximum range. Example: maxrange(X,Y,[1,3,2,7,4,5,6,9,8]). X = 1, Y= 10. maxrange(1,10,[1,3,2,7,4,5,6,9,8]. True. maxrange(1,8,[1,3,2,7,4,5,6,9,8]. False. Lists must contain all numbers between [X,Y) and must be the maximum interval. A: Do you mean a simple min & max of the list: list_min_max([H|T], Min, Max) :- list_min_max_(T, H, H, Min, Max). list_min_max_([], Min, Max, Min, Max). list_min_max_([H|T], Min0, Max0, Min, Max) :- min(H, Min0, Min1), max(H, Max0, Max1), list_min_max_(T, Min1, Max1, Min, Max). Results in swi-prolog: ?- list_min_max([4,9,2,6], Min, Max). Min = 2, Max = 9. A: Try this code: maxrange(A, B, List) :- maxranges(List, Ranges), member(A-B, Ranges). maxranges(List, Ranges) :- sort(List, [Min|Sorted]), maxranges_loop(Sorted, Min, Min, [], Ranges). maxranges_loop([], A, B, Ranges, UpdatedRanges) :- update(Ranges, A, B, UpdatedRanges). maxranges_loop([X|Xs], A, B, Ranges, UpdatedRanges) :- Bool is abs(sign(X - (B + 1))), maxranges_case(Bool, X, Xs, A, B, Ranges, UpdatedRanges). maxranges_case(0, X, Xs, A, _, Ranges, UpdatedRanges) :- maxranges_loop(Xs, A, X, Ranges, UpdatedRanges). maxranges_case(1, X, Xs, A, B, Ranges, UpdatedRanges) :- maxranges_loop(Xs, X, X, Ranges, NewRanges), update(NewRanges, A, B, UpdatedRanges). update([], A, B0, [A-B]) :- B is B0+1. update([A0-B0|Ranges], A, B, UpdatedRanges) :- B1 is B+1, Sign is sign((B1-A)-(B0-A0)), update_case(Sign, [A0-B0|Ranges], A, B1, UpdatedRanges). update_case( 0, Ranges, A, B, [A-B|Ranges]). update_case(+1, _, A, B, [A-B]). update_case(-1, Ranges, _, _, Ranges). Examples: ?- maxrange(X, Y, [1,3,2,7,4,5,6,9,8]). X = 1, Y = 10. ?- maxrange(1, 10, [1,3,2,7,4,5,6,9,8]). true. ?- maxrange(1, 8, [1,3,2,7,4,5,6,9,8]). false. ?- maxrange(A, B, [1,2, 4,5,6,7, 9,10,11, 14,15,16,17]). A = 4, B = 8 ; A = 14, B = 18. ?- maxranges([1,2, 4,5,6,7, 9,10,11, 14,15,16,17], R). R = [4-8, 14-18]. EXPLANATION The predicate update_case/5 updates a list of maximum ranges, with respect to a new range [A..B), written in Prolog as A-B. If its first argument is 0, then the new range [A..B) is added to that list; if its first argument is +1, that list changes to a new list containing only the range [A..B); and, its first argument is -1, then that list is kept unchanged. ?- update_case(0, [14-18], 5, 9, R). R = [5-9, 14-18]. ?- update_case(+1, [14-18], 1, 9, R). R = [1-9]. ?- update_case(-1, [14-18], 3, 7, R). R = [14-18]. The predicate update/4 updates a list of maximum ranges with respect to a new range [A..B]. First, it transforms the closed range [A..B] into a corresponding right-open range [A..B+1). Then, if that list is empty, the predicate adds the new range to it. Otherwise, the predicate compares the range [A..B+1) with the first range in that list, say [A0..B0). The comparison is made by the expression Sign is sign((B+1-A)-(B0-A0). If both ranges are equal, then Sign is 0; if the first range is bigger than the second, then Sign is +1; and, if the first range is lesser than the second, then Sign is -1. The result of the comparision is used to call the predicate update_case/5. ?- update([], 17, 19, R). R = [17-20]. ?- update([17-20], 11, 13, R). % equal R = [11-14, 17-20]. 
?- update([11-14, 17-20], 5, 9, R). % bigger
R = [5-10].

?- update([5-10], 2, 3, R). % lesser
R = [5-10].


The predicate maxranges_loop/5 traverses a sorted list (without duplicates) and finds all ranges that are part of it. For each range found, the list of current maximum ranges is updated with a call to the predicate update/4.

The predicate maxranges/2 is just a wrapper to the predicate maxranges_loop/5, that ensures the list is sorted and does not contain duplicates.

The predicate maxrange/3 calls maxranges/2 to get the list of maximum ranges and checks whether the range [A..B) belongs to it.


A: Delete this before the teaching assistants catch you xd
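A quick way to sanity-check the expected [X,Y) results outside Prolog is a small Python sketch of the same idea (sort, deduplicate, group consecutive runs, keep the longest); the function name and tuple output format below are illustrative choices, not part of the answers above:
def max_ranges(nums):
    # maximal half-open ranges [a, b) of consecutive integers in nums
    values = sorted(set(nums))
    if not values:
        return []
    runs = []
    start = prev = values[0]
    for v in values[1:]:
        if v == prev + 1:
            prev = v                             # the run continues
        else:
            runs.append((start, prev + 1))       # gap: close the run
            start = prev = v
    runs.append((start, prev + 1))
    best = max(b - a for a, b in runs)
    return [(a, b) for a, b in runs if b - a == best]

print(max_ranges([1, 3, 2, 7, 4, 5, 6, 9, 8]))
# [(1, 10)]
print(max_ranges([1, 2, 4, 5, 6, 7, 9, 10, 11, 14, 15, 16, 17]))
# [(4, 8), (14, 18)] -- matches the maxranges/2 results above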
How can I get the maximum range that exists in an arbitrary list of integers?
I'm new to prolog, I don't understand much of the language and I had already posted a question about Prolog before. Now I want to obtain, from a list of integers, the numbers in the interval that contains the largest amount of numbers in that list, in other words the maximum range.
Example:
maxrange(X,Y,[1,3,2,7,4,5,6,9,8]).
X = 1, Y = 10.

maxrange(1,10,[1,3,2,7,4,5,6,9,8]).
True.

maxrange(1,8,[1,3,2,7,4,5,6,9,8]).
False.

Lists must contain all numbers between [X,Y) and must be the maximum interval.
[ "Do you mean a simple min & max of the list:\nlist_min_max([H|T], Min, Max) :-\n list_min_max_(T, H, H, Min, Max).\n \nlist_min_max_([], Min, Max, Min, Max).\nlist_min_max_([H|T], Min0, Max0, Min, Max) :-\n min(H, Min0, Min1),\n max(H, Max0, Max1),\n list_min_max_(T, Min1, Max1, Min, Max).\n\nResults in swi-prolog:\n?- list_min_max([4,9,2,6], Min, Max).\nMin = 2,\nMax = 9.\n\n", "Try this code:\nmaxrange(A, B, List) :-\n maxranges(List, Ranges),\n member(A-B, Ranges).\n\nmaxranges(List, Ranges) :-\n sort(List, [Min|Sorted]),\n maxranges_loop(Sorted, Min, Min, [], Ranges).\n\nmaxranges_loop([], A, B, Ranges, UpdatedRanges) :-\n update(Ranges, A, B, UpdatedRanges).\nmaxranges_loop([X|Xs], A, B, Ranges, UpdatedRanges) :-\n Bool is abs(sign(X - (B + 1))),\n maxranges_case(Bool, X, Xs, A, B, Ranges, UpdatedRanges).\n\nmaxranges_case(0, X, Xs, A, _, Ranges, UpdatedRanges) :-\n maxranges_loop(Xs, A, X, Ranges, UpdatedRanges).\nmaxranges_case(1, X, Xs, A, B, Ranges, UpdatedRanges) :-\n maxranges_loop(Xs, X, X, Ranges, NewRanges),\n update(NewRanges, A, B, UpdatedRanges).\n\nupdate([], A, B0, [A-B]) :- B is B0+1.\nupdate([A0-B0|Ranges], A, B, UpdatedRanges) :-\n B1 is B+1,\n Sign is sign((B1-A)-(B0-A0)),\n update_case(Sign, [A0-B0|Ranges], A, B1, UpdatedRanges).\n\nupdate_case( 0, Ranges, A, B, [A-B|Ranges]).\nupdate_case(+1, _, A, B, [A-B]).\nupdate_case(-1, Ranges, _, _, Ranges).\n\nExamples:\n?- maxrange(X, Y, [1,3,2,7,4,5,6,9,8]).\nX = 1,\nY = 10.\n\n?- maxrange(1, 10, [1,3,2,7,4,5,6,9,8]).\ntrue.\n\n?- maxrange(1, 8, [1,3,2,7,4,5,6,9,8]).\nfalse.\n\n?- maxrange(A, B, [1,2, 4,5,6,7, 9,10,11, 14,15,16,17]).\nA = 4,\nB = 8 ;\nA = 14,\nB = 18.\n\n?- maxranges([1,2, 4,5,6,7, 9,10,11, 14,15,16,17], R).\nR = [4-8, 14-18].\n\nEXPLANATION\n\nThe predicate update_case/5 updates a list of maximum ranges, with respect to a new range [A..B), written in Prolog as A-B. If its first argument is 0, then the new range [A..B) is added to that list; if its first argument is +1, that list changes to a new list containing only the range [A..B); and, its first argument is -1, then that list is kept unchanged.\n\n?- update_case(0, [14-18], 5, 9, R).\nR = [5-9, 14-18].\n\n?- update_case(+1, [14-18], 1, 9, R).\nR = [1-9].\n\n?- update_case(-1, [14-18], 3, 7, R).\nR = [14-18].\n\n\nThe predicate update/4 updates a list of maximum ranges with respect to a new range [A..B]. First, it transforms the closed range [A..B] into a corresponding right-open range [A..B+1). Then, if that list is empty, the predicate adds the new range to it. Otherwise, the predicate compares the range [A..B+1) with the first range in that list, say [A0..B0). The comparison is made by the expression Sign is sign((B+1-A)-(B0-A0). If both ranges are equal, then Sign is 0; if the first range is bigger than the second, then Sign is +1; and, if the first range is lesser than the second, then Sign is -1. The result of the comparision is used to call the predicate update_case/5.\n\n?- update([], 17, 19, R).\nR = [17-20].\n\n?- update([17-20], 11, 13, R). % equal\nR = [11-14, 17-20].\n\n?- update([11-14, 17-20], 5, 9, R). % bigger\nR = [5-10].\n\n?- update([5-10], 2, 3, R). % lesser\nR = [5-10].\n\n\nThe predicate maxranges_loop/5 traverses a sorted list (without duplicates) and finds all ranges that are part of it. 
For each range found, the list of current maximum ranges is updated with a call to the predicate update/4.\n\nThe predicate maxranges/2 is just a wrapper to the predicate maxranges_loop/5, that ensures the list is sorted and does not contain duplicates.\n\nThe predicate maxrange/3 calls maxranges/2 to get the list of maximum ranges and checks whether the range [A..B) belongs to it.\n\n\n", "Delete this before the teaching assistants catch you xd\n" ]
[ 0, 0, 0 ]
[]
[]
[ "prolog" ]
stackoverflow_0074632445_prolog.txt
Q: Unable to update the discount command to subclass value since base class value is set to 0 I've been trying to see if I can change the discount of an item that is necessary for computation. My challenge is that I cannot update the discount since it was set to 0.
class Dog:
    def __init__(self, food, amount, cost, discount=0):
        self.food = food
        self.amount = amount
        self.cost = cost
        self.discount = discount
        
        if self.discount == 0:
            self.cost = self.amount *100
        else:
            self.cost = self.amount * 100 * (1-self.discount)
        

class Malamute(Dog):
    def __init__(self, food, amount, cost, behavior, discount=0):
        super().__init__(food, amount, cost, discount=0)
        self.behavior = behavior

        if self.behavior == "very good":
            self.discount = 0.20
        if self.behavior == "good":
            self.discount = 0.10
        if self.behavior == "bad":
            self.discount = 0

class Golden(Dog):
    def __init__(self, food, amount, cost, damage, discount=0):
        super().__init__(food, amount, cost, discount=0)
        self.damage = damage

        self.discount = -self.damage

class Golden_Malamute(Malamute,Golden):
    def __init__(self, food, amount, cost, behavior, damage, discount=0):
        Malamute().__init__(self,food, amount, cost, behavior, discount=0)
        Golden().__init__(self,food, amount, cost, damage, discount=0)

        self.discount=discount


Brownie = Dog("Pellet", 10, 0,)
print("Brownie", Brownie.cost)

Mala=Malamute("Pellet",10,0,"good")
print("Mala",Mala.cost)

Goldie=Golden("Pellet",10,0, 0.10)
print("Goldei",Goldie.cost)

#Blackie=Golden_Malamute("Pellet", 10, 5, "good", 0.05)
#print("Blackie", Blackie.cost)

When there should be a discount, it does not directly apply since the discount is set to zero. I am unable to shift the command to other subclasses as there are instances where Dog itself will be called and if a subclass is called, it will have to undergo two processes.
A: You might need to try the technique of walking through your program and speaking it out loud.
For example, this is how I read your listing and how I can detect an issue.

I create a new GoldenDog
The GoldenDog calls the super.__init
The super init calculates the cost based on the discount of zero
I then run the rest of the GoldenDog.__init
I set the discount to 0.10

The problem is you are setting the discount after the cost has been calculated.
To solve this, you ought to calculate the cost when a caller asks for the cost. That way, it will be able to use the current value of the discount (rather than only being able to use the discount that applied when created).
Alternatively, you need to pass the discount to the super.__init call.
class Golden(Dog):
    def __init__(self, food, amount, cost, damage, discount=0):
        super().__init__(food, amount, cost, discount=-damage)
        self.damage = damage

Your next task will be to fix the discount logic as it currently increases the price, rather than reducing it.
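One way to sketch the "calculate the cost when a caller asks for the cost" suggestion is a read-only property, so the price always reflects the current discount. This is an illustration only: the cost constructor argument from the question is dropped, and the 100-per-unit pricing is carried over from the original code.
class Dog:
    def __init__(self, food, amount, discount=0):
        self.food = food
        self.amount = amount
        self.discount = discount

    @property
    def cost(self):
        # recomputed on every access, so later discount changes count
        return self.amount * 100 * (1 - self.discount)

class Malamute(Dog):
    def __init__(self, food, amount, behavior):
        # map behavior to a discount before cost is ever read
        discounts = {"very good": 0.20, "good": 0.10, "bad": 0}
        super().__init__(food, amount, discounts.get(behavior, 0))
        self.behavior = behavior

mala = Malamute("Pellet", 10, "good")
print(mala.cost)      # 900.0 -- the 10% discount applies
mala.discount = 0.20
print(mala.cost)      # 800.0 -- recomputed from the new discount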
Unable to update the discount command to subclass value since base class value is set to 0
I've been trying to see if I can change the discount of an item that is necessary for computation. My challenge is that I cannot update the discount since it was set to 0.
class Dog:
    def __init__(self, food, amount, cost, discount=0):
        self.food = food
        self.amount = amount
        self.cost = cost
        self.discount = discount
        
        if self.discount == 0:
            self.cost = self.amount *100
        else:
            self.cost = self.amount * 100 * (1-self.discount)
        

class Malamute(Dog):
    def __init__(self, food, amount, cost, behavior, discount=0):
        super().__init__(food, amount, cost, discount=0)
        self.behavior = behavior

        if self.behavior == "very good":
            self.discount = 0.20
        if self.behavior == "good":
            self.discount = 0.10
        if self.behavior == "bad":
            self.discount = 0

class Golden(Dog):
    def __init__(self, food, amount, cost, damage, discount=0):
        super().__init__(food, amount, cost, discount=0)
        self.damage = damage

        self.discount = -self.damage

class Golden_Malamute(Malamute,Golden):
    def __init__(self, food, amount, cost, behavior, damage, discount=0):
        Malamute().__init__(self,food, amount, cost, behavior, discount=0)
        Golden().__init__(self,food, amount, cost, damage, discount=0)

        self.discount=discount


Brownie = Dog("Pellet", 10, 0,)
print("Brownie", Brownie.cost)

Mala=Malamute("Pellet",10,0,"good")
print("Mala",Mala.cost)

Goldie=Golden("Pellet",10,0, 0.10)
print("Goldei",Goldie.cost)

#Blackie=Golden_Malamute("Pellet", 10, 5, "good", 0.05)
#print("Blackie", Blackie.cost)

When there should be a discount, it does not directly apply since the discount is set to zero. I am unable to shift the command to other subclasses as there are instances where Dog itself will be called and if a subclass is called, it will have to undergo two processes.
[ "You might need to try the technique of walking through your program and speaking it out loud.\nFor example, this is how I read your listing and how I can detect an issue.\n\nI create a new GoldenDog\nThe GoldenDog calls the super.__init\nThe super init calculates the cost based on the discount of zero\nI then run the rest of the GoldenDog.__init\nI set the discount to 0.10\n\nThe problem is you are setting the discount after the cost has been calculated.\nTo solve this, you ought to calculate the cost when a caller asks for the cost. That way, it will be able to use the current value of the discount (rather than only being able to use the discount that applied when created).\nAlternatively, you need to pass the discount to the super.__init call.\nclass Golden(Dog):\n def __init__(self, food, amount, cost, damage, discount=0):\n super().__init__(food, amount, cost, discount=-damage)\n self.damage = damage\n\nYour next task will be to fix the discount logic as it currently increases the price, rather than reducing it.\n" ]
[ 0 ]
[]
[]
[ "attributes", "class", "object", "oop", "python" ]
stackoverflow_0074677411_attributes_class_object_oop_python.txt
Q: Pipeline order for css bundles, jquery and .js files. Error: GET .net::ERR_ABORTED 500 In css bundles. (Internal Server Error) After deploying our application, I am getting a HTTP 500 when I load this page http://vpn.myweb.com/css/bundles/admin.css and app.css for the first time. http://vpn.myweb.com/css/bundles/admin.css gives a http 500 first time and then loads correctly. My observation in developer tools, when I load it for the first time, both app.css and admin.css are blank. How do I make this issue happen in debug mode? It happens only for QA or prod. GET http://vpn.myweb.com/css/bundles/admin.css net::ERR_ABORTED 500 (Internal Server Error) GET http://vpn.myweb.com/css/bundles/app.css net::ERR_ABORTED 500 (Internal Server Error) GET http://vpn.myweb.com/js/bundles/modernizr.js net::ERR_ABORTED 500 (Internal Server Error) GET http://vpn.myweb.com/js/bundles/jquery.js net::ERR_ABORTED 500 (Internal Server Error) GET http://vpn.myweb.com/js/bundles/Admin.js net::ERR_ABORTED 500 (Internal Server Error) GET http://vpn.myweb.com/js/bundles/bootstrap.js net::ERR_ABORTED 500 (Internal Server Error) jquery.validate.min.js:4 Uncaught ReferenceError: jQuery is not defined _Layout.cshtml <link rel="stylesheet" href="~/css/bundles/app.css" /> <link rel="stylesheet" href="~/css/bundles/admin.css" /> <script src="~/js/bundles/modernizr.js"></script> Program.cs builder.Services.AddWebOptimizer(pipeline => { pipeline.AddJavaScriptBundle("/js/bundles/datetimepicker.js", "/js/moment.min.js", "/js/bootstrap-datetimepicker.min.js"); pipeline.AddJavaScriptBundle("/js/bundles/dataTables.js", "/js/moment.min.js", "/js/DataTables/jquery.dataTables.min.js", "/js/DataTables/dataTables.buttons.min.js", "/js/DataTables/jszip.min.js", "/js/DataTables/buttons.html5.min.js", "/js/DataTables/dataTables.bootstrap.min.js", "/js/DataTables/datetime-moment.js", "/js/DataTables/dataTables.select.min.js"); pipeline.AddJavaScriptBundle("/js/bundles/modernizr.js", "/js/modernizr-2.8.3.js"); pipeline.AddJavaScriptBundle("/js/bundles/jqueryval.js", "/js/jquery.validate*"); pipeline.AddJavaScriptBundle("/js/bundles/bootstrap.js", "/js/bootstrap.min.js", "/js/respond.js"); pipeline.AddJavaScriptBundle("/js/bundles/admin.js", "/js/AdminLTE/adminlte.js", "/js/bootbox.min.js"); pipeline.AddCssBundle("/css/bundles/app.css", "/css/bootstrap.min.css", "/css/bootstrap4-classes.css", "/css/font-awesome.min.css", "/css/Site.css"); pipeline.AddCssBundle("/css/bundles/admin.css", "/css/AdminLTE.css", "/css/skins/skin-blue.css"); pipeline.AddCssBundle("/css/bundles/dt.css", "/datatables/css/dataTables.bootstrap.min.css", "/datatables/css/buttons.dataTables.min.css"); pipeline.AddCssBundle("/css/bundles/datetimepicker.css", "/css/bootstrap-datetimepicker.min.css"); }); A: This problem is usually solved by adding write permission to the AppPool user for the WebOptimizer cache directory. The cache directory can be set in appsettings.json file. See here for more info { "webOptimizer": { "enableCaching": true, "enableMemoryCache": true, "enableDiskCache": true, "cacheDirectory": "/var/temp/weboptimizercache", /*Set permission for this line*/ "enableTagHelperBundling": true, "cdnUrl": "https://my-cdn.com/", "allowEmptyBundle": false } } This link may also help for setting permissions on Linux. Also some people say you should give write permission to all script/css folders, but documents and my experience do not confirm that.
Pipeline order for css bundles, jquery and .js files. Error: GET .net::ERR_ABORTED 500 In css bundles. (Internal Server Error)
After deploying our application, I am getting a HTTP 500 when I load this page http://vpn.myweb.com/css/bundles/admin.css and app.css for the first time. http://vpn.myweb.com/css/bundles/admin.css gives a http 500 first time and then loads correctly. My observation in developer tools, when I load it for the first time, both app.css and admin.css are blank. How do I make this issue happen in debug mode? It happens only for QA or prod. GET http://vpn.myweb.com/css/bundles/admin.css net::ERR_ABORTED 500 (Internal Server Error) GET http://vpn.myweb.com/css/bundles/app.css net::ERR_ABORTED 500 (Internal Server Error) GET http://vpn.myweb.com/js/bundles/modernizr.js net::ERR_ABORTED 500 (Internal Server Error) GET http://vpn.myweb.com/js/bundles/jquery.js net::ERR_ABORTED 500 (Internal Server Error) GET http://vpn.myweb.com/js/bundles/Admin.js net::ERR_ABORTED 500 (Internal Server Error) GET http://vpn.myweb.com/js/bundles/bootstrap.js net::ERR_ABORTED 500 (Internal Server Error) jquery.validate.min.js:4 Uncaught ReferenceError: jQuery is not defined _Layout.cshtml <link rel="stylesheet" href="~/css/bundles/app.css" /> <link rel="stylesheet" href="~/css/bundles/admin.css" /> <script src="~/js/bundles/modernizr.js"></script> Program.cs builder.Services.AddWebOptimizer(pipeline => { pipeline.AddJavaScriptBundle("/js/bundles/datetimepicker.js", "/js/moment.min.js", "/js/bootstrap-datetimepicker.min.js"); pipeline.AddJavaScriptBundle("/js/bundles/dataTables.js", "/js/moment.min.js", "/js/DataTables/jquery.dataTables.min.js", "/js/DataTables/dataTables.buttons.min.js", "/js/DataTables/jszip.min.js", "/js/DataTables/buttons.html5.min.js", "/js/DataTables/dataTables.bootstrap.min.js", "/js/DataTables/datetime-moment.js", "/js/DataTables/dataTables.select.min.js"); pipeline.AddJavaScriptBundle("/js/bundles/modernizr.js", "/js/modernizr-2.8.3.js"); pipeline.AddJavaScriptBundle("/js/bundles/jqueryval.js", "/js/jquery.validate*"); pipeline.AddJavaScriptBundle("/js/bundles/bootstrap.js", "/js/bootstrap.min.js", "/js/respond.js"); pipeline.AddJavaScriptBundle("/js/bundles/admin.js", "/js/AdminLTE/adminlte.js", "/js/bootbox.min.js"); pipeline.AddCssBundle("/css/bundles/app.css", "/css/bootstrap.min.css", "/css/bootstrap4-classes.css", "/css/font-awesome.min.css", "/css/Site.css"); pipeline.AddCssBundle("/css/bundles/admin.css", "/css/AdminLTE.css", "/css/skins/skin-blue.css"); pipeline.AddCssBundle("/css/bundles/dt.css", "/datatables/css/dataTables.bootstrap.min.css", "/datatables/css/buttons.dataTables.min.css"); pipeline.AddCssBundle("/css/bundles/datetimepicker.css", "/css/bootstrap-datetimepicker.min.css"); });
[ "This problem is usually solved by adding write permission to the AppPool user for the WebOptimizer cache directory. The cache directory can be set in appsettings.json file. See here for more info\n{\n \"webOptimizer\": {\n \"enableCaching\": true,\n \"enableMemoryCache\": true,\n \"enableDiskCache\": true,\n \"cacheDirectory\": \"/var/temp/weboptimizercache\", /*Set permission for this line*/\n \"enableTagHelperBundling\": true,\n \"cdnUrl\": \"https://my-cdn.com/\",\n \"allowEmptyBundle\": false\n }\n}\n\nThis link may also help for setting permissions on Linux. Also some people say you should give write permission to all script/css folders, but documents and my experience do not confirm that.\n" ]
[ 0 ]
[]
[]
[ ".net_6.0", "jquery" ]
stackoverflow_0074609657_.net_6.0_jquery.txt
Q: How to display two different return values from if/elif in Python? I'm reading a csv file and appending the data into a list and later using another function, I'm calculating these numbers and try to return two values using if/elif statements. To display the result I have created a procedure called displayData(numbers) and here I'm struggling to show the calculated values from previous function called seyrogus(numbers) against the original csv values. Currently, it's showing all only "4" as result. Where I'm doing wrong? My code so far def getData(): numbers = [] file = open("numbers.csv","r") for line in file: data = line.strip() numbers.append(int(data.strip())) return numbers def seyrogus(numbers): for counter in range(len(numbers)): if numbers[counter] % 2 != 0: return (int(numbers[counter] * 3) + 1) elif numbers[counter] % 2 == 0: return (int(numbers[counter] / 2)) def displayData(numbers): print("Original Numbers \t Converted Numbers") for counter in range(len(numbers)): print(f"{numbers[counter]} \t \t \t {seyrogus(numbers)}") def main(): numbers = getData() displayData(numbers) main() output Original Numbers Converted Numbers 1 4 2 4 3 4 4 4 5 4 6 4 7 4 8 4 9 4 10 4 11 4 12 4 13 4 14 4 15 4 16 4 17 4 18 4 19 4 20 4 21 4 22 4 23 4 24 4 25 4 26 4 27 4 28 4 29 4 30 4 31 4 32 4 33 4 34 4 35 4 36 4 37 4 38 4 39 4 40 4 41 4 42 4 43 4 44 4 45 4 46 4 47 4 48 4 49 4 50 4 51 4 52 4 53 4 54 4 55 4 56 4 57 4 58 4 59 4 60 4 61 4 62 4 63 4 64 4 65 4 66 4 67 4 68 4 69 4 70 4 71 4 72 4 73 4 74 4 75 4 76 4 77 4 78 4 79 4 80 4 81 4 82 4 83 4 84 4 85 4 86 4 87 4 88 4 89 4 90 4 91 4 92 4 93 4 94 4 95 4 96 4 97 4 98 4 99 4 100 4 csv file 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 A: Your seyrogus function needs to return a list rather than returning a single value. The reason you're only getting 4 as the result is that every time you call it, it iterates over numbers from the beginning and then returns the first converted value rather than iterating over the entire list. Both getData and seyrogus can be implemented very simply as list comprehensions. You then need to iterate over both numbers and seyrogus(numbers) in parallel in displayData; an easy way of doing that is the zip function. def getData(): with open("numbers.csv") as file: return [int(line.strip()) for line in file] def seyrogus(numbers): return [n * 3 + 1 if n % 2 else n // 2 for n in numbers] def displayData(numbers): print("Original Numbers \t Converted Numbers") for original, converted in zip(numbers, seyrogus(numbers)): print(f"{original} \t \t \t {converted}") def main(): displayData(getData()) main() prints: Original Numbers Converted Numbers 1 4 2 1 3 10 4 2 5 16 6 3 7 22 8 4 9 28 10 5 11 34 etc.
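The root cause can be seen in a five-line example (illustrative, not from the answer above): a return inside a for loop ends the whole function on the first iteration, so the original seyrogus only ever converted numbers[0], which is 1, and 1 * 3 + 1 == 4 -- hence the column of 4s.
def first_only(numbers):
    for n in numbers:
        return n * 3 + 1 if n % 2 else n // 2   # runs once, then exits

print(first_only([1, 2, 3]))   # 4 -- only the first element is converted
print(first_only([5, 2, 3]))   # 16 -- still just the first element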
How to display two different return values from if/elif in Python?
I'm reading a csv file and appending the data into a list and later using another function, I'm calculating these numbers and try to return two values using if/elif statements. To display the result I have created a procedure called displayData(numbers) and here I'm struggling to show the calculated values from previous function called seyrogus(numbers) against the original csv values. Currently, it's showing all only "4" as result. Where I'm doing wrong? My code so far def getData(): numbers = [] file = open("numbers.csv","r") for line in file: data = line.strip() numbers.append(int(data.strip())) return numbers def seyrogus(numbers): for counter in range(len(numbers)): if numbers[counter] % 2 != 0: return (int(numbers[counter] * 3) + 1) elif numbers[counter] % 2 == 0: return (int(numbers[counter] / 2)) def displayData(numbers): print("Original Numbers \t Converted Numbers") for counter in range(len(numbers)): print(f"{numbers[counter]} \t \t \t {seyrogus(numbers)}") def main(): numbers = getData() displayData(numbers) main() output Original Numbers Converted Numbers 1 4 2 4 3 4 4 4 5 4 6 4 7 4 8 4 9 4 10 4 11 4 12 4 13 4 14 4 15 4 16 4 17 4 18 4 19 4 20 4 21 4 22 4 23 4 24 4 25 4 26 4 27 4 28 4 29 4 30 4 31 4 32 4 33 4 34 4 35 4 36 4 37 4 38 4 39 4 40 4 41 4 42 4 43 4 44 4 45 4 46 4 47 4 48 4 49 4 50 4 51 4 52 4 53 4 54 4 55 4 56 4 57 4 58 4 59 4 60 4 61 4 62 4 63 4 64 4 65 4 66 4 67 4 68 4 69 4 70 4 71 4 72 4 73 4 74 4 75 4 76 4 77 4 78 4 79 4 80 4 81 4 82 4 83 4 84 4 85 4 86 4 87 4 88 4 89 4 90 4 91 4 92 4 93 4 94 4 95 4 96 4 97 4 98 4 99 4 100 4 csv file 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100
[ "Your seyrogus function needs to return a list rather than returning a single value. The reason you're only getting 4 as the result is that every time you call it, it iterates over numbers from the beginning and then returns the first converted value rather than iterating over the entire list.\nBoth getData and seyrogus can be implemented very simply as list comprehensions. You then need to iterate over both numbers and seyrogus(numbers) in parallel in displayData; an easy way of doing that is the zip function.\ndef getData():\n with open(\"numbers.csv\") as file:\n return [int(line.strip()) for line in file]\n\ndef seyrogus(numbers):\n return [n * 3 + 1 if n % 2 else n // 2 for n in numbers]\n\ndef displayData(numbers):\n print(\"Original Numbers \\t Converted Numbers\")\n for original, converted in zip(numbers, seyrogus(numbers)):\n print(f\"{original} \\t \\t \\t {converted}\")\n\ndef main():\n displayData(getData())\n\nmain()\n\nprints:\nOriginal Numbers Converted Numbers\n1 4\n2 1\n3 10\n4 2\n5 16\n6 3\n7 22\n8 4\n9 28\n10 5\n11 34\n\netc.\n" ]
[ 4 ]
[]
[]
[ "python" ]
stackoverflow_0074679896_python.txt
Q: 'PDOException' with message 'SQLSTATE[22001]: String data, right truncated: 0 NOTE: I have narrowed this problem down to specifically PDO because I am able to successfully prepare and execute statements using the odbc_* functions. Why can't I bind this parameter to the PDO prepared statement? This works: $mssqldriver = 'ODBC Driver 13 for SQL Server'; $pdoDB = new PDO("odbc:Driver=$mssqldriver;Server=$hostname;Database=$dbname", $username, $password); $pdoDB->setAttribute( PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION ); $sql = "SELECT 'value' AS col where 'this' = 'this'"; $stmt = $pdoDB->prepare($sql); $params = []; $stmt->execute($params); print_r($stmt->fetch()); Array ( [col] => value [0] => value ) Does not work: $sql = "SELECT 'value' AS col where 'this' = ?"; $stmt = $pdoDB->prepare($sql); $params = ['this']; $stmt->execute($params); print_r($stmt->fetch()); Web Server is running PHP 5.5.9 on Linux Ubuntu 14.04 with ODBC Driver 13 for SQL Server and connecting to Microsoft SQL Server 2012 on Windows Server 2012 Here's the full error: Fatal error: Uncaught exception 'PDOException' with message 'SQLSTATE[22001]: String data, right truncated: 0 [Microsoft][ODBC Driver 13 for SQL Server] String data, right truncation (SQLExecute[0] at /build/buildd/php5-5.5.9+dfsg/ext/pdo_odbc/odbc_stmt.c:254)' in /var/www/scratch.php:46 Stack trace: #0 /var/www/scratch.php(46): PDOStatement->execute(Array) #1 {main} thrown in /var/www/scratch.php on line 46 I have also tried setting: $pdoDB->setAttribute( PDO::ATTR_EMULATE_PREPARES, true ); And using named parameters: $sql = "SELECT 'value' AS col where 'this' = :myVal"; $stmt = $pdoDB->prepare($sql); $params = ['myVal' => 'this']; $stmt->execute($params); print_r($stmt->fetch()); Even with an explicit colon: $params = [':myVal' => 'this']; I also tried just using bindParam as demonstrated in this answer: $sql = "SELECT 'value' AS col where 'this' = ?"; $stmt = $pdoDB->prepare($sql); $param = 'this'; $stmt->bindParam(1, $param); $stmt->execute(); print_r($stmt->fetch()); As well as with named parameters: $sql = "SELECT 'value' AS col where 'this' = :myVal"; $stmt = $pdoDB->prepare($sql); $param = 'this'; $stmt->bindParam(':myVal', $param, PDO::PARAM_STR); $stmt->execute(); print_r($stmt->fetch()); If I try to explicitly set the length: $stmt->bindParam(':myVal', $param, PDO::PARAM_STR, 4); I get a bonus error: Fatal error: Uncaught exception 'PDOException' with message 'SQLSTATE[42000]: Syntax error or access violation: 102 [Microsoft][ODBC Driver 13 for SQL Server][SQL Server] Incorrect syntax near 'OUTPUT'. And yes, all this is a trivialized example without tables so that you can easily reproduce it, but just to be sure, I have actually tried this with a real table. CREATE TABLE myTable ( id INT IDENTITY PRIMARY KEY, val NVARCHAR(255) ); INSERT INTO myTable (val) VALUES ('hello world'); Works: $sql = "SELECT * FROM myTable WHERE val = 'hello world'"; $stmt = $pdoDB->prepare($sql); $params = []; $stmt->execute($params); print_r($stmt->fetch()); Array ( [id] => 1 [0] => 1 [val] => hello world [1] => hello world ) Does not work: $sql = "SELECT * FROM myTable WHERE val = ?"; $stmt = $pdoDB->prepare($sql); $params = ['hello world']; $stmt->execute($params); print_r($stmt->fetch()); All paths lead to the same error: String data, right truncated A: Unfortunately, It's a PDO_ODBC 64-bit incompatibility problem (#61777, #64824) and without any doubts you are on a 64-bit build which doesn't allow you to bind parameters. 
Fortunately, It has a patch that was first included in the 5.6 release: This bug is also referenced in #61777 and is still present in the latest stable release of the 5.5 branch. I see two tickets exist for this problem already, and I'm just submitting these changes via github as a reminder that this is a serious problem for anyone using PDO_ODBC on the x64 builds. What is wrong with your PHP's shipped PDO_ODBC? By looking at one of those recommended patches: diff --git a/ext/pdo_odbc/odbc_stmt.c b/ext/pdo_odbc/odbc_stmt.c index 8b0ccf3..1d275cd 100644 --- a/ext/pdo_odbc/odbc_stmt.c +++ b/ext/pdo_odbc/odbc_stmt.c @@ -551,7 +551,7 @@ static int odbc_stmt_describe(pdo_stmt_t *stmt, int colno TSRMLS_DC) struct pdo_column_data *col = &stmt->columns[colno]; RETCODE rc; SWORD colnamelen; - SDWORD colsize; + SQLULEN colsize; SQLLEN displaysize; We see the only thing that's changed is SDWORD (16-bit signed integer) which is substituted with new ODBC type SQLULEN that is 64 bits in a 64-bit ODBC application and 32 bits in a 32-bit ODBC application. I believe committer wasn't aware of colsize data type only since in the very next line SQLLEN is defined properly. What should I do now? Upgrade to PHP version >= 5.6 Stick with odbc_* functions as a working solution. Compile a PHP v5.5.9 with provided patches. Build your own PDO wrapper as recommended by @GordonM A: This is probably not what you want to hear, but this has all the hallmarks of a bug in PHP's ODBC driver for PDO (which is not heavily used as PHP programmers tend to favour open source databases like MySQL/SQLite/Postgres over commercial offerings), or in the underlying SQL server driver (which is poorly supported in Linux, for similar reasons), though if odbc_* works then it's probably not the underlying driver. If you try to do the exact same tasks, except using "sqlite::memory:" as the DSN, all your examples work. This makes it highly unlikely that you're doing anything wrong (unless MS Server has some really weird non-conforming SQL syntax of which I'm not aware). Your examples work fine for me with SQLite when ATTR_EMULATE_PREPARES is both enabled and disabled. All I think you can realistically do is file a bug report and hope somebody picks it up. You may be in for a long wait though. As for practical solutions to your problem, your options are either a) switch to a DBMS that PHP supports or b) resort to SQL string construction instead of prepared statements and be ready to accept the burden of avoiding SQL injection attacks yourself. This should be considered a last resort though! Using PDO::quote() may help but I'd also make sure your data is very thoroughly validated as well. I know it's not an ideal solution, but if you must use MS SQL and can't wait for the PHP team to bugfix it then I don't really see that you have much of a choice. Or there's option c) which is use odbc_* functions instead, if they work. Of course if you want to use OOP style then you'll have to implement your own class that wraps the procedural odbc functions in OO methods so that could potentially be a lot of work. EDIT: I found another question on Stack Overflow where the asker seems to have a similar problem. His solution was to ditch the "official" MS driver in favour of FreeTDS. This might be something of a shot in the dark, but it could be worth a try. A: I had the same error Uncaught Uncaught PDOException: SQLSTATE[22001]. It turned out that the length of the database for password was too short(30) for the encrypted password. 
So, I just increased it and solved the problem.
'PDOException' with message 'SQLSTATE[22001]: String data, right truncated: 0
NOTE: I have narrowed this problem down to specifically PDO because I am able to successfully prepare and execute statements using the odbc_* functions. Why can't I bind this parameter to the PDO prepared statement? This works: $mssqldriver = 'ODBC Driver 13 for SQL Server'; $pdoDB = new PDO("odbc:Driver=$mssqldriver;Server=$hostname;Database=$dbname", $username, $password); $pdoDB->setAttribute( PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION ); $sql = "SELECT 'value' AS col where 'this' = 'this'"; $stmt = $pdoDB->prepare($sql); $params = []; $stmt->execute($params); print_r($stmt->fetch()); Array ( [col] => value [0] => value ) Does not work: $sql = "SELECT 'value' AS col where 'this' = ?"; $stmt = $pdoDB->prepare($sql); $params = ['this']; $stmt->execute($params); print_r($stmt->fetch()); Web Server is running PHP 5.5.9 on Linux Ubuntu 14.04 with ODBC Driver 13 for SQL Server and connecting to Microsoft SQL Server 2012 on Windows Server 2012 Here's the full error: Fatal error: Uncaught exception 'PDOException' with message 'SQLSTATE[22001]: String data, right truncated: 0 [Microsoft][ODBC Driver 13 for SQL Server] String data, right truncation (SQLExecute[0] at /build/buildd/php5-5.5.9+dfsg/ext/pdo_odbc/odbc_stmt.c:254)' in /var/www/scratch.php:46 Stack trace: #0 /var/www/scratch.php(46): PDOStatement->execute(Array) #1 {main} thrown in /var/www/scratch.php on line 46 I have also tried setting: $pdoDB->setAttribute( PDO::ATTR_EMULATE_PREPARES, true ); And using named parameters: $sql = "SELECT 'value' AS col where 'this' = :myVal"; $stmt = $pdoDB->prepare($sql); $params = ['myVal' => 'this']; $stmt->execute($params); print_r($stmt->fetch()); Even with an explicit colon: $params = [':myVal' => 'this']; I also tried just using bindParam as demonstrated in this answer: $sql = "SELECT 'value' AS col where 'this' = ?"; $stmt = $pdoDB->prepare($sql); $param = 'this'; $stmt->bindParam(1, $param); $stmt->execute(); print_r($stmt->fetch()); As well as with named parameters: $sql = "SELECT 'value' AS col where 'this' = :myVal"; $stmt = $pdoDB->prepare($sql); $param = 'this'; $stmt->bindParam(':myVal', $param, PDO::PARAM_STR); $stmt->execute(); print_r($stmt->fetch()); If I try to explicitly set the length: $stmt->bindParam(':myVal', $param, PDO::PARAM_STR, 4); I get a bonus error: Fatal error: Uncaught exception 'PDOException' with message 'SQLSTATE[42000]: Syntax error or access violation: 102 [Microsoft][ODBC Driver 13 for SQL Server][SQL Server] Incorrect syntax near 'OUTPUT'. And yes, all this is a trivialized example without tables so that you can easily reproduce it, but just to be sure, I have actually tried this with a real table. CREATE TABLE myTable ( id INT IDENTITY PRIMARY KEY, val NVARCHAR(255) ); INSERT INTO myTable (val) VALUES ('hello world'); Works: $sql = "SELECT * FROM myTable WHERE val = 'hello world'"; $stmt = $pdoDB->prepare($sql); $params = []; $stmt->execute($params); print_r($stmt->fetch()); Array ( [id] => 1 [0] => 1 [val] => hello world [1] => hello world ) Does not work: $sql = "SELECT * FROM myTable WHERE val = ?"; $stmt = $pdoDB->prepare($sql); $params = ['hello world']; $stmt->execute($params); print_r($stmt->fetch()); All paths lead to the same error: String data, right truncated
[ "Unfortunately,\nIt's a PDO_ODBC 64-bit incompatibility problem (#61777, #64824) and without any doubts you are on a 64-bit build which doesn't allow you to bind parameters.\nFortunately,\nIt has a patch that was first included in the 5.6 release:\n\nThis bug is also referenced in\n #61777 and is still present\n in the latest stable release of the 5.5 branch. I see two tickets\n exist for this problem already, and I'm just submitting these changes\n via github as a reminder that this is a serious problem for anyone\n using PDO_ODBC on the x64 builds.\n\nWhat is wrong with your PHP's shipped PDO_ODBC?\nBy looking at one of those recommended patches:\ndiff --git a/ext/pdo_odbc/odbc_stmt.c b/ext/pdo_odbc/odbc_stmt.c\nindex 8b0ccf3..1d275cd 100644\n--- a/ext/pdo_odbc/odbc_stmt.c\n+++ b/ext/pdo_odbc/odbc_stmt.c\n@@ -551,7 +551,7 @@ static int odbc_stmt_describe(pdo_stmt_t *stmt, int colno TSRMLS_DC)\n struct pdo_column_data *col = &stmt->columns[colno];\n RETCODE rc;\n SWORD colnamelen;\n- SDWORD colsize;\n+ SQLULEN colsize;\n SQLLEN displaysize;\n\nWe see the only thing that's changed is SDWORD (16-bit signed integer) which is substituted with new ODBC type SQLULEN that is 64 bits in a 64-bit ODBC application and 32 bits in a 32-bit ODBC application.\nI believe committer wasn't aware of colsize data type only since in the very next line SQLLEN is defined properly.\nWhat should I do now?\n\nUpgrade to PHP version >= 5.6\nStick with odbc_* functions as a working solution.\nCompile a PHP v5.5.9 with provided patches.\nBuild your own PDO wrapper as recommended by @GordonM\n\n", "This is probably not what you want to hear, but this has all the hallmarks of a bug in PHP's ODBC driver for PDO (which is not heavily used as PHP programmers tend to favour open source databases like MySQL/SQLite/Postgres over commercial offerings), or in the underlying SQL server driver (which is poorly supported in Linux, for similar reasons), though if odbc_* works then it's probably not the underlying driver.\nIf you try to do the exact same tasks, except using \"sqlite::memory:\" as the DSN, all your examples work. This makes it highly unlikely that you're doing anything wrong (unless MS Server has some really weird non-conforming SQL syntax of which I'm not aware). Your examples work fine for me with SQLite when ATTR_EMULATE_PREPARES is both enabled and disabled. \nAll I think you can realistically do is file a bug report and hope somebody picks it up. You may be in for a long wait though. \nAs for practical solutions to your problem, your options are either a) switch to a DBMS that PHP supports or b) resort to SQL string construction instead of prepared statements and be ready to accept the burden of avoiding SQL injection attacks yourself. This should be considered a last resort though! Using PDO::quote() may help but I'd also make sure your data is very thoroughly validated as well. I know it's not an ideal solution, but if you must use MS SQL and can't wait for the PHP team to bugfix it then I don't really see that you have much of a choice. \nOr there's option c) which is use odbc_* functions instead, if they work. Of course if you want to use OOP style then you'll have to implement your own class that wraps the procedural odbc functions in OO methods so that could potentially be a lot of work. \nEDIT: I found another question on Stack Overflow where the asker seems to have a similar problem. His solution was to ditch the \"official\" MS driver in favour of FreeTDS. 
This might be something of a shot in the dark, but it could be worth a try. \n", "I had the same error Uncaught Uncaught PDOException: SQLSTATE[22001]. It turned out that the length of the database for password was too short(30) for the encrypted password. So, I just increased it and solved the problem.\n" ]
[ 21, 4, 0 ]
[]
[]
[ "odbc", "pdo", "php", "prepared_statement", "sql_server" ]
stackoverflow_0038255659_odbc_pdo_php_prepared_statement_sql_server.txt
Q: Laravel merge eloquent validation rule with a Rule:: How to merge eloquent validation rule with a Rule:: This is what I am attempting to run, but it chokes on the [ ] with Method Illuminate\Validation\Validator::validateRequired|email does not exist. public function rules() { return [ 'email' => [ 'required|email:rfc,dns|min:5|max:75', Rule::unique('email_updates', 'email', 'product_uuid', 'affiliate_uuid') ], ]; } This line works independently return [ 'email' => 'required|email:rfc,dns|min:5|max:75', ]; This also works return [ 'email' => Rule::unique('email_updates', 'email', 'product_uuid', 'affiliate_uuid'), ]; How do I merge these differing validation syntaxs? A: A comment on this question gave me the answer. Method Illuminate\Validation\Validator::validateRequired|min does not exist You can't use a mix of | and rule So this is my working answer public function rules() { return [ 'email' => [ 'required', 'email:rfc,dns', 'min:5', 'max:75', Rule::unique('email_updates', 'email', 'product_uuid', 'affiliate_uuid') ], ]; } It works, but I seemingly lost the eloquent messages that came with the | syntax, but that is a small price to pay.
Laravel merge eloquent validation rule with a Rule::
How to merge eloquent validation rule with a Rule:: This is what I am attempting to run, but it chokes on the [ ] with Method Illuminate\Validation\Validator::validateRequired|email does not exist. public function rules() { return [ 'email' => [ 'required|email:rfc,dns|min:5|max:75', Rule::unique('email_updates', 'email', 'product_uuid', 'affiliate_uuid') ], ]; } This line works independently return [ 'email' => 'required|email:rfc,dns|min:5|max:75', ]; This also works return [ 'email' => Rule::unique('email_updates', 'email', 'product_uuid', 'affiliate_uuid'), ]; How do I merge these differing validation syntaxs?
[ "A comment on this question gave me the answer. Method Illuminate\\Validation\\Validator::validateRequired|min does not exist\nYou can't use a mix of | and rule\nSo this is my working answer\npublic function rules()\n{\n return [\n 'email' => [\n 'required', 'email:rfc,dns', 'min:5', 'max:75',\n Rule::unique('email_updates', 'email', 'product_uuid', 'affiliate_uuid')\n ],\n ];\n}\n\nIt works, but I seemingly lost the eloquent messages that came with the | syntax, but that is a small price to pay.\n" ]
[ 0 ]
[]
[]
[ "laravel_9", "laravel_formrequest", "validation" ]
stackoverflow_0074679075_laravel_9_laravel_formrequest_validation.txt
Q: Serverless Error, CloudFormation cannot update a stack when a custom-named resource requires replacing I have the following error. Serverless: Operation failed! Serverless Error --------------------------------------- An error occurred: phoneNumberTable - CloudFormation cannot update a stack when a custom-named resource requires replacing. Rename mysite-api-phonenumber-dev and update the stack again… I tried deleting the database to see if it could re-create it then but it still gives the same error and doesn’t remake the database? What do I do here? What I did was recently change in my serverless.yml file the following for the resource. phoneNumberTable: #This table is used to track phone numbers used in the system Type: AWS::DynamoDB::Table Properties: TableName: ${self:custom.phoneNumberTable} AttributeDefinitions: #UserID in this case will be created once and constantly updated as it changes with status regarding the user. - AttributeName: phoneNumber AttributeType: S KeySchema: - AttributeName: phoneNumber KeyType: HASH ProvisionedThroughput: ReadCapacityUnits: ${self:custom.dynamoDbCapacityUnits.${self:custom.pstage}} WriteCapacityUnits: ${self:custom.dynamoDbCapacityUnits.${self:custom.pstage}} I accidentally created it with userId when I was copying and pasting so I changed it to phoneNumber for the hash key but the change won't reflect now! Edit:: I found a solution but it's terrible. If I do sls remove --stage dev it will remove everything for my stage, but literally everything... then I have to do sls deploy --stage dev to start the deploy over again, in the meantime my database is cleared of all data... there has to be a better way somehow. A: The AWS recommended solution is to rename: https://aws.amazon.com/premiumsupport/knowledge-center/cloudformation-custom-name/ A: I found I needed to insert some variables to make it work. Environment variable: USERS_TABLE: "users-${opt:stage, self:provider.stage}-${self:provider.environment.BUILD_NUMBER}" Table name: TableName: ${self:provider.environment.USERS_TABLE} In my code: const existingUser = await dynamoDb.get({ TableName: process.env.USERS_TABLE, Key: { email, }, }).promise(); A: Rename your resource to something else, deploy it, rename it back (if you need) and deploy again. A: According to https://aws.amazon.com/premiumsupport/knowledge-center/cloudformation-custom-name/ This error typically occurs when a stack update tries to replace resources that have properties with custom names. AWS CloudFormation doesn't replace a resource that has a custom name unless that custom name is changed to a different name. To prevent a stack failure and avoid the error message, change any resources with custom names to use different names before you update a stack. To resolve this you need change TableName to some other string. What that will do: Serverless will delete your table (because that's no longer a part of a stack) and will create a new table with a new name and a keys.
Serverless Error, CloudFormation cannot update a stack when a custom-named resource requires replacing
I have the following error. Serverless: Operation failed! Serverless Error --------------------------------------- An error occurred: phoneNumberTable - CloudFormation cannot update a stack when a custom-named resource requires replacing. Rename mysite-api-phonenumber-dev and update the stack again… I tried deleting the database to see if it could re-create it then but it still gives the same error and doesn’t remake the database? What do I do here? What I did was recently change in my serverless.yml file the following for the resource. phoneNumberTable: #This table is used to track phone numbers used in the system Type: AWS::DynamoDB::Table Properties: TableName: ${self:custom.phoneNumberTable} AttributeDefinitions: #UserID in this case will be created once and constantly updated as it changes with status regarding the user. - AttributeName: phoneNumber AttributeType: S KeySchema: - AttributeName: phoneNumber KeyType: HASH ProvisionedThroughput: ReadCapacityUnits: ${self:custom.dynamoDbCapacityUnits.${self:custom.pstage}} WriteCapacityUnits: ${self:custom.dynamoDbCapacityUnits.${self:custom.pstage}} I accidentally created it with userId when I was copying and pasting so I changed it to phoneNumber for the hash key but the change won't reflect now! Edit:: I found a solution but it's terrible. If I do sls remove --stage dev it will remove everything for my stage, but literally everything... then I have to do sls deploy --stage dev to start the deploy over again, in the meantime my database is cleared of all data... there has to be a better way somehow.
[ "The AWS recommended solution is to rename:\nhttps://aws.amazon.com/premiumsupport/knowledge-center/cloudformation-custom-name/\n", "I found I needed to insert some variables to make it work.\nEnvironment variable:\nUSERS_TABLE: \"users-${opt:stage, self:provider.stage}-${self:provider.environment.BUILD_NUMBER}\"\nTable name:\nTableName: ${self:provider.environment.USERS_TABLE}\nIn my code:\n\nconst existingUser = await dynamoDb.get({\n TableName: process.env.USERS_TABLE,\n Key: {\n email,\n },\n }).promise();\n\n", "Rename your resource to something else, deploy it, rename it back (if you need) and deploy again.\n", "According to https://aws.amazon.com/premiumsupport/knowledge-center/cloudformation-custom-name/\nThis error typically occurs when a stack update tries to replace resources that have properties with custom names. AWS CloudFormation doesn't replace a resource that has a custom name unless that custom name is changed to a different name. To prevent a stack failure and avoid the error message, change any resources with custom names to use different names before you update a stack.\nTo resolve this you need change TableName to some other string. What that will do: Serverless will delete your table (because that's no longer a part of a stack) and will create a new table with a new name and a keys.\n" ]
[ 13, 3, 1, 0 ]
[]
[]
[ "amazon_cloudformation", "amazon_dynamodb", "amazon_web_services", "serverless", "serverless_framework" ]
stackoverflow_0047567549_amazon_cloudformation_amazon_dynamodb_amazon_web_services_serverless_serverless_framework.txt
Q: issue with returning Struct from C to Python using ctypes I am trying to get the values of C struct member variables from within python using ctypes. My expected return values for x and y are 10 and 20 respectively. I am thinking I might be neglecting something subtle but not sure what it is. The output I get is 0 for x and y as shown at the end of the post. Any pointers appreciated. Python code: import ctypes import os class Point(ctypes.Structure): _fields_ = [("x", ctypes.c_int), ("y", ctypes.c_int)] directory = os.path.dirname(os.path.realpath(__file__)) print(directory) source = directory + "\\cstruct.so" clibrary = ctypes.CDLL(source) clibrary.getPoint.restype = ctypes.POINTER(Point) p1 = clibrary.getPoint() print(p1.contents.x, p1.contents.y) C code: #include <stdio.h> #include <string.h> #include <stdlib.h> struct Point { int x; int y; }; struct Point* getPoint() { struct Point *p; p->x = 10; p->y = 20; return p; } C code is compiled into a shared library file cstruct.so which is called in the python file. Python Output: 0 0 A: I found out what the issue is. I had to dynamically allocate the size of the struct Point in the C file. Previously, I had not done this. This solves the issue. Just modified the first line in the struct Point* getPoint() function as shown below. struct Point *p = malloc(sizeof(struct Point)); Also added a c function in the c file to free the memory from the struct pointer as shown below. void free_mem(struct Point* p) { free(p); }
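For completeness, here is a sketch of the matching Python side for that fix (it assumes the shared library was rebuilt with the free_mem function above; the library path string is illustrative): declare argtypes/restype for both functions and hand the pointer back so the C runtime that allocated the struct also frees it.
import ctypes

class Point(ctypes.Structure):
    _fields_ = [("x", ctypes.c_int), ("y", ctypes.c_int)]

clibrary = ctypes.CDLL("./cstruct.so")
clibrary.getPoint.restype = ctypes.POINTER(Point)
clibrary.free_mem.argtypes = [ctypes.POINTER(Point)]
clibrary.free_mem.restype = None

p1 = clibrary.getPoint()
print(p1.contents.x, p1.contents.y)   # 10 20
clibrary.free_mem(p1)                 # avoid leaking one Point per call
p1 = None                             # never touch the freed pointer again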
issue with returning Struct from C to Python using ctypes
I am trying to get the values of C struct member variables from within python using ctypes. My expected return values for x and y are 10 and 20 respectively. I am thinking I might be neglecting something subtle but not sure what it is. The output I get is 0 for x and y as shown at the end of the post. Any pointers appreciated. Python code: import ctypes import os class Point(ctypes.Structure): _fields_ = [("x", ctypes.c_int), ("y", ctypes.c_int)] directory = os.path.dirname(os.path.realpath(__file__)) print(directory) source = directory + "\\cstruct.so" clibrary = ctypes.CDLL(source) clibrary.getPoint.restype = ctypes.POINTER(Point) p1 = clibrary.getPoint() print(p1.contents.x, p1.contents.y) C code: #include <stdio.h> #include <string.h> #include <stdlib.h> struct Point { int x; int y; }; struct Point* getPoint() { struct Point *p; p->x = 10; p->y = 20; return p; } C code is compiled into a shared library file cstruct.so which is called in the python file. Python Output: 0 0
[ "I found out what the issue is. I had to dynamically allocate the size of the struct Point in the C file. Previously, I had not done this. This solves the issue.\nJust modified the first line in the struct Point* getPoint() function as shown below.\nstruct Point *p = malloc(sizeof(struct Point));\n\nAlso added a c function in the c file to free the memory from the struct pointer as shown below.\nvoid free_mem(struct Point* p) \n{\n free(p);\n}\n\n" ]
[ 0 ]
[]
[]
[ "ctype", "struct" ]
stackoverflow_0074671834_ctype_struct.txt
Q: How can I convert Pine-script to Python? (QQE signal) Pine-script code
//@version=4
// This source code is subject to the terms of the Mozilla Public License 2.0 at https://mozilla.org/MPL/2.0/
// © colinmck

study("QQE signals", overlay=true)

RSI_Period = input(14, title='RSI Length')
SF = input(5, title='RSI Smoothing')
QQE = input(4.238, title='Fast QQE Factor')
ThreshHold = input(10, title="Thresh-hold")

src = close
Wilders_Period = RSI_Period * 2 - 1

Rsi = rsi(src, RSI_Period)
RsiMa = ema(Rsi, SF)
AtrRsi = abs(RsiMa[1] - RsiMa)
MaAtrRsi = ema(AtrRsi, Wilders_Period)
dar = ema(MaAtrRsi, Wilders_Period) * QQE

longband = 0.0
shortband = 0.0
trend = 0

DeltaFastAtrRsi = dar
RSIndex = RsiMa
newshortband = RSIndex + DeltaFastAtrRsi
newlongband = RSIndex - DeltaFastAtrRsi
longband := RSIndex[1] > longband[1] and RSIndex > longband[1] ? max(longband[1], newlongband) : newlongband
shortband := RSIndex[1] < shortband[1] and RSIndex < shortband[1] ? min(shortband[1], newshortband) : newshortband
cross_1 = cross(longband[1], RSIndex)
trend := cross(RSIndex, shortband[1]) ? 1 : cross_1 ? -1 : nz(trend[1], 1)
FastAtrRsiTL = trend == 1 ? longband : shortband

// Find all the QQE Crosses

QQExlong = 0
QQExlong := nz(QQExlong[1])
QQExshort = 0
QQExshort := nz(QQExshort[1])
QQExlong := FastAtrRsiTL < RSIndex ? QQExlong + 1 : 0
QQExshort := FastAtrRsiTL > RSIndex ? QQExshort + 1 : 0

//Conditions

qqeLong = QQExlong == 1 ? FastAtrRsiTL[1] - 50 : na
qqeShort = QQExshort == 1 ? FastAtrRsiTL[1] - 50 : na

// Plotting

plotshape(qqeLong, title="QQE long", text="Long", textcolor=color.white, style=shape.labelup, location=location.belowbar, color=color.green, transp=0, size=size.tiny)
plotshape(qqeShort, title="QQE short", text="Short", textcolor=color.white, style=shape.labeldown, location=location.abovebar, color=color.red, transp=0, size=size.tiny)

// Alerts

alertcondition(qqeLong, title="Long", message="Long")
alertcondition(qqeShort, title="Short", message="Short")

python code
import pandas as pd
import numpy as np
import talib as ta
import math
import ccxt

RSI_Period = 6
Wilders_Period = RSI_Period * 2 - 1
SF = 5
QQE = 3
ThresHold = 3

data = client.klines(symbol='BTCUSDT', interval='3m', limit=1000) ## binance API data
df = pd.DataFrame(data)# DATA

Rsi = ta.RSI(df['close'], RSI_Period) ## RSI
RsiMa = ta.EMA(Rsi, SF) ## EMA
AtrRsi = abs(RsiMa[-1] - RsiMa)
MaAtrRsi = ta.EMA(AtrRsi, Wilders_Period) ## EMA
dar = ta.EMA(MaAtrRsi, Wilders_Period) * QQE

It is incomplete. I'm not trying to implement a graph, I simply want to run longs and shorts in real time. I want to alert in python console whether it is long or short.
Is there a way to convert it to python? I want to continuously fetch the data and determine when it is long and when it is short.
A: #qqe signal
    df["RSI_Period"]=ta.rsi (df['close'],14)
    SF = 5
    QQE = 4.238
    ThreshHold = input(10, title="Thresh-hold")

    src = df['close']
    Wilders_Period = df["RSI2"] * 2 - 1

    Rsi = rsi(src, RSI_Period)
    RsiMa = ta.ema(Rsi, SF)
    AtrRsi = abs(RsiMa[1] - RsiMa)
    MaAtrRsi = ta.ema(AtrRsi, Wilders_Period)
    dar = ema(MaAtrRsi, Wilders_Period) * QQE

    longband = 0.0
    shortband = 0.0
    trend = 0

    DeltaFastAtrRsi = dar
    RSIndex = RsiMa
    newshortband = RSIndex + DeltaFastAtrRsi
    newlongband = RSIndex - DeltaFastAtrRsi
    if longband == RSIndex[-1] > longband[-1] and RSIndex > longband[-1] ? max(longband[-1], newlongband) : newlongband
    shortband == RSIndex[-1] < shortband[-1] and RSIndex < shortband[-1] ?
min(shortband[-1], newshortband) : newshortband cross_1 = cross(longband[1], RSIndex) trend == cross(RSIndex, shortband[1]) ? 1 : cross_1 ? -1 : nz(trend[1], 1) FastAtrRsiTL = trend == 1 ? longband : shortband # Find all the QQE Crosses QQExlong = 0 QQExlong == nz(QQExlong[-1]) QQExshort = 0 QQExshort == nz(QQExshort[-1]) QQExlong == FastAtrRsiTL < RSIndex ? QQExlong + 1 : 0 QQExshort == FastAtrRsiTL > RSIndex ? QQExshort + 1 : 0 #Conditions qqeLong = QQExlong == 1 ? FastAtrRsiTL[-1] - 50 : na qqeShort = QQExshort == 1 ? FastAtrRsiTL[-1] - 50 : na ? I couldn't find what to use instead. A: // This source code is subject to the terms of the Mozilla Public License 2.0 at https://mozilla.org/MPL/2.0/ // © blackcat1402 //@version=4 study("[blackcat] L3 Banker Fund Flow Trend Oscillator", overlay=false) //functions xrf(values, length) => r_val = float(na) if length >= 1 for i = 0 to length by 1 if na(r_val) or not na(values[i]) r_val := values[i] r_val r_val xsa(src,len,wei) => sumf = 0.0 ma = 0.0 out = 0.0 sumf := nz(sumf[1]) - nz(src[len]) + src ma := na(src[len]) ? na : sumf/len out := na(out[1]) ? ma : (src*wei+out[1]*(len-wei))/len out //set up a simple model of banker fund flow trend fundtrend = ((3*xsa((close- lowest(low,27))/(highest(high,27)-lowest(low,27))*100,5,1)-2*xsa(xsa((close-lowest(low,27))/(highest(high,27)-lowest(low,27))*100,5,1),3,1)-50)*1.032+50) //define typical price for banker fund typ = (2*close+high+low+open)/5 //lowest low with mid term fib # 34 lol = lowest(low,34) //highest high with mid term fib # 34 hoh = highest(high,34) //define banker fund flow bull bear line bullbearline = ema((typ-lol)/(hoh-lol)*100,13) //define banker entry signal bankerentry = crossover(fundtrend,bullbearline) and bullbearline<25 //banker fund entry with yellow candle plotcandle(0,50,0,50,color=bankerentry ? color.new(color.yellow,0):na) //banker increase position with green candle plotcandle(fundtrend,bullbearline,fundtrend,bullbearline,color=fundtrend>bullbearline ? color.new(color.green,0):na) //banker decrease position with white candle plotcandle(fundtrend,bullbearline,fundtrend,bullbearline,color=fundtrend<(xrf(fundtrend*0.95,1)) ? color.new(color.white,0):na) //banker fund exit/quit with red candle plotcandle(fundtrend,bullbearline,fundtrend,bullbearline,color=fundtrend<bullbearline ? color.new(color.red,0):na) //banker fund Weak rebound with blue candle plotcandle(fundtrend,bullbearline,fundtrend,bullbearline,color=fundtrend<bullbearline and fundtrend>(xrf(fundtrend*0.95,1)) ? color.new(color.blue,0):na) //overbought and oversold threshold lines h1 = hline(80,color=color.red, linestyle=hline.style_dotted) h2 = hline(20, color=color.yellow, linestyle=hline.style_dotted) h3 = hline(10,color=color.lime, linestyle=hline.style_dotted) h4 = hline(90, color=color.fuchsia, linestyle=hline.style_dotted) fill(h2,h3,color=color.yellow,transp=70) fill(h1,h4,color=color.fuchsia,transp=70) alertcondition(bankerentry, title='Alert on Yellow Candle', message='Yellow Candle!') alertcondition(fundtrend>bullbearline, title='Alert on Green Candle', message='Green Candle!') alertcondition(fundtrend<(xrf(fundtrend*0.95,1)), title='Alert on White Candle', message='White Candle!') alertcondition(fundtrend<bullbearline, title='Alert on Red Candle', message='Red Candle!') alertcondition(fundtrend<bullbearline and fundtrend>(xrf(fundtrend*0.95,1)), title='Alert on Blue Candle', message='Blue Candle!') Can you help me convert this to python?
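Neither answer above resolves the conversion, so here is a hedged sketch of how the Pine idioms map to pandas/numpy, assuming a DataFrame with a float close column (the function name and defaults are mine; the logic follows the QQE script above but is not verified against TradingView's output): nz() becomes a default value, the a ? b : c ternary becomes a Python conditional expression, cross(a, b) checks that two series swapped sides between consecutive bars, and the longband/shortband recursion needs an explicit loop because each bar depends on the previous one.
import numpy as np
import pandas as pd
import talib as ta

def ema(s, n):
    # Pine's ema(): alpha = 2 / (n + 1)
    return s.ewm(span=n, adjust=False).mean()

def qqe_trend(close, rsi_period=14, sf=5, qqe=4.238):
    """Return a +1/-1 trend series; flips in it are the long/short signals."""
    wilders = rsi_period * 2 - 1
    rsi = pd.Series(ta.RSI(close.to_numpy(dtype=float), rsi_period), index=close.index)
    rsi_ma = ema(rsi, sf)                                        # RSIndex
    dar = ema(ema((rsi_ma - rsi_ma.shift(1)).abs(), wilders), wilders) * qqe

    n = len(close)
    longband, shortband = np.zeros(n), np.zeros(n)
    trend = np.ones(n, dtype=int)                                # nz(trend[1], 1)
    for i in range(2, n):
        r, rp, d = rsi_ma.iloc[i], rsi_ma.iloc[i - 1], dar.iloc[i]
        if np.isnan(r) or np.isnan(rp) or np.isnan(d):
            continue                                             # indicator warm-up
        # Pine ternary a ? b : c  ->  b if a else c
        longband[i] = (max(longband[i - 1], r - d)
                       if rp > longband[i - 1] and r > longband[i - 1] else r - d)
        shortband[i] = (min(shortband[i - 1], r + d)
                        if rp < shortband[i - 1] and r < shortband[i - 1] else r + d)
        # cross(a, b): the series sit on opposite sides on consecutive bars
        cross_short = (r - shortband[i - 1]) * (rp - shortband[i - 2]) < 0
        cross_long = (r - longband[i - 1]) * (rp - longband[i - 2]) < 0
        trend[i] = 1 if cross_short else -1 if cross_long else trend[i - 1]
    return pd.Series(trend, index=close.index)

# Usage: t = qqe_trend(df["close"])
# long signals:  (t == 1) & (t.shift() == -1)
# short signals: (t == -1) & (t.shift() == 1)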
How can I convert Pine-script to Python? (QQE signal)
enter image description here Pine-script code //@version=4 // This source code is subject to the terms of the Mozilla Public License 2.0 at https://mozilla.org/MPL/2.0/ // © colinmck study("QQE signals", overlay=true) RSI_Period = input(14, title='RSI Length') SF = input(5, title='RSI Smoothing') QQE = input(4.238, title='Fast QQE Factor') ThreshHold = input(10, title="Thresh-hold") src = close Wilders_Period = RSI_Period * 2 - 1 Rsi = rsi(src, RSI_Period) RsiMa = ema(Rsi, SF) AtrRsi = abs(RsiMa[1] - RsiMa) MaAtrRsi = ema(AtrRsi, Wilders_Period) dar = ema(MaAtrRsi, Wilders_Period) * QQE longband = 0.0 shortband = 0.0 trend = 0 DeltaFastAtrRsi = dar RSIndex = RsiMa newshortband = RSIndex + DeltaFastAtrRsi newlongband = RSIndex - DeltaFastAtrRsi longband := RSIndex[1] > longband[1] and RSIndex > longband[1] ? max(longband[1], newlongband) : newlongband shortband := RSIndex[1] < shortband[1] and RSIndex < shortband[1] ? min(shortband[1], newshortband) : newshortband cross_1 = cross(longband[1], RSIndex) trend := cross(RSIndex, shortband[1]) ? 1 : cross_1 ? -1 : nz(trend[1], 1) FastAtrRsiTL = trend == 1 ? longband : shortband // Find all the QQE Crosses QQExlong = 0 QQExlong := nz(QQExlong[1]) QQExshort = 0 QQExshort := nz(QQExshort[1]) QQExlong := FastAtrRsiTL < RSIndex ? QQExlong + 1 : 0 QQExshort := FastAtrRsiTL > RSIndex ? QQExshort + 1 : 0 //Conditions qqeLong = QQExlong == 1 ? FastAtrRsiTL[1] - 50 : na qqeShort = QQExshort == 1 ? FastAtrRsiTL[1] - 50 : na // Plotting plotshape(qqeLong, title="QQE long", text="Long", textcolor=color.white, style=shape.labelup, location=location.belowbar, color=color.green, transp=0, size=size.tiny) plotshape(qqeShort, title="QQE short", text="Short", textcolor=color.white, style=shape.labeldown, location=location.abovebar, color=color.red, transp=0, size=size.tiny) // Alerts alertcondition(qqeLong, title="Long", message="Long") alertcondition(qqeShort, title="Short", message="Short") python code import pandas as pd import numpy as np import talib as ta import math import ccxt RSI_Period = 6 Wilders_Period = RSI_Period * 2 - 1 SF = 5 QQE = 3 ThresHold = 3 data = client.klines(symbol='BTCUSDT', interval='3m', limit=1000) ## binance API data df = pd.DataFrame(data)# DATA Rsi = ta.RSI(df['close'], RSI_Period) ## RSI RsiMa = ta.EMA(Rsi, SF) ## EMA AtrRsi = abs(RsiMa[-1] - RsiMa) MaAtrRsi = ta.EMA(AtrRsi, Wilders_Period) ## EMA dar = ta.EMA(MaAtrRsi, Wilders_Period) * QQE It is incomplete. I'm not trying to implement a graph, I simply want to run longs and shorts in real time. I want to alert in python console whether it is long or short. Is there a way to convert it to python? I want to continuously fetch the data and determine when it is long and when it is short.
[ "#qqe signal\n df[\"RSI_Period\"]=ta.rsi (df['close'],14)\n SF = 5\n QQE = 4.238\n ThreshHold = input(10, title=\"Thresh-hold\")\n\n src = df['close']\n Wilders_Period = df[\"RSI2\"] * 2 - 1\n\n Rsi = rsi(src, RSI_Period)\n RsiMa = ta.ema(Rsi, SF)\n AtrRsi = abs(RsiMa[1] - RsiMa)\n MaAtrRsi = ta.ema(AtrRsi, Wilders_Period)\n dar = ema(MaAtrRsi, Wilders_Period) * QQE\n\n longband = 0.0\n shortband = 0.0\n trend = 0\n\n DeltaFastAtrRsi = dar\n RSIndex = RsiMa\n newshortband = RSIndex + DeltaFastAtrRsi\n newlongband = RSIndex - DeltaFastAtrRsi\n if longband == RSIndex[-1] > longband[-1] and RSIndex > longband[-1] ? max(longband[-1], newlongband) : newlongband\n shortband == RSIndex[-1] < shortband[-1] and RSIndex < shortband[-1] ? min(shortband[-1], newshortband) : newshortband\n cross_1 = cross(longband[1], RSIndex)\n trend == cross(RSIndex, shortband[1]) ? 1 : cross_1 ? -1 : nz(trend[1], 1)\n FastAtrRsiTL = trend == 1 ? longband : shortband\n\n # Find all the QQE Crosses\n\n QQExlong = 0\n QQExlong == nz(QQExlong[-1])\n QQExshort = 0\n QQExshort == nz(QQExshort[-1])\n QQExlong == FastAtrRsiTL < RSIndex ? QQExlong + 1 : 0\n QQExshort == FastAtrRsiTL > RSIndex ? QQExshort + 1 : 0\n\n #Conditions\n\n qqeLong = QQExlong == 1 ? FastAtrRsiTL[-1] - 50 : na\n qqeShort = QQExshort == 1 ? FastAtrRsiTL[-1] - 50 : na\n \n\n? I couldn't find what to use instead.\n", "// This source code is subject to the terms of the Mozilla Public License 2.0 at https://mozilla.org/MPL/2.0/\n// © blackcat1402\n//@version=4\n\nstudy(\"[blackcat] L3 Banker Fund Flow Trend Oscillator\", overlay=false)\n\n//functions\nxrf(values, length) =>\n r_val = float(na)\n if length >= 1\n for i = 0 to length by 1\n if na(r_val) or not na(values[i])\n r_val := values[i]\n r_val\n r_val\n\nxsa(src,len,wei) =>\n sumf = 0.0\n ma = 0.0\n out = 0.0\n sumf := nz(sumf[1]) - nz(src[len]) + src\n ma := na(src[len]) ? na : sumf/len\n out := na(out[1]) ? ma : (src*wei+out[1]*(len-wei))/len\n out\n \n//set up a simple model of banker fund flow trend \nfundtrend = ((3*xsa((close- lowest(low,27))/(highest(high,27)-lowest(low,27))*100,5,1)-2*xsa(xsa((close-lowest(low,27))/(highest(high,27)-lowest(low,27))*100,5,1),3,1)-50)*1.032+50)\n//define typical price for banker fund\ntyp = (2*close+high+low+open)/5\n//lowest low with mid term fib # 34\nlol = lowest(low,34)\n//highest high with mid term fib # 34\nhoh = highest(high,34)\n//define banker fund flow bull bear line\nbullbearline = ema((typ-lol)/(hoh-lol)*100,13)\n//define banker entry signal\nbankerentry = crossover(fundtrend,bullbearline) and bullbearline<25\n\n//banker fund entry with yellow candle\nplotcandle(0,50,0,50,color=bankerentry ? color.new(color.yellow,0):na)\n\n//banker increase position with green candle\nplotcandle(fundtrend,bullbearline,fundtrend,bullbearline,color=fundtrend>bullbearline ? color.new(color.green,0):na)\n\n//banker decrease position with white candle\nplotcandle(fundtrend,bullbearline,fundtrend,bullbearline,color=fundtrend<(xrf(fundtrend*0.95,1)) ? color.new(color.white,0):na)\n\n//banker fund exit/quit with red candle\nplotcandle(fundtrend,bullbearline,fundtrend,bullbearline,color=fundtrend<bullbearline ? color.new(color.red,0):na)\n\n//banker fund Weak rebound with blue candle\nplotcandle(fundtrend,bullbearline,fundtrend,bullbearline,color=fundtrend<bullbearline and fundtrend>(xrf(fundtrend*0.95,1)) ? 
color.new(color.blue,0):na)\n\n//overbought and oversold threshold lines\nh1 = hline(80,color=color.red, linestyle=hline.style_dotted)\nh2 = hline(20, color=color.yellow, linestyle=hline.style_dotted)\nh3 = hline(10,color=color.lime, linestyle=hline.style_dotted)\nh4 = hline(90, color=color.fuchsia, linestyle=hline.style_dotted)\nfill(h2,h3,color=color.yellow,transp=70)\nfill(h1,h4,color=color.fuchsia,transp=70)\n\nalertcondition(bankerentry, title='Alert on Yellow Candle', message='Yellow Candle!')\nalertcondition(fundtrend>bullbearline, title='Alert on Green Candle', message='Green Candle!')\nalertcondition(fundtrend<(xrf(fundtrend*0.95,1)), title='Alert on White Candle', message='White Candle!')\nalertcondition(fundtrend<bullbearline, title='Alert on Red Candle', message='Red Candle!')\nalertcondition(fundtrend<bullbearline and fundtrend>(xrf(fundtrend*0.95,1)), title='Alert on Blue Candle', message='Blue Candle!')\n\nCan you help me convert this to python?\n" ]
[ 0, 0 ]
[]
[]
[ "pine_script", "python", "tradingview_api" ]
stackoverflow_0074604279_pine_script_python_tradingview_api.txt
Q: Dynamic replacement of URL text with a hyperlink I have a v-data-table component which cells are filled with text containing URLs: "Text with URL https://stackoverflow.com that I'd like to replace with hyperlink" How can I dynamically replace all these URLs inside text with a-tags? The rest of the text except the URLs should remain unchanged. <template> <v-data-table :headers="headers" :items="items" :single-expand="singleExpand" :expanded.sync="expanded" item-key="id" show-expand > <template v-slot:expanded-item="{ headers, item }"> <td :colspan="headers.length"> {{ item.text }} </td> </template> </v-data-table> </template> <script> data: () => ({ singleExpand: false, expanded: [], headers: [], }), methods: { urlify(text) { const urlRegex = /(https?:\/\/[^\s]+)/g; return text.replace(urlRegex, '<a href="$1">$1</a>'); }, }, </script> A: In order to output real HTML, you will need to use the v-html directive. You already have the urlify(text) method in your script section to convert text to URL. A simple use case of this directive with a method is: <td :colspan="headers.length" v-html="urlify(item.text)"> </td> For more information, you can read the official documentation.
Dynamic replacement of URL text with a hyperlink
I have a v-data-table component which cells are filled with text containing URLs: "Text with URL https://stackoverflow.com that I'd like to replace with hyperlink" How can I dynamically replace all these URLs inside text with a-tags? The rest of the text except the URLs should remain unchanged. <template> <v-data-table :headers="headers" :items="items" :single-expand="singleExpand" :expanded.sync="expanded" item-key="id" show-expand > <template v-slot:expanded-item="{ headers, item }"> <td :colspan="headers.length"> {{ item.text }} </td> </template> </v-data-table> </template> <script> data: () => ({ singleExpand: false, expanded: [], headers: [], }), methods: { urlify(text) { const urlRegex = /(https?:\/\/[^\s]+)/g; return text.replace(urlRegex, '<a href="$1">$1</a>'); }, }, </script>
[ "In order to output real HTML, you will need to use the v-html directive.\nYou already have the urlify(text) method in your script section to convert text to URL. A simple use case of this directive with a method is:\n <td :colspan=\"headers.length\" v-html=\"urlify(item.text)\">\n </td>\n\nFor more information, you can read the official documentation.\n" ]
[ 0 ]
[]
[]
[ "vuejs2", "vuetify.js" ]
stackoverflow_0071793146_vuejs2_vuetify.js.txt
Q: interaction of a chrome extension based on React TSX UI chrome API I'm attempting to build some extension which contains a form and an option to capture screen with desktopCapture, which looks like this: The form is written in React TypeScript and the code for capturing the screen (taken from here) is the following: chrome.runtime.onMessage.addListener( (message, sender, senderResponse) => { if (message.name === "stream" && message.streamId) { let track, canvas; navigator.mediaDevices .getUserMedia({ video: { mandatory: { chromeMediaSource: "desktop", chromeMediaSourceId: message.streamId, }, }, }) .then((stream) => { track = stream.getVideoTracks()[0]; const imageCapture = new ImageCapture(track); return imageCapture.grabFrame(); }) .then((bitmap) => { track.stop(); canvas = document.createElement("canvas"); canvas.width = bitmap.width; canvas.height = bitmap.height; let context = canvas.getContext("2d"); context.drawImage(bitmap, 0, 0, bitmap.width, bitmap.height); return canvas .toDataURL() .then((url) => { //TODO download the image from the URL chrome.runtime.sendMessage( { name: "download", url }, (response) => { if (response.success) { alert("Screenshot saved"); } else { alert("Could not save screenshot"); } canvas.remove(); senderResponse({ success: true }); } ); }) .catch((err) => { alert("Could not take screenshot"); senderResponse({ success: false, message: err }); }); }); } return true; } ); My intention is that when the user will click on "take screen shot", the code above will run, and then, on save, the image will be presented in that box. I was able to 'grab' the two elements, both the box where I wish the image to appear after screenshooting, and the "TAKE SCREEN SHOT" button. as far as I'm aware of, content_script only injects into web-pages (browser), and has no access to extension, therefor, that's not the way to add the code inside.. What am I missing? How could I add an eventListener, that if the button is clicked, the screenCapturing code will run, and I'll be able to set the box to be the captured image? Best regards! A: As i understand, you want to take screenshot of tab's page content. (I assume you don't need to grab playing video or audio content) Fix 1: Use chrome.tabs.captureVisibleTab api for capture screenshot. API link chrome.tabs Add this in background.js const takeShot = async (windowId) => { try { let imgUrl64 = await chrome.tabs.captureVisibleTab(windowId, { format: "jpeg", quality: 80 }); console.log(imgUrl64); } catch (error) { console.error(error); } }; chrome.runtime.onMessage.addListener(async (req, sender, sendResponse) => { if(req.msg === "take_screenshot") takeShot(sender.tab.windowId) } Fix 2: Content_script has limited api access. Check this page. Understand content script capabilities Solution: Send message from content_script to background and ask them to capture screenshot. Background capture screenshot
interaction of a chrome extension based on React TSX UI chrome API
I'm attempting to build some extension which contains a form and an option to capture screen with desktopCapture, which looks like this: The form is written in React TypeScript and the code for capturing the screen (taken from here) is the following: chrome.runtime.onMessage.addListener( (message, sender, senderResponse) => { if (message.name === "stream" && message.streamId) { let track, canvas; navigator.mediaDevices .getUserMedia({ video: { mandatory: { chromeMediaSource: "desktop", chromeMediaSourceId: message.streamId, }, }, }) .then((stream) => { track = stream.getVideoTracks()[0]; const imageCapture = new ImageCapture(track); return imageCapture.grabFrame(); }) .then((bitmap) => { track.stop(); canvas = document.createElement("canvas"); canvas.width = bitmap.width; canvas.height = bitmap.height; let context = canvas.getContext("2d"); context.drawImage(bitmap, 0, 0, bitmap.width, bitmap.height); return canvas .toDataURL() .then((url) => { //TODO download the image from the URL chrome.runtime.sendMessage( { name: "download", url }, (response) => { if (response.success) { alert("Screenshot saved"); } else { alert("Could not save screenshot"); } canvas.remove(); senderResponse({ success: true }); } ); }) .catch((err) => { alert("Could not take screenshot"); senderResponse({ success: false, message: err }); }); }); } return true; } ); My intention is that when the user will click on "take screen shot", the code above will run, and then, on save, the image will be presented in that box. I was able to 'grab' the two elements, both the box where I wish the image to appear after screenshooting, and the "TAKE SCREEN SHOT" button. as far as I'm aware of, content_script only injects into web-pages (browser), and has no access to extension, therefor, that's not the way to add the code inside.. What am I missing? How could I add an eventListener, that if the button is clicked, the screenCapturing code will run, and I'll be able to set the box to be the captured image? Best regards!
[ "As i understand,\nyou want to take screenshot of tab's page content.\n(I assume you don't need to grab playing video or audio content)\nFix 1:\nUse chrome.tabs.captureVisibleTab api for capture screenshot.\nAPI link\nchrome.tabs\nAdd this in background.js\nconst takeShot = async (windowId) => {\ntry {\n let imgUrl64 = await chrome.tabs.captureVisibleTab(windowId, { format: \"jpeg\", quality: 80 });\n console.log(imgUrl64);\n} catch (error) {\n console.error(error);\n}\n};\n\nchrome.runtime.onMessage.addListener(async (req, sender, sendResponse) => {\n if(req.msg === \"take_screenshot\") takeShot(sender.tab.windowId)\n}\n\nFix 2:\nContent_script has limited api access.\nCheck this page. Understand content script capabilities\nSolution:\nSend message from content_script to background and ask them to capture screenshot.\nBackground capture screenshot\n" ]
[ 0 ]
[]
[]
[ "google_chrome_extension", "javascript", "reactjs" ]
stackoverflow_0074678237_google_chrome_extension_javascript_reactjs.txt
Q: Is this way of writing an UPDATE query wrong? [SOLVED] I'm updating my Postgres database with this query: await client.connect(); const statusDeconnect = product.fields.StatusSendDeconnect; const UserDeconnect = product.fields.UserDeconnect; console.log('Status to add : ' + statusDeconnect + ' UserId that will deconnect : ' + UserDeconnect); const updateSend = await client.queryArray`UPDATE userconnectionstatus SET status = ${statusDeconnect} WHERE idconnection = ${UserDeconnect}`; console.log('User ' + UserDeconnect + ' Has been deconnected'); I get a Postgres error that says the idconnection column doesn't exist. Result in the console: (screenshot) Pgadmin page of my table: (screenshot)
Is this way of writing an UPDATE query wrong? [SOLVED]
I'm updating my Postgres database with this query: await client.connect(); const statusDeconnect = product.fields.StatusSendDeconnect; const UserDeconnect = product.fields.UserDeconnect; console.log('Status to add : ' + statusDeconnect + ' UserId that will deconnect : ' + UserDeconnect); const updateSend = await client.queryArray`UPDATE userconnectionstatus SET status = ${statusDeconnect} WHERE idconnection = ${UserDeconnect}`; console.log('User ' + UserDeconnect + ' Has been deconnected'); I get a Postgres error that says the idconnection column doesn't exist. Result in the console: (screenshot) Pgadmin page of my table: (screenshot)
[]
[]
[ "You're missing brackets around your queryArray function. It should be:\nEDIT: As pointed out in the comments, I'm not right - brackets are not required.\nconst updateSend = await client.queryArray(`UPDATE userconnectionstatus SET status = ${statusDeconnect} WHERE idconnection = ${UserDeconnect}`);\n\n" ]
[ -1 ]
[ "deno", "javascript", "postgresql", "typescript" ]
stackoverflow_0074679958_deno_javascript_postgresql_typescript.txt
Q: Entity Framework core, foreign key constraint due to cascade delete Using Entity Framework Core with a code-first approach and ASP.NET MVC. Building a mini-clone of a game I've played as practice. A player has villages, a village has resource fields, and armies that can move between villages. They can attack, and whatnot. Just started working on the army aspect of it and I'm getting the following error when running dotnet ef database update (migrations add works fine): Introducing FOREIGN KEY constraint 'FK_Army_Villages_VillageId' on table 'Army' may cause cycles or multiple cascade paths. Specify ON DELETE NO ACTION or ON UPDATE NO ACTION, or modify other FOREIGN KEY constraints. Could not create constraint or index. See previous errors. A somewhat clear message I suppose, and I've found 4-5 topics covering this. But I cannot get any solutions to work. In a situation like this, if I try to delete an object that is related to another object through a foreign key, it will cause a cascade of deletions, where the related objects are also deleted. Which I'm guessing is the problem here. A screenshot of the database if anyone is interested: I've tried specifying .OnDelete(DeleteBehavior.NoAction); in OnModelCreating. It looks like this: modelBuilder.Entity<Army>() .HasOne(t => t.Village) .WithMany(a => a.Armies) .HasForeignKey(t => t.VillageId) .OnDelete(DeleteBehavior.NoAction); But that does absolutely nothing. You used to be able to add the following to OnModelCreating: builder.Conventions.Remove<OneToManyCascadeDeleteConvention>(); That does not work in EF Core, but supposedly this is the new way to do it: foreach (var relationship in modelBuilder.Model.GetEntityTypes().SelectMany(e => e.GetForeignKeys())) { relationship.DeleteBehavior = DeleteBehavior.Restrict; } But this results in the same error for me. Some threads suggest the problem has to do with properties not being nullable. Thus I tried setting the props inside Army to this: public int? VillageId { get; set; } public Village? Village { get; set; } The army class looks like this: public Army() { } public int ArmyId { get; set; } public Tile Location { get; set; } public ArmyStatus ArmyStatus { get; set; } public List<Troop> Troops { get; set; } = new List<Troop>(); // Navigation props public int? VillageId { get; set; } public Village? Village { get; set; } and Village like this: public Village() { } [Key] public int VillageId { get; set; } public string Name { get; set; } // Navigation props public virtual ICollection<ResourceField> ResourceFields { get; set; } public virtual Player Player { get; set; } public int PlayerId { get; set; } public virtual VillageData VillageData { get; set; } public virtual ICollection<Army> Armies { get; set; } public int TileId { get; set; } public virtual Tile Tile { get; set; } Can anyone point me in the right direction to solve this? A: When I also declared Armies in the Village class nullable, it worked. public virtual ICollection<Army>? Armies { get; set; } Though it has happened before with EF core that I need to delete migrations, the DB, and save every file 3 times over, then repeat the same thing twice, and it suddenly works. So I'm not sure it was actually making it nullable that made it work. In OnModelCreating I only kept: foreach (var relationship in modelBuilder.Model.GetEntityTypes().SelectMany(e => e.GetForeignKeys())) { relationship.DeleteBehavior = DeleteBehavior.Restrict; }
Entity Framework core, foreign key constraint due to cascade delete
Using Entity Framework Core with a code-first approach and ASP.NET MVC. Building a mini-clone of a game I've played as practice. A player has villages, a village has resource fields, and armies that can move between villages. They can attack, and whatnot. Just started working on the army aspect of it and I'm getting the following error when running dotnet ef database update (migrations add works fine): Introducing FOREIGN KEY constraint 'FK_Army_Villages_VillageId' on table 'Army' may cause cycles or multiple cascade paths. Specify ON DELETE NO ACTION or ON UPDATE NO ACTION, or modify other FOREIGN KEY constraints. Could not create constraint or index. See previous errors. A somewhat clear message I suppose, and I've found 4-5 topics covering this. But I cannot get any solutions to work. In a situation like this, if I try to delete an object that is related to another object through a foreign key, it will cause a cascade of deletions, where the related objects are also deleted. Which I'm guessing is the problem here. A screenshot of the database if anyone is interested: I've tried specifying .OnDelete(DeleteBehavior.NoAction); in OnModelCreating. It looks like this: modelBuilder.Entity<Army>() .HasOne(t => t.Village) .WithMany(a => a.Armies) .HasForeignKey(t => t.VillageId) .OnDelete(DeleteBehavior.NoAction); But that does absolutely nothing. You used to be able to add the following to OnModelCreating: builder.Conventions.Remove<OneToManyCascadeDeleteConvention>(); That does not work in EF Core, but supposedly this is the new way to do it: foreach (var relationship in modelBuilder.Model.GetEntityTypes().SelectMany(e => e.GetForeignKeys())) { relationship.DeleteBehavior = DeleteBehavior.Restrict; } But this results in the same error for me. Some threads suggest the problem has to do with properties not being nullable. Thus I tried setting the props inside Army to this: public int? VillageId { get; set; } public Village? Village { get; set; } The army class looks like this: public Army() { } public int ArmyId { get; set; } public Tile Location { get; set; } public ArmyStatus ArmyStatus { get; set; } public List<Troop> Troops { get; set; } = new List<Troop>(); // Navigation props public int? VillageId { get; set; } public Village? Village { get; set; } and Village like this: public Village() { } [Key] public int VillageId { get; set; } public string Name { get; set; } // Navigation props public virtual ICollection<ResourceField> ResourceFields { get; set; } public virtual Player Player { get; set; } public int PlayerId { get; set; } public virtual VillageData VillageData { get; set; } public virtual ICollection<Army> Armies { get; set; } public int TileId { get; set; } public virtual Tile Tile { get; set; } Can anyone point me in the right direction to solve this?
[ "When I also declared Armies in the Village class nullable, it worked.\npublic virtual ICollection<Army>? Armies { get; set; }\n\nThough it has happened before with EF core that I need to delete migrations, the DB, and save every file 3 times over, then repeat the same thing twice, and it suddenly works. So I'm not sure it was actually making it nullable that made it work.\nIn OnModelCreating I only kept:\nforeach (var relationship in modelBuilder.Model.GetEntityTypes().SelectMany(e => e.GetForeignKeys()))\n {\n relationship.DeleteBehavior = DeleteBehavior.Restrict;\n } \n\n" ]
[ 0 ]
[]
[]
[ "asp.net_mvc", "c#", "entity_framework_core" ]
stackoverflow_0074679797_asp.net_mvc_c#_entity_framework_core.txt
Q: I'm a Python beginner; is there any way I can repeat this simple calculator code infinitely? x=int(input("please type in any number: ")) y=input("please type operation: +,-,*,/: ") z=int(input("please type in your 2nd number: ")) if(y=="+"): print("your answer is: ", x+z) print("thanks for using this calculator!") print("goodbye") elif(y=="-"): print("your answer is: ", x-z) print("thanks for using this calculator!") print("goodbye") elif(y=="*"): print("your answer is: ", x*z) print("thanks for using this calculator!") print("goodbye") elif(y=="/"): print("your answer is: ", x/z) print("thanks for using this calculator!") print("goodbye") Nothing yet; I don't know what to do. People online keep saying to use something like while True / restart; it was something like that. A: Start the loop while True: x=int(input("please type in any number: ")) y=input("please type operation: +,-,*,/: ") z=int(input("please type in your 2nd number: ")) if(y=="+"): print("your answer is: ", x+z) print("thanks for using this calculator!") print("goodbye") elif(y=="-"): print("your answer is: ", x-z) print("thanks for using this calculator!") print("goodbye") elif(y=="*"): print("your answer is: ", x*z) print("thanks for using this calculator!") print("goodbye") elif(y=="/"): print("your answer is: ", x/z) print("thanks for using this calculator!") print("goodbye")
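As an aside, here is a slightly more defensive sketch of the same loop, with an explicit way out and basic error handling (assumptions: plain standard-library Python; typing q quits, and bad input or dividing by zero no longer crashes the program):
while True:
    first = input("please type in any number (or q to quit): ")
    if first.lower() == "q":
        print("goodbye")
        break  # leaves the infinite loop cleanly
    try:
        x = int(first)
        y = input("please type operation: +,-,*,/: ")
        z = int(input("please type in your 2nd number: "))
        if y == "+":
            print("your answer is: ", x + z)
        elif y == "-":
            print("your answer is: ", x - z)
        elif y == "*":
            print("your answer is: ", x * z)
        elif y == "/":
            print("your answer is: ", x / z)
        else:
            print("unknown operation:", y)
            continue
        print("thanks for using this calculator!")
    except ValueError:
        print("please type whole numbers only")
    except ZeroDivisionError:
        print("you cannot divide by zero")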
I'm a Python beginner; is there any way I can repeat this simple calculator code infinitely?
x=int(input("please type in any number: ")) y=input("please type operation: +,-,*,/: ") z=int(input("please type in your 2nd number: ")) if(y=="+"): print("your answer is: ", x+z) print("thanks for using this calculator!") print("goodbye") elif(y=="-"): print("your answer is: ", x-z) print("thanks for using this calculator!") print("goodbye") elif(y=="*"): print("your answer is: ", x*z) print("thanks for using this calculator!") print("goodbye") elif(y=="/"): print("your answer is: ", x/z) print("thanks for using this calculator!") print("goodbye") Nothing yet, i dont know what do do, people keep saying online that something like While true() Restart Idk it was smth like that
[ "Start the loop\nwhile True:\n x=int(input(\"please type in any number: \"))\n y=input(\"please type operation: +,-,*,/: \")\n z=int(input(\"please type in your 2nd number: \"))\n if(y==\"+\"):\n print(\"your answer is: \", x+z)\n print(\"thanks for using this calculator!\")\n print(\"goodbye\")\n elif(y==\"-\"):\n print(\"your answer is: \", x-z)\n print(\"thanks for using this calculator!\")\n print(\"goodbye\")\n elif(y==\"*\"):\n print(\"your answer is: \", x*z)\n print(\"thanks for using this calculator!\")\n print(\"goodbye\")\n elif(y==\"/\"):\n print(\"your answer is: \", x/z)\n print(\"thanks for using this calculator!\")\n print(\"goodbye\")\n\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074679948_python.txt
Q: Working with a Python array full of arrays OK, so I have an array of arrays. I'm currently wondering whether I'd be better off exporting all of it into my MySQL database and doing the sorting there, or working with the array itself. Here is part of the array: datas = [['Anonymous User-b82a42', 'DYDXUSDT', 'Short', 20, 258.2, 2.332, 2.333, -0.26, -0.8573, '2022-11-16 14:02:28'], ['Anonymous User-b82a42', 'OCEANUSDT', 'Long', 20, 4732.0, 0.13113, 0.13145, 1.51, 4.8688, '2022-11-16 09:04:04'], ['Anonymous User-b82a42', 'CHZUSDT', 'Short', 20, 2684.0, 0.22187, 0.22637, -12.08, -39.7579, '2022-11-16 11:10:17'], ['Anonymous User-b82a42', 'DUSKUSDT', 'Long', 20, 6636.0, 0.09043, 0.09007, -2.38, -7.9724, '2022-11-16 12:40:17'], ['Anonymous User-b82a42', 'CTSIUSDT', 'Long', 20, 5614.0, 0.1062, 0.1058, -2.22, -7.4594, '2022-11-16 13:47:25']...] Here are the things I need to do: For the same symbol data[1], keep the biggest leverage data[3] and remove/don't save the others. If 2 rows with the same symbol data[1] have the same direction data[2] but not the same leverage data[3], keep only the biggest. If 2 rows with the same symbol data[1] have opposite directions data[2] but the same leverage data[3], delete/skip/don't save both. The thing I face is that it seems like a lot to process from the array itself. In the case where the same symbol data[1] appears several times, if I loop over each trade, I may delete trades that are valid compared to others, even though they are not valid compared to the current trade in the loop. What should I do? I understand sorted() can be used, but I can't find a way to achieve what I need, and I wonder whether I'd be better off saving everything to MySQL and achieving it with an SQL query. A: I solved this by storing the data in 2 database tables and comparing them afterwards.
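For reference, the in-memory version is also manageable. Here is a sketch of the three rules as stated, assuming the question's indices (row[1] is the symbol, row[2] the direction, row[3] the leverage); filter_trades is a hypothetical helper name, and when several same-direction rows share the top leverage it keeps the first one:
from collections import defaultdict

def filter_trades(datas):
    by_symbol = defaultdict(list)
    for row in datas:
        by_symbol[row[1]].append(row)  # group every trade by its symbol
    kept = []
    for symbol, rows in by_symbol.items():
        max_lev = max(r[3] for r in rows)
        top = [r for r in rows if r[3] == max_lev]  # rules 1-2: only the biggest leverage survives
        if len({r[2] for r in top}) > 1:
            continue  # rule 3: opposite directions at the same (top) leverage -> drop both
        kept.append(top[0])
    return kept

# kept = filter_trades(datas); each symbol now appears at most once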
Working with a Python array full of arrays
OK, so I have an array of arrays. I'm currently wondering whether I'd be better off exporting all of it into my MySQL database and doing the sorting there, or working with the array itself. Here is part of the array: datas = [['Anonymous User-b82a42', 'DYDXUSDT', 'Short', 20, 258.2, 2.332, 2.333, -0.26, -0.8573, '2022-11-16 14:02:28'], ['Anonymous User-b82a42', 'OCEANUSDT', 'Long', 20, 4732.0, 0.13113, 0.13145, 1.51, 4.8688, '2022-11-16 09:04:04'], ['Anonymous User-b82a42', 'CHZUSDT', 'Short', 20, 2684.0, 0.22187, 0.22637, -12.08, -39.7579, '2022-11-16 11:10:17'], ['Anonymous User-b82a42', 'DUSKUSDT', 'Long', 20, 6636.0, 0.09043, 0.09007, -2.38, -7.9724, '2022-11-16 12:40:17'], ['Anonymous User-b82a42', 'CTSIUSDT', 'Long', 20, 5614.0, 0.1062, 0.1058, -2.22, -7.4594, '2022-11-16 13:47:25']...] Here are the things I need to do: For the same symbol data[1], keep the biggest leverage data[3] and remove/don't save the others. If 2 rows with the same symbol data[1] have the same direction data[2] but not the same leverage data[3], keep only the biggest. If 2 rows with the same symbol data[1] have opposite directions data[2] but the same leverage data[3], delete/skip/don't save both. The thing I face is that it seems like a lot to process from the array itself. In the case where the same symbol data[1] appears several times, if I loop over each trade, I may delete trades that are valid compared to others, even though they are not valid compared to the current trade in the loop. What should I do? I understand sorted() can be used, but I can't find a way to achieve what I need, and I wonder whether I'd be better off saving everything to MySQL and achieving it with an SQL query.
[ "I solved this by storing the data in 2 database tables and comparing them afterwards.\n" ]
[ 0 ]
[]
[]
[ "arrays", "mysql", "python", "sorting" ]
stackoverflow_0074465997_arrays_mysql_python_sorting.txt
Q: I was building iOS and Android build fine, but now when I try to build Android it does not work? (Unity 2021.3.6f1) I added the apple App Tracking Transparency(ATT) a month ago and everything was working fine. I get the pop that asks users if it should track or not. I made sure it only works for iOS and does not work for android. Everything was fine but now when I try to build an android build it gives me these 5 errors: Assets/Scripts/Services/AdMobController.cs(60,9): error CS0234: The type or namespace name 'ATTrackingStatusBinding' does not exist in the namespace 'Unity.Advertisement.IosSupport' (are you missing an assembly reference?) Assets/Samples/iOS 14 Advertising Support/1.0.0/01 Context Screen/Scripts/ContextScreenView.cs(20,29): warning CS0067: The event 'ContextScreenView.sentTrackingAuthorizationRequest' is never used Error building Player because scripts had compiler errors Build completed with a result of 'Failed' in 4 seconds (4237 ms) UnityEngine.GUIUtility:ProcessEvent (int,intptr,bool&) (at /Users/bokken/buildslave/unity/build/Modules/IMGUI/GUIUtility.cs:189) UnityEditor.BuildPlayerWindow+BuildMethodException: 2 errors at UnityEditor.BuildPlayerWindow+DefaultBuildMethods.BuildPlayer (UnityEditor.BuildPlayerOptions options) [0x002ce] in /Users/bokken/buildslave/unity/build/Editor/Mono/BuildPlayerWindowBuildMethods.cs:193 at UnityEditor.BuildPlayerWindow.CallBuildMethods (System.Boolean askForBuildLocation, UnityEditor.BuildOptions defaultBuildOptions) [0x00080] in Everything in my project was fine and was building fine. I haven't touched my project or updated the Unity Editor Version. I don't know how to solve it. It says I have complier error but it was working fine before. What is the cause of this and when I go to my Admob account I get this error: Some apps haven't been configured to use Apple's SKAdNetwork To ensure you're getting credit for all ads activity, like app installs, be sure to configure SKAdNetwork with Google's recommended network IDs. How can I fix this, please help me out. Like I said everything was working fine but suddenly I got these errors. I even when to Unity Preference --> External Tools --> unchecked and checked again the android jdk, ndk, grate, etc. A: Disable ATT code when building for Android.
I was building iOS and Android build fine, but now when I try to build Android it does not work? (Unity 2021.3.6f1)
I added the apple App Tracking Transparency(ATT) a month ago and everything was working fine. I get the pop that asks users if it should track or not. I made sure it only works for iOS and does not work for android. Everything was fine but now when I try to build an android build it gives me these 5 errors: Assets/Scripts/Services/AdMobController.cs(60,9): error CS0234: The type or namespace name 'ATTrackingStatusBinding' does not exist in the namespace 'Unity.Advertisement.IosSupport' (are you missing an assembly reference?) Assets/Samples/iOS 14 Advertising Support/1.0.0/01 Context Screen/Scripts/ContextScreenView.cs(20,29): warning CS0067: The event 'ContextScreenView.sentTrackingAuthorizationRequest' is never used Error building Player because scripts had compiler errors Build completed with a result of 'Failed' in 4 seconds (4237 ms) UnityEngine.GUIUtility:ProcessEvent (int,intptr,bool&) (at /Users/bokken/buildslave/unity/build/Modules/IMGUI/GUIUtility.cs:189) UnityEditor.BuildPlayerWindow+BuildMethodException: 2 errors at UnityEditor.BuildPlayerWindow+DefaultBuildMethods.BuildPlayer (UnityEditor.BuildPlayerOptions options) [0x002ce] in /Users/bokken/buildslave/unity/build/Editor/Mono/BuildPlayerWindowBuildMethods.cs:193 at UnityEditor.BuildPlayerWindow.CallBuildMethods (System.Boolean askForBuildLocation, UnityEditor.BuildOptions defaultBuildOptions) [0x00080] in Everything in my project was fine and was building fine. I haven't touched my project or updated the Unity Editor Version. I don't know how to solve it. It says I have complier error but it was working fine before. What is the cause of this and when I go to my Admob account I get this error: Some apps haven't been configured to use Apple's SKAdNetwork To ensure you're getting credit for all ads activity, like app installs, be sure to configure SKAdNetwork with Google's recommended network IDs. How can I fix this, please help me out. Like I said everything was working fine but suddenly I got these errors. I even when to Unity Preference --> External Tools --> unchecked and checked again the android jdk, ndk, grate, etc.
[ "Disable ATT code when building for Android.\n" ]
[ 0 ]
[]
[]
[ "admob", "android", "ios", "unity3d" ]
stackoverflow_0074679869_admob_android_ios_unity3d.txt
Q: Update Old Angular codebase to latest (v15) I am looking for the least painful way to update an Angular application from v7 to v15. I have started out on it; it's a pretty large codebase with loads of packages and implementations that are deprecated. Is there an easier way to go about this? Thank you for your answers! I used ng update @angular/cli and did the same for the core Angular modules. It's just really a lot of work, and I wanted to find out if there was an easier way around it. A: The best way to handle such a big jump is to break it into smaller steps and take them one by one. I always use the Angular update guide to get all the recommended and required changes: https://update.angular.io/ If you use this guide and update major version by major version, it should be much easier.
Update Old Angular codebase to latest (v15)
I am looking for the least painful way to update an Angular application from v7 to v15. I have started out on it; it's a pretty large codebase with loads of packages and implementations that are deprecated. Is there an easier way to go about this? Thank you for your answers! I used ng update @angular/cli and did the same for the core Angular modules. It's just really a lot of work, and I wanted to find out if there was an easier way around it.
[ "The best way to handle such a big jump is to break it into smaller steps and take them one by one.\nI always use the Angular update guide to get all the recommended and required changes: https://update.angular.io/\nIf you use this guide and update major version by major version, it should be much easier.\n" ]
[ 0 ]
[ "1.. we learned how to get our Todo application up and running and deploy it to GitHub pages. This worked just fine but, unfortunately, the whole app was crammed into a single component.\n2.. we examined a more modular component architecture and learned how to break this single component into a structured tree of smaller components that are easier to understand, reuse and maintain.\n3.. we updated our application to communicate with a REST API backend using RxJS and Angular’s HTTP service.\n4.., we introduced Angular Router and learned how the router updates our application when the browser URL changes and how we can use the router to resolve data from our backend API.\n5.., we added authentication to our application and learned how we can protect sections from our application from unauthorized access.\n" ]
[ -1 ]
[ "angular", "angular_cli", "javascript", "typescript" ]
stackoverflow_0074673710_angular_angular_cli_javascript_typescript.txt
Q: Quit button assistance needed I'm trying to make a code for this topic i'm doing and I've manage to get some of it done but when it comes to quiting my tkinter menu it doesn't close unless I manually close it, I've got the the button for the option to close it but it doesn't work. Can anyone help with my problem. Here's my code below. import sys import tkinter from tkinter import* import time global v global popJ popJ = 0 def genInput(): #Allows the user to input the data gen = Toplevel() gen.wm_title("Data Input") v = IntVar() ent1 = Entry(gen, textvariable = v).pack() ent1Txt = Label(gen, text = 'Please input Juvenile Populations') ent1Txt.pack() v2 = StringVar() ent2 = Entry(gen, textvariable = v2) ent2Txt = Label(gen, text = 'Please input Adult Populations') ent2.pack() ent2Txt.pack() v3 = StringVar() ent3 = Entry(gen, textvariable = v3) ent3Txt = Label(gen, text = 'Please input Senile Populations') ent3.pack() ent3Txt.pack() v4 = StringVar() ent4 = Entry(gen, textvariable = v4) ent4Txt = Label(gen, text = 'Please input Survival rates for Juveniles') ent4.pack() ent4Txt.pack() v5 = StringVar() ent5 = Entry(gen, textvariable = v5) ent5Txt = Label(gen, text = 'Please input Survival rates for Adults') ent5.pack() ent5Txt.pack() v6 = StringVar() ent6 = Entry(gen, textvariable = v6) ent6Txt = Label(gen, text = 'Please input Survival rates for Seniles') ent6.pack() ent6Txt.pack() v7 = StringVar() ent7 = Entry(gen, textvariable = v7) ent7Txt = Label(gen, text = 'Please input the birth rate') ent7.pack() ent7Txt.pack() v8 = StringVar() ent8 = Entry(gen, textvariable = v8) ent8Txt = Label(gen, text = 'Number of Generations') ent8.pack() ent8Txt.pack() def quit1(): # Needs to be here or it breaks the program gen.destroy() return def submit(): global popJ popJ = v.get() popJtxt = Label(gen, text= v.get()).pack() return submit1= Button(gen, text="Submit") submit1.pack() submit1.configure(command = submit) return1 = Button(gen, text = 'Return to Menu') return1.pack(pady=30) return1.configure(command = quit1) return def genView(): # should display the data disp = Toplevel() disp.wm_title('Displaying data Values') popJuvenilesTxt = Label (disp, text = popJ) popJuvenilesTxt.grid(row =1, column = 1) def menu(): # creates the gui menu menu = Tk() menu.wm_title("Greenfly model") genInp = Button(menu,text = "Set Generation Values") genVew = Button(menu,text = 'Dysplay Generation Values') modelCal = Button(menu,text = 'Run model') exportData = Button(menu,text = 'Export Data') quitProgram = Button(menu,text = 'Quit') genTxt = Label(menu, text= 'Input the Generation values') genvTxt = Label (menu, text = 'View the current generation values') modelTxt = Label (menu, text = 'Run the model') exportTxt = Label (menu, text = 'Export data') quitTxt = Label (menu, text= 'Exit the program') genInp.grid(row=1, column=1) genVew.grid(row=2, column=1) modelCal.grid(row=3, column=1) exportData.grid(row=4 , column=1) quitProgram.grid(row=5, column=1) genTxt.grid(row=1, column = 2) genvTxt.grid(row=2, column = 2) modelTxt.grid(row=3, column = 2) exportTxt.grid(row=4, column = 2) quitTxt.grid(row=5, column = 2) genInp.configure(command = genInput) genVew.configure(command = genView) menu.mainloop() menu() A: For Tkinter you can just pass gen.quit to the command of a button widget, like so: close = Button(gen, text = 'Close', command = gen.quit).pack() A: You can use sys.exit() to close the program. close(gen, text="Close", command = lambda: sys.exit()).pack()
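One detail worth noting in the question's code: the quitProgram button is never given a command (there is no quitProgram.configure(command=...) line), so clicking Quit is a no-op; wiring it to menu.destroy would close the program. A minimal standalone sketch of the difference between closing one window and quitting everything, using only the standard library:
import tkinter as tk

root = tk.Tk()

def open_window():
    win = tk.Toplevel(root)
    # win.destroy() closes only this Toplevel; root.destroy() ends mainloop and the program
    tk.Button(win, text="Close this window", command=win.destroy).pack()
    tk.Button(win, text="Quit program", command=root.destroy).pack()

tk.Button(root, text="Open window", command=open_window).pack()
tk.Button(root, text="Quit", command=root.destroy).pack()
root.mainloop()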
Quit button assistance needed
I'm trying to make a code for this topic i'm doing and I've manage to get some of it done but when it comes to quiting my tkinter menu it doesn't close unless I manually close it, I've got the the button for the option to close it but it doesn't work. Can anyone help with my problem. Here's my code below. import sys import tkinter from tkinter import* import time global v global popJ popJ = 0 def genInput(): #Allows the user to input the data gen = Toplevel() gen.wm_title("Data Input") v = IntVar() ent1 = Entry(gen, textvariable = v).pack() ent1Txt = Label(gen, text = 'Please input Juvenile Populations') ent1Txt.pack() v2 = StringVar() ent2 = Entry(gen, textvariable = v2) ent2Txt = Label(gen, text = 'Please input Adult Populations') ent2.pack() ent2Txt.pack() v3 = StringVar() ent3 = Entry(gen, textvariable = v3) ent3Txt = Label(gen, text = 'Please input Senile Populations') ent3.pack() ent3Txt.pack() v4 = StringVar() ent4 = Entry(gen, textvariable = v4) ent4Txt = Label(gen, text = 'Please input Survival rates for Juveniles') ent4.pack() ent4Txt.pack() v5 = StringVar() ent5 = Entry(gen, textvariable = v5) ent5Txt = Label(gen, text = 'Please input Survival rates for Adults') ent5.pack() ent5Txt.pack() v6 = StringVar() ent6 = Entry(gen, textvariable = v6) ent6Txt = Label(gen, text = 'Please input Survival rates for Seniles') ent6.pack() ent6Txt.pack() v7 = StringVar() ent7 = Entry(gen, textvariable = v7) ent7Txt = Label(gen, text = 'Please input the birth rate') ent7.pack() ent7Txt.pack() v8 = StringVar() ent8 = Entry(gen, textvariable = v8) ent8Txt = Label(gen, text = 'Number of Generations') ent8.pack() ent8Txt.pack() def quit1(): # Needs to be here or it breaks the program gen.destroy() return def submit(): global popJ popJ = v.get() popJtxt = Label(gen, text= v.get()).pack() return submit1= Button(gen, text="Submit") submit1.pack() submit1.configure(command = submit) return1 = Button(gen, text = 'Return to Menu') return1.pack(pady=30) return1.configure(command = quit1) return def genView(): # should display the data disp = Toplevel() disp.wm_title('Displaying data Values') popJuvenilesTxt = Label (disp, text = popJ) popJuvenilesTxt.grid(row =1, column = 1) def menu(): # creates the gui menu menu = Tk() menu.wm_title("Greenfly model") genInp = Button(menu,text = "Set Generation Values") genVew = Button(menu,text = 'Dysplay Generation Values') modelCal = Button(menu,text = 'Run model') exportData = Button(menu,text = 'Export Data') quitProgram = Button(menu,text = 'Quit') genTxt = Label(menu, text= 'Input the Generation values') genvTxt = Label (menu, text = 'View the current generation values') modelTxt = Label (menu, text = 'Run the model') exportTxt = Label (menu, text = 'Export data') quitTxt = Label (menu, text= 'Exit the program') genInp.grid(row=1, column=1) genVew.grid(row=2, column=1) modelCal.grid(row=3, column=1) exportData.grid(row=4 , column=1) quitProgram.grid(row=5, column=1) genTxt.grid(row=1, column = 2) genvTxt.grid(row=2, column = 2) modelTxt.grid(row=3, column = 2) exportTxt.grid(row=4, column = 2) quitTxt.grid(row=5, column = 2) genInp.configure(command = genInput) genVew.configure(command = genView) menu.mainloop() menu()
[ "For Tkinter you can just pass gen.quit to the command of a button widget, like so:\nclose = Button(gen, text = 'Close', command = gen.quit).pack()\n\n", "You can use sys.exit() to close the program.\nclose(gen, text=\"Close\", command = lambda: sys.exit()).pack()\n\n" ]
[ 0, 0 ]
[]
[]
[ "button", "python", "tkinter" ]
stackoverflow_0039767084_button_python_tkinter.txt
Q: Getting FormatException response from my flutter api response I have a button which, when clicked, prints out the response. This is how the response looks: { "status": "success", "user": "Worked well" } When I test it with Postman it works fine, but when I try it from my Flutter project, I get this error: I/flutter ( 5147): Response: - Instance of 'Response' I/flutter ( 5147): FormatException: Unexpected character (at character 1) I/flutter ( 5147): <!DOCTYPE html> I/flutter ( 5147): ^ This is my Flutter code: http.Response response = await http.post( Uri.parse(url + 'testMe.php'), headers: headers, body: body, ); print('response ${response}'); if (response.body.isNotEmpty) { json.decode(json.encode(response.body)); } else { print('Response is empty...'); } One thing I noticed is that sometimes it does not throw the error above in Flutter and works fine, and sometimes it throws the error, so I don't know why it happens that way. (Screenshots: Flutter output, Postman request, headers.) A: This worked for me: Map<String, String> headers = { 'Content-Type': 'application/json', 'Charset': 'utf-8', }; A: Your API returns HTML instead of JSON; you can do this to avoid getting a FormatException: http.Response response = await http.post( Uri.parse(url + 'testMe.php'), headers: headers, body: body, ); print('response ${response}'); if (response.statusCode == 200) { json.decode(response.body); } else { print('Response is empty...'); } This usually happens when the status code is 500 or 404; by checking for status code 200 you can avoid this FormatException. Also, you don't need to encode the response and decode it again; your response is already encoded on the server side, just decode it.
Getting FormatException response from my flutter api response
I have a button which, when clicked, prints out the response. This is how the response looks: { "status": "success", "user": "Worked well" } When I test it with Postman it works fine, but when I try it from my Flutter project, I get this error: I/flutter ( 5147): Response: - Instance of 'Response' I/flutter ( 5147): FormatException: Unexpected character (at character 1) I/flutter ( 5147): <!DOCTYPE html> I/flutter ( 5147): ^ This is my Flutter code: http.Response response = await http.post( Uri.parse(url + 'testMe.php'), headers: headers, body: body, ); print('response ${response}'); if (response.body.isNotEmpty) { json.decode(json.encode(response.body)); } else { print('Response is empty...'); } One thing I noticed is that sometimes it does not throw the error above in Flutter and works fine, and sometimes it throws the error, so I don't know why it happens that way. (Screenshots: Flutter output, Postman request, headers.)
[ "This worked for me:\nMap<String, String> headers = {\n 'Content-Type': 'application/json',\n 'Charset': 'utf-8',\n};\n\n", "Your API returns HTML instead of JSON; you can do this to avoid getting a FormatException:\nhttp.Response response = await http.post(\n Uri.parse(url + 'testMe.php'),\n headers: headers,\n body: body,\n);\n\nprint('response ${response}');\nif (response.statusCode == 200) {\n json.decode(response.body);\n} else {\n print('Response is empty...');\n}\n\nThis usually happens when the status code is 500 or 404; by checking for status code 200 you can avoid this FormatException.\nAlso, you don't need to encode the response and decode it again; your response is already encoded on the server side, just decode it.\n" ]
[ 0, 0 ]
[]
[]
[ "flutter" ]
stackoverflow_0074679884_flutter.txt
Q: Purpose of singletons in programming This is admittedly a rather loose question. My current understanding of singletons is that they are a class that you set up in such a way that only one instance is ever created. This sounds a lot like a static class to me. The main difference being that with a static class you don't / can't instance it, you just use it such as Math.pi(). With a singleton class, you would still need to do something like singleton firstSingleton = new singleton(); firstSingleton.set_name("foo"); singleton secondSingleton = new singleton(); Correct me if i am wrong, but firstSingleton == secondSingleton right now, yes? secondSingleston.set_name("bar"); firstSingleton.report_name(); // will output "bar" won't it? Please note, I am asking this language independently, more about the concept. So I am not worried about actually how to code such a class, but more why you would wan't to and what thing you would need to consider. A: The main advantage of a singleton over a class consisting of statics is that you can later easily decide that you need in fact more than one instance, e.g. one per thread. However, in practice the main purpose of singletons is to make people feel less bad about having global variables. A practical example for a good use of a singleton: you have an app that uses an SQL database and you need a connection pool. The purpose of such a pool is to reuse DB connection, so you definitely want all clients to use the same pool. Thus, having it as a singleton is the correct design. But one day you need the app to connect to a second DB server, and realize that you cannot have connections to different servers in the same pool. Thus your "one instance overall" singleton becomes "one instance per DB server". A: why you would wan't to I wouldn't because singletons usually are very bad way to solve your problems. My recommendation to you is to avoid them completely. The main reasons are: Singletons mostly represent global state (which is evil). Correct dependency injection becomes impossible. I suggest you read the rest (including thorough explanations) in this Google employee's blog: http://misko.hevery.com/2008/08/17/singletons-are-pathological-liars/ http://misko.hevery.com/2008/08/21/where-have-all-the-singletons-gone/ http://misko.hevery.com/2008/08/25/root-cause-of-singletons/ http://misko.hevery.com/code-reviewers-guide/flaw-brittle-global-state-singletons/ A: Like others have said: Singletons are global variables by another name. Singletons are usually a bad idea. Singletons could be replaced by "monostate" classes - classes that have apparently normal construction / destruction semantics but all share the same state. Note that in my opinion "static classes" are usually also a bad idea, a hackish workaround for a language that does not allow free functions, or for sharing state between a bunch of functions without wanting to pass that state as a parameter. In my experience nearly all designs with singletons or static classes can be turned into something better, more easily understood and more flexible by getting rid of those constructs. Edit: By request, why most singletons are global variables by another name. In most of the languages I know, most singleton classes are accessed through a static member function of that class. The single instance is available to all code that has access to the definition of the singleton class. This is a global variable - all code that includes the class could be making modifications to the single instance of your singleton. 
If you do not use the static member function (or some static factory method which has the same implications), but instead pass the singleton object to all clients that need it, then you would have no need for the singleton pattern, just pass the same object to all clients. A: Singletons are mostly useful when you want an interface to a singleton service, but you don't know until runtime which concrete class will be instantiated. For instance, you might want to declare a central logging service, but only decide at runtime whether to hook in a file logger, stub logger, database logger, or message-queue logger. A: A little knowledge is a dangerous thing and Singletons are dangerous entities. In addition to written things above, I can emphasize the life-time management of Singleton objects are also important. In ACE framework, it is handled successfully. You can find the paper here: http://www.cs.wustl.edu/~schmidt/PDF/ObjMan.pdf Please also note that singletons should be non-copyable classes. This pattern may seem to be the easiest one, but, on the contrary it is one of the difficult. Therefore, I ask to candidates about this evil points in Singletons. A: Not all languages have "static classes" (for example C++ doesn't have them). Again with the C++ example, adding static variables to a class is a pain because you need to put them in both the header and the .cpp file, so a singleton in that case is very useful. Every language is different. I guess in C# they are not very useful (and in fact, from what I know, they are not used very often) A: Singleton is a very useful replacement of global variables, used all across the code. Singletons are usually not "new"ed or "delete"d, they tend to be initialized on first use and deleted along with program scope Singletons perfectly match for wrapping logging, configuration and other hardware-interfacing classes. A: In addition to the other answers I'd have to say that Singletons can help you when you want a static class, but can't have it, because due to the design of your application it will be inheriting an instantiable class. A: There's two ways to use singletons. The way they should be used. Typically with immutable variables (C#'s String.Empty, classes in Smalltalk, etc.). This is approximately 1% of singleton usage. As a replacement for global variables. This is bad. The root cause of this is people that want to share common objects without understanding how to properly use a Builder. Use of Singletons in this fashion is typically a sign of a lack of deep understanding of object-oriented design. A: It is pretty much another word for "Global Variables", which has its pros and cons. However, the only thing that would make the Singleton worthy your time is it assures some sort of "maintainability" in the future for your code in case you decided that there is a actually a need for more than one "instance" of that class.
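Since the question is language-independent, here is one minimal sketch in Python (one of several common implementations) that reproduces exactly the behaviour described in the question:
class Singleton:
    _instance = None

    def __new__(cls):
        # create the single instance on first use, then always hand back the same object
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def set_name(self, name):
        self.name = name

first = Singleton()
first.set_name("foo")
second = Singleton()
print(first is second)  # True: both variables point at the one instance
second.set_name("bar")
print(first.name)       # prints "bar", matching the behaviour asked about
A module-level instance achieves the same thing with less machinery, which is why many Python codebases skip the pattern entirely.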
Purpose of singletons in programming
This is admittedly a rather loose question. My current understanding of singletons is that they are a class that you set up in such a way that only one instance is ever created. This sounds a lot like a static class to me. The main difference being that with a static class you don't / can't instance it, you just use it such as Math.pi(). With a singleton class, you would still need to do something like singleton firstSingleton = new singleton(); firstSingleton.set_name("foo"); singleton secondSingleton = new singleton(); Correct me if i am wrong, but firstSingleton == secondSingleton right now, yes? secondSingleston.set_name("bar"); firstSingleton.report_name(); // will output "bar" won't it? Please note, I am asking this language independently, more about the concept. So I am not worried about actually how to code such a class, but more why you would wan't to and what thing you would need to consider.
[ "The main advantage of a singleton over a class consisting of statics is that you can later easily decide that you need in fact more than one instance, e.g. one per thread.\nHowever, in practice the main purpose of singletons is to make people feel less bad about having global variables.\nA practical example for a good use of a singleton: you have an app that uses an SQL database and you need a connection pool. The purpose of such a pool is to reuse DB connections, so you definitely want all clients to use the same pool. Thus, having it as a singleton is the correct design. But one day you need the app to connect to a second DB server, and realize that you cannot have connections to different servers in the same pool. Thus your \"one instance overall\" singleton becomes \"one instance per DB server\".\n", "\nwhy you would want to\n\nI wouldn't, because singletons are usually a very bad way to solve your problems. My recommendation to you is to avoid them completely.\nThe main reasons are:\n\nSingletons mostly represent global state (which is evil).\nCorrect dependency injection becomes impossible.\n\nI suggest you read the rest (including thorough explanations) in this Google employee's blog:\n\nhttp://misko.hevery.com/2008/08/17/singletons-are-pathological-liars/\nhttp://misko.hevery.com/2008/08/21/where-have-all-the-singletons-gone/\nhttp://misko.hevery.com/2008/08/25/root-cause-of-singletons/\nhttp://misko.hevery.com/code-reviewers-guide/flaw-brittle-global-state-singletons/\n\n", "Like others have said:\n\nSingletons are global variables by another name.\nSingletons are usually a bad idea.\nSingletons could be replaced by \"monostate\" classes - classes that have apparently normal construction / destruction semantics but all share the same state.\n\nNote that in my opinion \"static classes\" are usually also a bad idea, a hackish workaround for a language that does not allow free functions, or for sharing state between a bunch of functions without wanting to pass that state as a parameter.\nIn my experience nearly all designs with singletons or static classes can be turned into something better, more easily understood and more flexible by getting rid of those constructs. \nEdit: By request, why most singletons are global variables by another name.\nIn most of the languages I know, most singleton classes are accessed through a static member function of that class. The single instance is available to all code that has access to the definition of the singleton class. This is a global variable - all code that includes the class could be making modifications to the single instance of your singleton.\nIf you do not use the static member function (or some static factory method which has the same implications), but instead pass the singleton object to all clients that need it, then you would have no need for the singleton pattern, just pass the same object to all clients.\n", "Singletons are mostly useful when you want an interface to a singleton service, but you don't know until runtime which concrete class will be instantiated.\nFor instance, you might want to declare a central logging service, but only decide at runtime whether to hook in a file logger, stub logger, database logger, or message-queue logger.\n", "A little knowledge is a dangerous thing and Singletons are dangerous entities. In addition to the points written above, I should emphasize that the life-time management of Singleton objects is also important. In the ACE framework, it is handled successfully. You can find the paper here: http://www.cs.wustl.edu/~schmidt/PDF/ObjMan.pdf\nPlease also note that singletons should be non-copyable classes. This pattern may seem to be the easiest one, but, on the contrary, it is one of the most difficult. Therefore, I ask candidates about these evil points in Singletons. \n", "Not all languages have \"static classes\" (for example C++ doesn't have them).\nAgain with the C++ example, adding static variables to a class is a pain because you need to put them in both the header and the .cpp file, so a singleton in that case is very useful.\nEvery language is different. I guess in C# they are not very useful (and in fact, from what I know, they are not used very often).\n", "\nA singleton is a very useful replacement for global variables, used all across the code.\nSingletons are usually not \"new\"ed or \"delete\"d; they tend to be initialized on first use and deleted along with program scope.\nSingletons are a perfect match for wrapping logging, configuration and other hardware-interfacing classes.\n\n", "In addition to the other answers I'd have to say that Singletons can help you when you want a static class, but can't have it, because due to the design of your application it will be inheriting an instantiable class. \n", "There are two ways to use singletons.\n\nThe way they should be used. Typically with immutable variables (C#'s String.Empty, classes in Smalltalk, etc.). This is approximately 1% of singleton usage.\nAs a replacement for global variables. This is bad. The root cause of this is people who want to share common objects without understanding how to properly use a Builder. Use of Singletons in this fashion is typically a sign of a lack of deep understanding of object-oriented design.\n\n", "It is pretty much another word for \"Global Variables\", which has its pros and cons. However, the only thing that would make the Singleton worth your time is that it assures some sort of \"maintainability\" in the future for your code, in case you decide that there is actually a need for more than one \"instance\" of that class.\n" ]
[ 57, 14, 4, 2, 2, 0, 0, 0, 0, 0 ]
[]
[]
[ "language_agnostic", "singleton", "theory" ]
stackoverflow_0002551112_language_agnostic_singleton_theory.txt
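A brief follow-up to the singleton entry above: the asker's suspicion is correct — with a classic singleton both variables end up referring to the same instance, so the second set_name overwrites the first. A minimal Python sketch of that behaviour (one common implementation via __new__, not the only way to write a singleton; all names here are hypothetical):

class Singleton:
    _instance = None  # holds the single shared instance

    def __new__(cls):
        # Create the instance on first use; every later call returns the same object.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def set_name(self, name):
        self.name = name

first = Singleton()
second = Singleton()
first.set_name("foo")
second.set_name("bar")
print(first is second)  # True: both names point at one object
print(first.name)       # prints "bar", exactly as the question predicted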
Q: Is there any feasible solution to read WOT battle results .dat files? I am new here, trying to solve one of my interesting questions in World of Tanks. I heard that all battle data is saved on the client's disk in the Wargaming.net folder, and I want to do some batch data analysis of our clan's battle performance. image It is said that these .dat files are a kind of JSON file, so I tried to use a couple of lines of Python code to read them, but failed. import json f = open('ex.dat', 'r', encoding='unicode_escape') content = f.read() a = json.loads(content) print(type(a)) print(a) f.close() The code is very simple and obviously fails. Well, could anyone tell me the truth about that? Added on Feb. 9th, 2022 After I tried another piece of code via Jupyter Notebook, it seems like something can be shown from the .dat files import struct import numpy as np import matplotlib.pyplot as plt import io with open('C:/Users/xukun/Desktop/br/ex.dat', 'rb') as f: fbuff = io.BufferedReader(f) N = len(fbuff.read()) print('byte length: ', N) with open('C:/Users/xukun/Desktop/br/ex.dat', 'rb') as f: data = struct.unpack('b'*N, f.read(1*N)) The result is a tuple, but I have no idea how to deal with it now. A: Here's how you can parse some parts of it. import pickle import zlib file = '4402905758116487.dat' cache_file = open(file, 'rb') # This can be improved to not keep the file opened. # Converting pickle items from python2 to python3 you need to use the "bytes" encoding or "latin1". legacyBattleResultVersion, brAllDataRaw = pickle.load(cache_file, encoding='bytes', errors='ignore') arenaUniqueID, brAccount, brVehicleRaw, brOtherDataRaw = brAllDataRaw # The data stored inside the pickled file will be a compressed pickle again. vehicle_data = pickle.loads(zlib.decompress(brVehicleRaw), encoding='latin1') account_data = pickle.loads(zlib.decompress(brAccount), encoding='latin1') brCommon, brPlayersInfo, brPlayersVehicle, brPlayersResult = pickle.loads(zlib.decompress(brOtherDataRaw), encoding='latin1') # Lastly you can print all of these and see a lot of data inside. The response contains a mixture of more binary files as well as some data captured from the replays. This is not a complete solution but it's a decent start to parsing these files. A: After loading the pickle files like gabzo mentioned, you will see that it is simply a list of values, and without knowing what each value refers to, it's hard to make sense of it. The identifiers for the values can be extracted from your game installation: import zipfile WOT_PKG_PATH = "Your/Game/Path/res/packages/scripts.pkg" BATTLE_RESULTS_PATH = "scripts/common/battle_results/" archive = zipfile.ZipFile(WOT_PKG_PATH, 'r') for file in archive.namelist(): if file.startswith(BATTLE_RESULTS_PATH): archive.extract(file) You can then decompile the Python files (uncompyle6) and then go through the code to see the identifiers for the values. One thing to note is that the list of values for the main pickle objects (like brAccount from gabzo's code) always has a checksum as the first value. You can use this to check whether you have the right order and the correct identifiers for the values. The way these checksums are generated can be seen in the decompiled Python files. I have been tackling this problem for some time and I have a solution that works here (albeit in Rust): https://github.com/dacite/wot-battle-results-parser. Run ./wot_datfile_parser_cli --help after downloading the binary for options. Note that you can just run the binary without any options to get a folder of .json files for the .dat files currently in the cache folder. A: First you can look at the replay file itself in a text editor. But it won't show the code at the beginning of the file that has to be cleaned out. Then there is a ton of info that you have to read in and figure out, but it is the stats for each player in the game. THEN it comes to the part that has to do with the actual replay. You don't need that stuff. You can grab the player IDs and tank IDs from the WoT developer area API if you want.
Is there any feasible solution to read WOT battle results .dat files?
I am new here, trying to solve one of my interesting questions in World of Tanks. I heard that all battle data is saved on the client's disk in the Wargaming.net folder, and I want to do some batch data analysis of our clan's battle performance. image It is said that these .dat files are a kind of JSON file, so I tried to use a couple of lines of Python code to read them, but failed. import json f = open('ex.dat', 'r', encoding='unicode_escape') content = f.read() a = json.loads(content) print(type(a)) print(a) f.close() The code is very simple and obviously fails. Well, could anyone tell me the truth about that? Added on Feb. 9th, 2022 After I tried another piece of code via Jupyter Notebook, it seems like something can be shown from the .dat files import struct import numpy as np import matplotlib.pyplot as plt import io with open('C:/Users/xukun/Desktop/br/ex.dat', 'rb') as f: fbuff = io.BufferedReader(f) N = len(fbuff.read()) print('byte length: ', N) with open('C:/Users/xukun/Desktop/br/ex.dat', 'rb') as f: data = struct.unpack('b'*N, f.read(1*N)) The result is a tuple, but I have no idea how to deal with it now.
[ "Here's how you can parse some parts of it.\nimport pickle\nimport zlib\n\nfile = '4402905758116487.dat'\ncache_file = open(file, 'rb') # This can be improved to not keep the file opened.\n\n# Converting pickle items from python2 to python3 you need to use the \"bytes\" encoding or \"latin1\". \nlegacyBattleResultVersion, brAllDataRaw = pickle.load(cache_file, encoding='bytes', errors='ignore')\n\narenaUniqueID, brAccount, brVehicleRaw, brOtherDataRaw = brAllDataRaw\n\n# The data stored inside the pickled file will be a compressed pickle again. \nvehicle_data = pickle.loads(zlib.decompress(brVehicleRaw), encoding='latin1')\naccount_data = pickle.loads(zlib.decompress(brAccount), encoding='latin1')\nbrCommon, brPlayersInfo, brPlayersVehicle, brPlayersResult = pickle.loads(zlib.decompress(brOtherDataRaw), encoding='latin1')\n\n\n# Lastly you can print all of these and see a lot of data inside. \n\nThe response contains a mixture of more binary files as well as some data captured from the replays.\nThis is not a complete solution but it's a decent start to parsing these files.\n", "After loading the pickle files like gabzo mentioned, you will see that it is simply a list of values, and without knowing what each value refers to, it's hard to make sense of it. The identifiers for the values can be extracted from your game installation:\nimport zipfile\n\nWOT_PKG_PATH = \"Your/Game/Path/res/packages/scripts.pkg\"\nBATTLE_RESULTS_PATH = \"scripts/common/battle_results/\"\n\narchive = zipfile.ZipFile(WOT_PKG_PATH, 'r')\n\nfor file in archive.namelist():\n    if file.startswith(BATTLE_RESULTS_PATH):\n        archive.extract(file)\n\nYou can then decompile the Python files (uncompyle6) and then go through the code to see the identifiers for the values.\nOne thing to note is that the list of values for the main pickle objects (like brAccount from gabzo's code) always has a checksum as the first value. You can use this to check whether you have the right order and the correct identifiers for the values. The way these checksums are generated can be seen in the decompiled Python files.\nI have been tackling this problem for some time and I have a solution that works here (albeit in Rust): https://github.com/dacite/wot-battle-results-parser.\nRun ./wot_datfile_parser_cli --help after downloading the binary for options. Note that you can just run the binary without any options to get a folder of .json files for the .dat files currently in the cache folder\n", "First you can look at the replay file itself in a text editor. But it won't show the code at the beginning of the file that has to be cleaned out. Then there is a ton of info that you have to read in and figure out, but it is the stats for each player in the game. THEN it comes to the part that has to do with the actual replay. You don't need that stuff.\nYou can grab the player IDs and tank IDs from the WoT developer area API if you want.\n" ]
[ 0, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0071003839_python.txt
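A brief follow-up to the WOT entry above: before unpickling the inner blobs from the first answer, it can help to confirm they really are zlib streams — zlib data conventionally starts with the byte 0x78. A small sketch under the same layout assumptions as that answer (the file name is hypothetical):

import pickle
import zlib

with open('ex.dat', 'rb') as f:
    # Outer layer: a python2-era pickle of (version, payload).
    version, payload = pickle.load(f, encoding='bytes')

arena_id, account_blob, vehicle_blob, other_blob = payload
for label, blob in (('account', account_blob), ('vehicle', vehicle_blob)):
    print(label, 'looks like zlib:', blob[:1] == b'\x78')
    data = pickle.loads(zlib.decompress(blob), encoding='latin1')
    print(label, 'decoded as', type(data).__name__)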
Q: How to concatenate dataframes considering column orders I want to combine two dataframes: df1=pd.DataFrame({'A':['a','a',],'B':['b','b']}) df2=pd.DataFrame({'B':['b','b'],'A':['a','a']}) pd.concat([df1,df2],ignore_index=True) result: But I want the output to be like this (I want the same code as SQL's union/union all): A: Another way is to use numpy to stack the two dataframes and then use pd.DataFrame constructor: pd.DataFrame(np.vstack([df1.values,df2.values]), columns = df1.columns) Output: A B 0 a b 1 a b 2 b a 3 b a A: Here is a proposition to do an SQL UNION ALL with pandas by using pandas.concat : list_dfs = [df1, df2] out = ( pd.concat([pd.DataFrame(sub_df.to_numpy()) for sub_df in list_dfs], ignore_index=True) .set_axis(df1.columns, axis=1) ) # Output : print(out) A B 0 a b 1 a b 2 b a 3 b a
How to concatenate dataframes considering column orders
I want to combine two dataframes: df1=pd.DataFrame({'A':['a','a',],'B':['b','b']}) df2=pd.DataFrame({'B':['b','b'],'A':['a','a']}) pd.concat([df1,df2],ignore_index=True) result: But I want the output to be like this (I want the same code as SQL's union/union all):
[ "Another way is to use numpy to stack the two dataframes and then use pd.DataFrame constructor:\npd.DataFrame(np.vstack([df1.values,df2.values]), columns = df1.columns)\n\nOutput:\n A B\n0 a b\n1 a b\n2 b a\n3 b a\n\n", "Here is a proposition to do an SQL UNION ALL with pandas by using pandas.concat :\nlist_dfs = [df1, df2]\n\nout = (\n pd.concat([pd.DataFrame(sub_df.to_numpy()) for sub_df in list_dfs], \n ignore_index=True)\n .set_axis(df1.columns, axis=1)\n )\n\n# Output :\nprint(out)\n\n A B\n0 a b\n1 a b\n2 b a\n3 b a\n\n" ]
[ 1, 0 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074677671_pandas_python.txt
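A brief follow-up to the pandas entry above: if the goal is SQL UNION ALL semantics (match columns by position, not by name), an alternative to round-tripping through numpy is to relabel the second frame's columns to the first frame's before concatenating. A minimal sketch, assuming a reasonably recent pandas where set_axis returns a new frame:

import pandas as pd

df1 = pd.DataFrame({'A': ['a', 'a'], 'B': ['b', 'b']})
df2 = pd.DataFrame({'B': ['b', 'b'], 'A': ['a', 'a']})

# Positional relabel: df2's first column is treated as 'A', its second as 'B'.
out = pd.concat([df1, df2.set_axis(df1.columns, axis=1)], ignore_index=True)
print(out)
#    A  B
# 0  a  b
# 1  a  b
# 2  b  a
# 3  b  a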
Q: What does real-time object detection really mean? So here is the context. I created a script in Python, YOLOv4, OpenCV, CUDA and CUDNN, for object detection and object tracking to count the objects in a video. I intend to use it in real time, but what does real time really mean? The video I'm using is 1min long and 60FPS originally, but the video after processing is 30FPS on average and takes 3mins to finish. So comparing both videos side by side, one is clearly faster. 30FPS is industry standard for movies and stuff. I'm trying to wrap my head around what real time truly means. Imagine I need to use this information for traffic lights management or use this to lift a bridge for a passing boat, it should be done automatically. It's time-sensitive, or the chaos would be visible. In these cases, what does it truly mean to be real time? A: First, learn what "real-time" means. Wikipedia: https://en.wikipedia.org/wiki/Real-time_computing Understand the terms "hard" and "soft" real-time. Understand which aspects of your environment are soft and which require hard real-time. Understand the response times that your environment requires. Understand the time scales. This does not involve fuzzy terms like "quick" or "significant" or "accurate". It involves actual quantifiable time spans that depend on your task and its environment, acceptable error rates, ... You did not share any details about your environment. I find it unlikely that you even need 30 fps for any application involving a road intersection. You only need enough frame rate so you don't miss objects of interest, and you have fine enough data to track multiple objects with identity without mistaking them for each other. Example: assume a car moving at 200 km/h. If your camera takes a frame every 1/30 second, the car moves 1.85 meters between frames. How's your motion blur? What's the camera's exposure time? I'd recommend something on the order of a millisecond or better, giving motion blur of 0.05m How's your tracking? Can it deal with objects "jumping" that far between frames? Does it generate object identity information that is usable for matching (association)? A: Real-time refers to the fact that a system is able to process and respond to data as it is received, without any significant delay. In the context of your object detection and tracking script, real-time would mean that the system is able to process and respond to new frames of the video as they are received, without a significant delay. This would allow the system to accurately count the objects in the video in near-real-time as the video is being played. In the case of traffic lights management or lifting a bridge for a passing boat, real-time would mean that the system is able to quickly and accurately process data from sensors and other sources, and use that information to make decisions and take actions in a timely manner. This is important in these scenarios because any significant delay in processing and responding to data could have serious consequences, such as traffic accidents or collisions. Overall, real-time systems are designed to process and respond to data quickly and accurately, in order to support time-sensitive applications and scenarios.
What does real-time object detection really mean?
So here is the context. I created a script in Python, YOLOv4, OpenCV, CUDA and CUDNN, for object detection and object tracking to count the objects in a video. I intend to use it in real time, but what does real time really mean? The video I'm using is 1min long and 60FPS originally, but the video after processing is 30FPS on average and takes 3mins to finish. So comparing both videos side by side, one is clearly faster. 30FPS is industry standard for movies and stuff. I'm trying to wrap my head around what real time truly means. Imagine I need to use this information for traffic lights management or use this to lift a bridge for a passing boat, it should be done automatically. It's time-sensitive, or the chaos would be visible. In these cases, what does it truly mean to be real time?
[ "First, learn what \"real-time\" means. Wikipedia: https://en.wikipedia.org/wiki/Real-time_computing\nUnderstand the terms \"hard\" and \"soft\" real-time. Understand which aspects of your environment are soft and which require hard real-time.\nUnderstand the response times that your environment requires. Understand the time scales.\nThis does not involve fuzzy terms like \"quick\" or \"significant\" or \"accurate\". It involves actual quantifiable time spans that depend on your task and its environment, acceptable error rates, ...\nYou did not share any details about your environment. I find it unlikely that you even need 30 fps for any application involving a road intersection.\nYou only need enough frame rate so you don't miss objects of interest, and you have fine enough data to track multiple objects with identity without mistaking them for each other.\nExample: assume a car moving at 200 km/h. If your camera takes a frame every 1/30 second, the car moves 1.85 meters between frames.\n\nHow's your motion blur? What's the camera's exposure time? I'd recommend something on the order of a millisecond or better, giving motion blur of 0.05m\nHow's your tracking? Can it deal with objects \"jumping\" that far between frames? Does it generate object identity information that is usable for matching (association)?\n\n", "Real-time refers to the fact that a system is able to process and respond to data as it is received, without any significant delay. In the context of your object detection and tracking script, real-time would mean that the system is able to process and respond to new frames of the video as they are received, without a significant delay. This would allow the system to accurately count the objects in the video in near-real-time as the video is being played.\nIn the case of traffic lights management or lifting a bridge for a passing boat, real-time would mean that the system is able to quickly and accurately process data from sensors and other sources, and use that information to make decisions and take actions in a timely manner. This is important in these scenarios because any significant delay in processing and responding to data could have serious consequences, such as traffic accidents or collisions.\nOverall, real-time systems are designed to process and respond to data quickly and accurately, in order to support time-sensitive applications and scenarios.\n" ]
[ 1, 0 ]
[]
[]
[ "computer_vision", "object_detection", "object_tracking", "python", "real_time" ]
stackoverflow_0074677722_computer_vision_object_detection_object_tracking_python_real_time.txt
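A brief follow-up to the real-time entry above: the first answer's numbers are easy to turn into a frame-budget calculation, which is usually the practical definition of "fast enough" for soft real-time vision. A small sketch using that answer's own example figures (assumed values, not measurements):

frame_rate = 30.0      # frames per second the pipeline must sustain
speed_kmh = 200.0      # the answer's worst-case vehicle speed

speed_ms = speed_kmh * 1000.0 / 3600.0   # ~55.6 m/s
per_frame_m = speed_ms / frame_rate      # ~1.85 m travelled between frames
budget_ms = 1000.0 / frame_rate          # ~33.3 ms of processing budget per frame

print(f"displacement between frames: {per_frame_m:.2f} m")
print(f"per-frame processing budget: {budget_ms:.1f} ms")

If the detector plus tracker cannot finish inside that budget, frames queue up and the output drifts out of sync — exactly the symptom described in the question.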
Q: "A potentially dangerous Request.Form value was detected from the client" when returning JSON to client I have an app that allows a user to edit email templates. The email templates employ HTML so the client sends HTML to the controller. I am using the Jodit HTML editor (https://xdsoft.net/jodit/) for the message body so users don't have to know HTML themselves. I can post the form and the controller accepts the request as the view model is decorated with the appropriate [AllowHtml] attribute; indeed, ModelState.IsValid is true. The error occurs when the data is returned to the client; the controller does not return the json object but instead returns an error. The question is, how do I prevent asp.net from marking this as dangerous on return? Note, this app deals with PII so not validating requests is not an option. Here is the error (Adding the complete error in case someone finds that helpful): Server Error in '/ReallyAwesomeApp' Application. A potentially dangerous Request.Form value was detected from the client (messagetext="<font color="#000000..."). Description: ASP.NET has detected data in the request that is potentially dangerous because it might include HTML markup or script. The data might represent an attempt to compromise the security of your application, such as a cross-site scripting attack. If this type of input is appropriate in your application, you can include code in a web page to explicitly allow it. For more information, see http://go.microsoft.com/fwlink/?LinkID=212874. Exception Details: System.Web.HttpRequestValidationException: A potentially dangerous Request.Form value was detected from the client (messagetext="<font color="#000000..."). Source Error: An unhandled exception was generated during the execution of the current web request. Information regarding the origin and location of the exception can be identified using the exception stack trace below. Stack Trace: [HttpRequestValidationException (0x80004005): A potentially dangerous Request.Form value was detected from the client (messagetext="<font color="#000000...").] 
System.Web.HttpRequest.ValidateString(String value, String collectionKey, RequestValidationSource requestCollection) +322 System.Web.<>c__DisplayClass280_0.b__0(String key, String value) +18 System.Web.HttpValueCollection.EnsureKeyValidated(String key) +86 System.Web.HttpValueCollection.Get(String name) +17 System.Web.Caching.OutputCacheModule.CreateOutputCachedItemKey(String path, HttpVerb verb, HttpContext context, CachedVary cachedVary) +694 System.Web.Caching.OutputCacheModule.CreateOutputCachedItemKey(HttpContext context, CachedVary cachedVary) +56 System.Web.Caching.OutputCacheModule.OnLeave(Object source, EventArgs eventArgs) +1226 System.Web.SyncEventExecutionStep.System.Web.HttpApplication.IExecutionStep.Execute() +200 System.Web.<>c__DisplayClass285_0.b__0() +24 System.Web.StepInvoker.Invoke(Action executionStep) +100 System.Web.<>c__DisplayClass4_0.b__0() +17 Microsoft.AspNet.TelemetryCorrelation.TelemetryCorrelationHttpModule.OnExecuteRequestStep(HttpContextBase context, Action step) +64 System.Web.<>c__DisplayClass284_0.b__0(Action nextStepAction) +54 System.Web.StepInvoker.Invoke(Action executionStep) +84 System.Web.<>c__DisplayClass4_0.b__0() +17 Microsoft.ApplicationInsights.Web.ApplicationInsightsHttpModule.OnExecuteRequestStep(HttpContextBase context, Action step) in E:\A_work\21\s\WEB\Src\Web\Web.Shared.Net\ApplicationInsightsHttpModule.cs:164 System.Web.<>c__DisplayClass284_0.b__0(Action nextStepAction) +54 System.Web.StepInvoker.Invoke(Action executionStep) +84 System.Web.HttpApplication.ExecuteStepImpl(IExecutionStep step) +100 System.Web.HttpApplication.ExecuteStep(IExecutionStep step, Boolean& completedSynchronously) +73 Version Information: Microsoft .NET Framework Version:4.0.30319; ASP.NET Version:4.8.4075.0 Here is my view model: public class EmailTemplateViewModel { public IEnumerable<SelectListItem> EmailTemplates { get; set; } public List<EmailAttachmentViewModel> EmailAttachments { get; set; } = new List<EmailAttachmentViewModel>(); public string CreateUserIdentifier { get; set; } public int TemplateID { get; set; } [Display(Name = "Template Name")] public string TemplateName { get; set; } [Display(Name = "Email Subject")] public string EmailSubject { get; set; } [AllowHtml] [Display(Name = "Message Text")] public string MessageText { get; set; } } Here's my return model (Found this little gem here on SO): public class JsonReturnModel<T> { public List<ClientError> ClientErrors { get; internal set; } = new List<ClientError>(); public T Data { get; internal set; } public bool LoggedIn { get; internal set; } public string Message { get; set; } public List<ServerError> ServerErrors { get; internal set; } = new List<ServerError>(); public bool Success { get; internal set; } } Here's my Controller method: [HttpPost] [ValidateAntiForgeryToken] public JsonResult EditEmailTemplate(EmailTemplateViewModel model) { RepositoryResult result = new RepositoryResult(); JsonReturnModel<EmailTemplateViewModel> returnModel = new JsonReturnModel<EmailTemplateViewModel>(); if (model == null) { returnModel.Success = false; returnModel.Message = "No data sent to server."; return Json(returnModel); } EmailTemplateModel newModel = null; result = DataManager.UpdateEmailTemplate(model.ToEmailTemplateModel()); if (result.IsSuccessful) { newModel = (EmailTemplateModel)result.ResultingObject; returnModel.Data = newModel.ToEmailTemplateViewModel(); } returnModel.Success = result.IsSuccessful; returnModel.Message = result.Message; return Json(returnModel); } And finally the Ajax that gets 
called via JQuery: $('#save-btn').on('click', function (event) { var rawHtml = $('#message-editor').html(); $('#MessageText').val(ESCM.joditEditor.value); $.ajax({ type: "post", url: ESCM.EmailPostUrl, data: $("form").serialize(), async: false, success: function (data) { if (data.IsSuccessful || data.Success) { $('#email-template-editor').click(); $('#TemplateID').val(data.Data.TemplateID); $('#TemplateID').change(); } DisplayMessage(data); }, error: function (errorData) { console.debug(errorData); } }); }); A: The issue is that ASP.NET is detecting potentially dangerous HTML in the request and is blocking it. To prevent this, you need to add the ValidateInput attribute to the controller action that is receiving the request. This will tell ASP.NET to allow the HTML in the request and not block it. [HttpPost] [ValidateAntiForgeryToken] [ValidateInput(false)] public JsonResult EditEmailTemplate(EmailTemplateViewModel model) { // ... } This should allow the request to go through without any issues. A: In order to prevent ASP.NET from marking the HTML as potentially dangerous, you need to add the ValidateInput attribute to the controller action method. This attribute tells ASP.NET not to validate the input, which means that the HTML will be accepted without being marked as dangerous. Here is an example of how you can use the ValidateInput attribute: [HttpPost] [ValidateInput(false)] public ActionResult EditEmailTemplate(EmailTemplateViewModel model) { // The model will not be marked as potentially dangerous // and you can use the HTML in the model without any issues. // ... } It's important to note that using the ValidateInput attribute can make your application vulnerable to cross-site scripting (XSS) attacks. Therefore, you should only use this attribute if you are sure that the input is safe and you have taken appropriate measures to prevent XSS attacks. For more information about XSS attacks and how to prevent them, see the following article: https://www.owasp.org/index.php/Cross-site_Scripting_(XSS)
"A potentially dangerous Request.Form value was detected from the client" when returning JSON to client
I have an app that allows a user to edit email templates. The email templates employ HTML so the client sends HTML to the controller. I am using the Jodit HTML editor (https://xdsoft.net/jodit/) for the message body so users don't have to know HTML themselves. I can post the form and the controller accepts the request as the view model is decorated with the appropriate [AllowHtml] attribute; indeed, ModelState.IsValid is true. The error occurs when the data is returned to the client; the controller does not return the json object but instead returns an error. The question is, how do I prevent asp.net from marking this as dangerous on return? Note, this app deals with PII so not validating requests is not an option. Here is the error (Adding the complete error in case someone finds that helpful): Server Error in '/ReallyAwesomeApp' Application. A potentially dangerous Request.Form value was detected from the client (messagetext="<font color="#000000..."). Description: ASP.NET has detected data in the request that is potentially dangerous because it might include HTML markup or script. The data might represent an attempt to compromise the security of your application, such as a cross-site scripting attack. If this type of input is appropriate in your application, you can include code in a web page to explicitly allow it. For more information, see http://go.microsoft.com/fwlink/?LinkID=212874. Exception Details: System.Web.HttpRequestValidationException: A potentially dangerous Request.Form value was detected from the client (messagetext="<font color="#000000..."). Source Error: An unhandled exception was generated during the execution of the current web request. Information regarding the origin and location of the exception can be identified using the exception stack trace below. Stack Trace: [HttpRequestValidationException (0x80004005): A potentially dangerous Request.Form value was detected from the client (messagetext="<font color="#000000...").] 
System.Web.HttpRequest.ValidateString(String value, String collectionKey, RequestValidationSource requestCollection) +322 System.Web.<>c__DisplayClass280_0.b__0(String key, String value) +18 System.Web.HttpValueCollection.EnsureKeyValidated(String key) +86 System.Web.HttpValueCollection.Get(String name) +17 System.Web.Caching.OutputCacheModule.CreateOutputCachedItemKey(String path, HttpVerb verb, HttpContext context, CachedVary cachedVary) +694 System.Web.Caching.OutputCacheModule.CreateOutputCachedItemKey(HttpContext context, CachedVary cachedVary) +56 System.Web.Caching.OutputCacheModule.OnLeave(Object source, EventArgs eventArgs) +1226 System.Web.SyncEventExecutionStep.System.Web.HttpApplication.IExecutionStep.Execute() +200 System.Web.<>c__DisplayClass285_0.b__0() +24 System.Web.StepInvoker.Invoke(Action executionStep) +100 System.Web.<>c__DisplayClass4_0.b__0() +17 Microsoft.AspNet.TelemetryCorrelation.TelemetryCorrelationHttpModule.OnExecuteRequestStep(HttpContextBase context, Action step) +64 System.Web.<>c__DisplayClass284_0.b__0(Action nextStepAction) +54 System.Web.StepInvoker.Invoke(Action executionStep) +84 System.Web.<>c__DisplayClass4_0.b__0() +17 Microsoft.ApplicationInsights.Web.ApplicationInsightsHttpModule.OnExecuteRequestStep(HttpContextBase context, Action step) in E:\A_work\21\s\WEB\Src\Web\Web.Shared.Net\ApplicationInsightsHttpModule.cs:164 System.Web.<>c__DisplayClass284_0.b__0(Action nextStepAction) +54 System.Web.StepInvoker.Invoke(Action executionStep) +84 System.Web.HttpApplication.ExecuteStepImpl(IExecutionStep step) +100 System.Web.HttpApplication.ExecuteStep(IExecutionStep step, Boolean& completedSynchronously) +73 Version Information: Microsoft .NET Framework Version:4.0.30319; ASP.NET Version:4.8.4075.0 Here is my view model: public class EmailTemplateViewModel { public IEnumerable<SelectListItem> EmailTemplates { get; set; } public List<EmailAttachmentViewModel> EmailAttachments { get; set; } = new List<EmailAttachmentViewModel>(); public string CreateUserIdentifier { get; set; } public int TemplateID { get; set; } [Display(Name = "Template Name")] public string TemplateName { get; set; } [Display(Name = "Email Subject")] public string EmailSubject { get; set; } [AllowHtml] [Display(Name = "Message Text")] public string MessageText { get; set; } } Here's my return model (Found this little gem here on SO): public class JsonReturnModel<T> { public List<ClientError> ClientErrors { get; internal set; } = new List<ClientError>(); public T Data { get; internal set; } public bool LoggedIn { get; internal set; } public string Message { get; set; } public List<ServerError> ServerErrors { get; internal set; } = new List<ServerError>(); public bool Success { get; internal set; } } Here's my Controller method: [HttpPost] [ValidateAntiForgeryToken] public JsonResult EditEmailTemplate(EmailTemplateViewModel model) { RepositoryResult result = new RepositoryResult(); JsonReturnModel<EmailTemplateViewModel> returnModel = new JsonReturnModel<EmailTemplateViewModel>(); if (model == null) { returnModel.Success = false; returnModel.Message = "No data sent to server."; return Json(returnModel); } EmailTemplateModel newModel = null; result = DataManager.UpdateEmailTemplate(model.ToEmailTemplateModel()); if (result.IsSuccessful) { newModel = (EmailTemplateModel)result.ResultingObject; returnModel.Data = newModel.ToEmailTemplateViewModel(); } returnModel.Success = result.IsSuccessful; returnModel.Message = result.Message; return Json(returnModel); } And finally the Ajax that gets 
called via JQuery: $('#save-btn').on('click', function (event) { var rawHtml = $('#message-editor').html(); $('#MessageText').val(ESCM.joditEditor.value); $.ajax({ type: "post", url: ESCM.EmailPostUrl, data: $("form").serialize(), async: false, success: function (data) { if (data.IsSuccessful || data.Success) { $('#email-template-editor').click(); $('#TemplateID').val(data.Data.TemplateID); $('#TemplateID').change(); } DisplayMessage(data); }, error: function (errorData) { console.debug(errorData); } }); });
[ "The issue is that ASP.NET is detecting potentially dangerous HTML in the request and is blocking it. To prevent this, you need to add the ValidateInput attribute to the controller action that is receiving the request. This will tell ASP.NET to allow the HTML in the request and not block it.\n[HttpPost]\n[ValidateAntiForgeryToken]\n[ValidateInput(false)]\npublic JsonResult EditEmailTemplate(EmailTemplateViewModel model)\n{\n // ...\n}\n\nThis should allow the request to go through without any issues.\n", "In order to prevent ASP.NET from marking the HTML as potentially dangerous, you need to add the ValidateInput attribute to the controller action method. This attribute tells ASP.NET not to validate the input, which means that the HTML will be accepted without being marked as dangerous.\nHere is an example of how you can use the ValidateInput attribute:\n[HttpPost]\n[ValidateInput(false)]\npublic ActionResult EditEmailTemplate(EmailTemplateViewModel model)\n{\n // The model will not be marked as potentially dangerous\n // and you can use the HTML in the model without any issues.\n\n // ...\n}\n\nIt's important to note that using the ValidateInput attribute can make your application vulnerable to cross-site scripting (XSS) attacks. Therefore, you should only use this attribute if you are sure that the input is safe and you have taken appropriate measures to prevent XSS attacks. For more information about XSS attacks and how to prevent them, see the following article: https://www.owasp.org/index.php/Cross-site_Scripting_(XSS)\n" ]
[ 0, 0 ]
[]
[]
[ "asp.net_mvc_5", "c#", "dangerous_request" ]
stackoverflow_0062786295_asp.net_mvc_5_c#_dangerous_request.txt
Q: Mongo Atlas Search - using range operator for an array of documents I am using the Mongo Atlas Search feature and everything was fine until I tried to query for a range inside a collection / array of documents, and that did not work by any means. After some failed tries I was lucky enough to find a possible answer in the docs. Limitation You cannot use the range operator to query values stored in an array. Atlas Search cannot index numeric or date values if they are part of an array. My question and hope is that there is some fair workaround, also with respect to performance, because it might be that I have to filter out up to 500k records and it is not an option to fetch them all on the server side. A: have you looked at the "embeddedField" index field specification?
Mongo Atlas Search - using range operator for an array of documents
I am using the Mongo Atlas Search feature and everything was fine until I tried to query for a range inside a collection / array of documents, and that did not work by any means. After some failed tries I was lucky enough to find a possible answer in the docs. Limitation You cannot use the range operator to query values stored in an array. Atlas Search cannot index numeric or date values if they are part of an array. My question and hope is that there is some fair workaround, also with respect to performance, because it might be that I have to filter out up to 500k records and it is not an option to fetch them all on the server side.
[ "have you looked at the \"embeddedField\" index field specification?\n" ]
[ 0 ]
[]
[]
[ "mongodb", "mongodb_query", "search" ]
stackoverflow_0070967239_mongodb_mongodb_query_search.txt
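A brief follow-up to the Atlas Search entry above: the workaround the answer hints at is the embedded-documents index type. Assuming a newer Atlas Search release where the index maps the array field with type "embeddedDocuments", the embeddedDocument operator can wrap a range query over values inside the array, so the filtering happens inside the search index rather than on the application side. A hedged pymongo sketch (index, collection and field names are all hypothetical):

from pymongo import MongoClient

client = MongoClient('mongodb+srv://...')  # connection string elided
coll = client['shop']['orders']

pipeline = [
    {'$search': {
        'index': 'default',
        'embeddedDocument': {
            'path': 'items',
            'operator': {'range': {'path': 'items.qty', 'gte': 1, 'lte': 5}},
        },
    }},
    {'$limit': 10},
]
for doc in coll.aggregate(pipeline):
    print(doc['_id'])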
Q: Android MediaPlayer synchronized start I'm trying to use Android's MediaPlayer to play a backing audio track to a music game which should stay in sync with other events. However, I'm finding that on different devices the backing track seems out of sync. After some frustration, I'm coming to the conclusion that the time it takes to start playing the audio is indeterminate. Is there a way around this? Or some kind of callback from the MediaPlayer that represents "starting to play NOW"? AFAICT I can't use the SoundPool for this backing track as the audio file is too long. A: One option to ensure that the audio stays in sync with other events is to use the MediaPlayer's setOnPreparedListener method, which is called when the MediaPlayer is ready to play the audio. You can use this callback to start playing the audio at the desired time, which should help ensure that it stays in sync with other events. Here is an example of how you could use the setOnPreparedListener method to start playing the audio at the desired time: MediaPlayer mediaPlayer = new MediaPlayer(); mediaPlayer.setOnPreparedListener(new MediaPlayer.OnPreparedListener() { @Override public void onPrepared(MediaPlayer mp) { // Start playing the audio at the desired time mp.start(); } }); mediaPlayer.setDataSource(...); mediaPlayer.prepareAsync(); Another option is to use the MediaPlayer.getCurrentPosition method to periodically check the current position of the audio and adjust the timing of other events accordingly. This can be done using a Timer or Handler to periodically check the current position of the audio and adjust the timing of other events. Here is an example of how you could use the MediaPlayer.getCurrentPosition method to ensure that the audio stays in sync with other events: // Create a Timer or Handler to periodically check the current position of the audio Timer timer = new Timer(); timer.schedule(new TimerTask() { @Override public void run() { // Check the current position of the audio int currentPosition = mediaPlayer.getCurrentPosition(); // Adjust the timing of other events based on the current position of the audio ... } }, 0, 100); // Check the current position every 100 milliseconds
Android MediaPlayer synchronized start
I'm trying to use Android's MediaPlayer to play a backing audio track to a music game which should stay in sync with other events. However, I'm finding that on different devices the backing track seems out of sync. After some frustration, I'm coming to the conclusion that the time it takes to start playing the audio is indeterminate. Is there a way around this? Or some kind of callback from the MediaPlayer that represents "starting to play NOW"? AFAICT I can't use the SoundPool for this backing track as the audio file is too long.
[ "One option to ensure that the audio stays in sync with other events is to use the MediaPlayer's setOnPreparedListener method, which is called when the MediaPlayer is ready to play the audio. You can use this callback to start playing the audio at the desired time, which should help ensure that it stays in sync with other events.\nHere is an example of how you could use the setOnPreparedListener method to start playing the audio at the desired time:\nMediaPlayer mediaPlayer = new MediaPlayer();\nmediaPlayer.setOnPreparedListener(new MediaPlayer.OnPreparedListener() {\n    @Override\n    public void onPrepared(MediaPlayer mp) {\n        // Start playing the audio at the desired time\n        mp.start();\n    }\n});\nmediaPlayer.setDataSource(...);\nmediaPlayer.prepareAsync();\n\nAnother option is to use the MediaPlayer.getCurrentPosition method to periodically check the current position of the audio and adjust the timing of other events accordingly. This can be done using a Timer or Handler to periodically check the current position of the audio and adjust the timing of other events.\nHere is an example of how you could use the MediaPlayer.getCurrentPosition method to ensure that the audio stays in sync with other events:\n// Create a Timer or Handler to periodically check the current position of the audio\nTimer timer = new Timer();\ntimer.schedule(new TimerTask() {\n    @Override\n    public void run() {\n        // Check the current position of the audio\n        int currentPosition = mediaPlayer.getCurrentPosition();\n        // Adjust the timing of other events based on the current position of the audio\n        ...\n    }\n}, 0, 100); // Check the current position every 100 milliseconds\n" ]
[ 0 ]
[]
[]
[ "android", "android_audio", "android_mediaplayer" ]
stackoverflow_0074679998_android_android_audio_android_mediaplayer.txt
Q: How can I pass a value from a function to any component in React? I want to pass a value which I am receiving in a function like this: const ViewDetails = item => () => { console.log(item); toggleModal(); } I want to pass the item to the Modal component, like open and onClose, which is called in the Main function: return ( <Layout title="Dashboard" className="container-fluid"> {<Modal open={modalStatus} onClose={() => setModalStatus(false)} />} <div className="row"> <div className="col-sm-3"> <UserLinks /> </div> <div className="col-sm-9"> <UserInfo /> {orders ? <PurchaseHistory /> : ""} </div> </div> </Layout> ) I am expecting to have something like this: {<Modal open={modalStatus} onClose={() => setModalStatus(false)} ***item={item}***/>} so that I can use the values inside item in the Modal component. A: Consider using the context API; it enables you to dispatch the item to your reducer.js file and pull it into your Modal component using a StateProvider.js file. A: I would like to add more to @GODWIN GODWIN's comment regarding the context API, by providing a very simple example along with the React docs about the Context hook. Generally, in practice people tend to wrap providers at App.js; for the sake of simplicity I am going to wrap at the index.js file. src/index.jsx import React from 'react'; import ReactDOM from 'react-dom/client'; import App from './App' import { ModalProvider } from './context/ModalContext' ReactDOM.createRoot( document.querySelector('#root') ).render( /** * @dev Note everything inside ModalProvider has access * to the values provided, such as open, setOpen */ <ModalProvider> <App /> </ModalProvider> ) src/context/ModalContext.jsx import React, { useState, createContext, useContext } from 'react' /** * @dev inside your createContext object you can pass in * default values that will be passed in value at provider */ export const ModalContext = createContext({ open: false }) /** * @dev your provider will enable you to access the value in all your * child components. NOTE it will not be able to access your * parent components. */ export function ModalProvider(props) { const [open, setOpen] = useState(false) return ( <ModalContext.Provider value={{ open, setOpen }}> {props.children} </ModalContext.Provider> ) } src/components/Modal.jsx import { useContext } from 'react' import { ModalContext } from '../context/ModalContext' function Modal(props) { const { open, setOpen } = useContext(ModalContext) return ( <> { open ? (<div> <p>test</p> <>{props.children}</> <button onClick={() => setOpen(false)}>Close Modal</button> </div>) : (<button onClick={() => setOpen(true)}>Open Modal</button>) } </> ) } export default Modal src/App.jsx import Modal from './components/Modal' function App(props) { return ( <div className='App'> <h1>Hello React.</h1> <h2>Start editing to see some magic happen!</h2> <Modal> <p> You see content here</p> </Modal> </div> ); } export default App I hope this gives you a good sense of direction on how to use React's context hook. Please note that this is very basic source code, meant to help you understand how props.children and the context hook work. A: You have to keep state for this item. When the ViewDetails function is triggered, you can set this state with the item from inside the function; afterwards this state can be passed as a prop to any component.
How can I pass a value from a function to any component in React?
I want to pass a value which I am receiving in a function like this: const ViewDetails = item => () => { console.log(item); toggleModal(); } I want to pass the item to the Modal component, like open and onClose, which is called in the Main function: return ( <Layout title="Dashboard" className="container-fluid"> {<Modal open={modalStatus} onClose={() => setModalStatus(false)} />} <div className="row"> <div className="col-sm-3"> <UserLinks /> </div> <div className="col-sm-9"> <UserInfo /> {orders ? <PurchaseHistory /> : ""} </div> </div> </Layout> ) I am expecting to have something like this: {<Modal open={modalStatus} onClose={() => setModalStatus(false)} ***item={item}***/>} so that I can use the values inside item in the Modal component.
[ "Consider using the context API; it enables you to dispatch the item to your reducer.js file and pull it into your Modal component using a StateProvider.js file.\n", "I would like to add more to @GODWIN GODWIN's comment regarding the context API, by providing a very simple example along with the React docs about the Context hook.\nGenerally, in practice people tend to wrap providers at App.js; for the sake of simplicity I am going to wrap at the index.js file.\nsrc/index.jsx\nimport React from 'react';\nimport ReactDOM from 'react-dom/client';\n\nimport App from './App'\nimport { ModalProvider } from './context/ModalContext'\n\nReactDOM.createRoot( \n  document.querySelector('#root')\n).render(\n  /**\n   * @dev Note everything inside ModalProvider has access\n   * to the values provided, such as open, setOpen\n   */\n  <ModalProvider>\n    <App />\n  </ModalProvider>\n)\n\nsrc/context/ModalContext.jsx\nimport React, { useState, createContext, useContext } from 'react'\n\n/**\n * @dev inside your createContext object you can pass in\n * default values that will be passed in value at provider\n */\nexport const ModalContext = createContext({\n  open: false\n})\n\n/**\n * @dev your provider will enable you to access the value in all your\n * child components. NOTE it will not be able to access your\n * parent components.\n */\nexport function ModalProvider(props) {\n  const [open, setOpen] = useState(false)\n\n  return (\n    <ModalContext.Provider value={{ open, setOpen }}>\n      {props.children}\n    </ModalContext.Provider>\n  )\n}\n\nsrc/components/Modal.jsx\nimport { useContext } from 'react'\nimport { ModalContext } from '../context/ModalContext'\n\nfunction Modal(props) {\n  const { open, setOpen } = useContext(ModalContext)\n\n  return (\n    <>\n      { open ? \n        (<div>\n          <p>test</p>\n          <>{props.children}</>\n          <button onClick={() => setOpen(false)}>Close Modal</button>\n        </div>) : \n        (<button onClick={() => setOpen(true)}>Open Modal</button>)\n      }\n    </>\n  )\n}\n\nexport default Modal\n\nsrc/App.jsx\nimport Modal from './components/Modal'\n\nfunction App(props) {\n  return (\n    <div className='App'>\n      <h1>Hello React.</h1>\n      <h2>Start editing to see some magic happen!</h2>\n      <Modal>\n        <p> You see content here</p>\n      </Modal>\n    </div>\n  );\n}\n\nexport default App\n\nI hope this gives you a good sense of direction on how to use React's context hook. Please note that this is very basic source code, meant to help you understand how props.children and the context hook work.\n", "You have to keep state for this item. When the ViewDetails function is triggered, you can set this state with the item from inside the function; afterwards this state can be passed as a prop to any component.\n" ]
[ 0, 0, -1 ]
[]
[]
[ "react_functional_component", "reactjs" ]
stackoverflow_0074678996_react_functional_component_reactjs.txt
Q: How to use the hardware acceleration for ffmpeg on m1-max? Since there aren't m1 builds available from ffmpeg.org, I had to compile my own. Obviously, I'd like to get the best possible performance. Does ffmpeg use the "Hardware-accelerated H.264" on the m1 max? Is there anything I need to do, like compiler flags, to get it? Any switch at run time? How can I verify that it's being used? To compile ffmpeg, I just did the basics: ./configure --prefix=/tmp/ff --enable-gpl --enable-nonfree --enable-libx264 make make install For x264, I just did ./configure --prefix=/tmp/ff make make install to run: ffmpeg -i random.wmv -c:v libx264 -preset ultrafast -c:a aac output-ultra.mp4 Anything else I should be doing? A: It looks like what I wanted was videotoolbox Usage is documented here, basically To use H.264/HEVC hardware encoding in macOS, just use the encoder -c:v h264_videotoolbox Example: ffmpeg -i random.wmv -c:v h264_videotoolbox -c:a aac junk-vt.mp4 Seems to be slightly faster than "ultrafast" with software, and much smaller files.
How to use the hardware acceleration for ffmpeg on m1-max?
Since there aren't m1 builds available from ffmpeg.org, I had to compile my own. Obviously, I'd like to get the best possible performance. Does ffmpeg use the "Hardware-accelerated H.264" on the m1 max? Is there anything I need to do, like compiler flags, to get it? Any switch at run time? How can I verify that it's being used? To compile ffmpeg, I just did the basics: ./configure --prefix=/tmp/ff --enable-gpl --enable-nonfree --enable-libx264 make make install For x264, I just did ./configure --prefix=/tmp/ff make make install to run: ffmpeg -i random.wmv -c:v libx264 -preset ultrafast -c:a aac output-ultra.mp4 Anything else I should be doing?
[ "It looks like what I wanted was videotoolbox\nUsage is documented here, basically\nTo use H.264/HEVC hardware encoding in macOS, just use the encoder -c:v h264_videotoolbox\n\nExample:\nffmpeg -i random.wmv -c:v h264_videotoolbox -c:a aac junk-vt.mp4\n\nSeems to be slightly faster than \"ultrafast\" with software, and much smaller files.\n" ]
[ 0 ]
[]
[]
[ "apple_m1", "apple_silicon", "ffmpeg" ]
stackoverflow_0074680025_apple_m1_apple_silicon_ffmpeg.txt
Q: UnicodeDecodeError: 'utf8' codec can't decode byte 0x9c I have a socket server that is supposed to receive UTF-8 valid characters from clients. The problem is some clients (mainly hackers) are sending all the wrong kind of data over it. I can easily distinguish the genuine client, but I am logging to files all the data sent so I can analyze it later. Sometimes I get characters like this œ that cause the UnicodeDecodeError error. I need to be able to make the string UTF-8 with or without those characters. Update: For my particular case the socket service was an MTA and thus I only expect to receive ASCII commands such as: EHLO example.com MAIL FROM: <john.doe@example.com> ... I was logging all of this in JSON. Then some folks out there without good intentions decided to send all kind of junk. That is why for my specific case it is perfectly OK to strip the non ASCII characters. A: http://docs.python.org/howto/unicode.html#the-unicode-type str = unicode(str, errors='replace') or str = unicode(str, errors='ignore') Note: This will strip out (ignore) the characters in question returning the string without them. For me this is ideal case since I'm using it as protection against non-ASCII input which is not allowed by my application. Alternatively: Use the open method from the codecs module to read in the file: import codecs with codecs.open(file_name, 'r', encoding='utf-8', errors='ignore') as fdata: A: Changing the engine from C to Python did the trick for me. Engine is C: pd.read_csv(gdp_path, sep='\t', engine='c') 'utf-8' codec can't decode byte 0x92 in position 18: invalid start byte Engine is Python: pd.read_csv(gdp_path, sep='\t', engine='python') No errors for me. A: This type of issue crops up for me now that I've moved to Python 3. I had no idea Python 2 was simply steam rolling any issues with file encoding. I found this nice explanation of the differences and how to find a solution after none of the above worked for me. http://python-notes.curiousefficiency.org/en/latest/python3/text_file_processing.html In short, to make Python 3 behave as similarly as possible to Python 2 use: with open(filename, encoding="latin-1") as datafile: # work on datafile here However, read the article, there is no one size fits all solution. A: the first,Using get_encoding_type to get the files type of encode: import os from chardet import detect # get file encoding type def get_encoding_type(file): with open(file, 'rb') as f: rawdata = f.read() return detect(rawdata)['encoding'] the second, opening the files with the type: open(current_file, 'r', encoding = get_encoding_type, errors='ignore') A: >>> '\x9c'.decode('cp1252') u'\u0153' >>> print '\x9c'.decode('cp1252') œ A: I had same problem with UnicodeDecodeError and i solved it with this line. Don't know if is the best way but it worked for me. str = str.decode('unicode_escape').encode('utf-8') A: This solution works nice when using Latin American accents, such as 'ñ'. I have solved this problem just by adding df = pd.read_csv(fileName,encoding='latin1') A: Just in case of someone has the same problem. I'am using vim with YouCompleteMe, failed to start ycmd with this error message, what I did is: export LC_CTYPE="en_US.UTF-8", the problem is gone. A: What can you do if you need to make a change to a file, but don’t know the file’s encoding? 
If you know the encoding is ASCII-compatible and only want to examine or modify the ASCII parts, you can open the file with the surrogateescape error handler: with open(fname, 'r', encoding="ascii", errors="surrogateescape") as f: data = f.read() A: If as you say you simply want to permit pure 7-bit ASCII, just discard any bytes which are not. There is no straightforward way to guess what the remote end intended them to represent anyway, without an explicitly specified encoding. while bytes := socket.read_line_bytes(): try: string = bytes.decode('us-ascii') except UnicodeDecodeError as exc: logger.warning('[%s] - rejected non-ASCII input %s' % (client, bytes.decode('us-ascii', errors='backslashreplace'))) socket.write(b'421 communication error - non-ASCII content rejected\r\n') continue ... A: I had the same error. For me, Python complained about the byte "0x87". I looked it up on https://bytetool.web.app/en/ascii/code/0x87/ where it told me that this byte belongs to the codec Windows-1252. I then only added this line to the beginning of my Python file: #-*- encoding: Windows-1252 -*- And all errors were gone. Before I had added this line, I had tried Pandas to import the file like this: Df = pd.read_csv(data, sep=",", engine='python', header=0, encoding='Windows-1252') but this returned an error. So I changed it back to this: Df = pd.read_csv(data, sep=",", engine='python', header=0)
UnicodeDecodeError: 'utf8' codec can't decode byte 0x9c
I have a socket server that is supposed to receive UTF-8 valid characters from clients. The problem is some clients (mainly hackers) are sending all the wrong kind of data over it. I can easily distinguish the genuine client, but I am logging to files all the data sent so I can analyze it later. Sometimes I get characters like this œ that cause the UnicodeDecodeError error. I need to be able to make the string UTF-8 with or without those characters. Update: For my particular case the socket service was an MTA and thus I only expect to receive ASCII commands such as: EHLO example.com MAIL FROM: <john.doe@example.com> ... I was logging all of this in JSON. Then some folks out there without good intentions decided to send all kind of junk. That is why for my specific case it is perfectly OK to strip the non ASCII characters.
[ "http://docs.python.org/howto/unicode.html#the-unicode-type\nstr = unicode(str, errors='replace')\n\nor\nstr = unicode(str, errors='ignore')\n\nNote: This will strip out (ignore) the characters in question returning the string without them.\nFor me this is ideal case since I'm using it as protection against non-ASCII input which is not allowed by my application.\nAlternatively: Use the open method from the codecs module to read in the file:\nimport codecs\nwith codecs.open(file_name, 'r', encoding='utf-8',\n errors='ignore') as fdata:\n\n", "Changing the engine from C to Python did the trick for me.\nEngine is C:\npd.read_csv(gdp_path, sep='\\t', engine='c')\n\n\n'utf-8' codec can't decode byte 0x92 in position 18: invalid start byte\n\nEngine is Python:\npd.read_csv(gdp_path, sep='\\t', engine='python')\n\nNo errors for me.\n", "This type of issue crops up for me now that I've moved to Python 3. I had no idea Python 2 was simply steam rolling any issues with file encoding. \nI found this nice explanation of the differences and how to find a solution after none of the above worked for me. \nhttp://python-notes.curiousefficiency.org/en/latest/python3/text_file_processing.html\nIn short, to make Python 3 behave as similarly as possible to Python 2 use:\nwith open(filename, encoding=\"latin-1\") as datafile:\n # work on datafile here\n\nHowever, read the article, there is no one size fits all solution. \n", "the first,Using get_encoding_type to get the files type of encode:\nimport os \nfrom chardet import detect\n\n# get file encoding type\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\nthe second, opening the files with the type:\nopen(current_file, 'r', encoding = get_encoding_type, errors='ignore')\n\n", ">>> '\\x9c'.decode('cp1252')\nu'\\u0153'\n>>> print '\\x9c'.decode('cp1252')\nœ\n\n", "I had same problem with UnicodeDecodeError and i solved it with this line.\nDon't know if is the best way but it worked for me.\nstr = str.decode('unicode_escape').encode('utf-8')\n\n", "This solution works nice when using Latin American accents, such as 'ñ'.\nI have solved this problem just by adding\ndf = pd.read_csv(fileName,encoding='latin1')\n\n", "Just in case of someone has the same problem. I'am using vim with YouCompleteMe, failed to start ycmd with this error message, what I did is: export LC_CTYPE=\"en_US.UTF-8\", the problem is gone.\n", "What can you do if you need to make a change to a file, but don’t know the file’s encoding? If you know the encoding is ASCII-compatible and only want to examine or modify the ASCII parts, you can open the file with the surrogateescape error handler:\nwith open(fname, 'r', encoding=\"ascii\", errors=\"surrogateescape\") as f:\n data = f.read()\n\n", "If as you say you simply want to permit pure 7-bit ASCII, just discard any bytes which are not. There is no straightforward way to guess what the remote end intended them to represent anyway, without an explicitly specified encoding.\nwhile bytes := socket.read_line_bytes():\n try:\n string = bytes.decode('us-ascii')\n except UnicodeDecodeError as exc:\n logger.warning('[%s] - rejected non-ASCII input %s' % (client, bytes.decode('us-ascii', errors='backslashreplace'))\n socket.write(b'421 communication error - non-ASCII content rejected\\r\\n')\n continue\n ...\n\n", "I had the same error.\nFor me, Python complained about the byte \"0x87\". 
I looked it up on https://bytetool.web.app/en/ascii/code/0x87/, where it told me that this byte belongs to the codec Windows-1252.\nI then only added this line to the beginning of my Python file:\n#-*- encoding: Windows-1252 -*-\"\n\nAnd all errors were gone. Before adding this line, I had tried to import the file with Pandas like this:\nDf = pd.read_csv(data, sep=\",\", engine='python', header=0, encoding='Windows-1252')\n\nbut this returned an error. So I changed it back to this:\nDf = pd.read_csv(data, sep=\",\", engine='python', header=0)\n\n" ]
[ 420, 132, 76, 38, 37, 30, 18, 3, 2, 1, 0 ]
[ "\ndjango-storage is implicitly supported read byte file in text mode till django-storage == 1.8\nRemoved support in https://github.com/jschneier/django-storages/pull/657\nNeed to specify the binary mode for reading byte files.\n\n" ]
[ -1 ]
[ "linux", "python", "python_unicode" ]
stackoverflow_0012468179_linux_python_python_unicode.txt
Q: Placing two trailing icons in ListTile I want to place two icons, side by side on the "trailing" side of a ListTile. I tried adding a Row widget with the two icons inside, but it completely messed up the layout of the entire ListTile, making it unusable. Is there any way to expand the space allocated for the trailing part? Here's the code: import 'package:flutter/material.dart'; void main() => runApp(new MyApp()); class MyApp extends StatelessWidget { @override Widget build(BuildContext context) { return MaterialApp( title: 'Welcome to Flutter', home: Scaffold( appBar: AppBar( title: Text('Welcome to Flutter'), ), body: ListView( children: <Widget>[ ListTile( leading: Icon(Icons.play_arrow,), title: Text("This is a title"), subtitle: Text("This is subtitle"), trailing: Row( children: <Widget>[ Icon(Icons.flight), Icon(Icons.flight_land) ]), ) ] ), ), ); } } This is what it looks like: A: Adding mainAxisSize: MainAxisSize.min to the Row() instance fixes the issue. A: You can simply use Wrap in trailing ListTile( title: Text("This is my ListTile"), trailing: Wrap( spacing: 12, // space between two icons children: <Widget>[ Icon(Icons.call), // icon-1 Icon(Icons.message), // icon-2 ], ), ) A: Try this code. I think this is working correctly: trailing: FittedBox( fit: BoxFit.fill, child: Row( children: <Widget>[ Icon(Icons.flight), Icon(Icons.flight_land), ], ), ), A: I took advantage of the FittedBox solution above and solved my problem by displaying a TextButton and an IconButton when the screen is in landscape, and only an IconButton in portrait mode: trailing: MediaQuery.of(context).size.width > 480 ? FittedBox( fit: BoxFit.fill, child: Row( children: <Widget>[ TextButton( style: TextButton.styleFrom( // padding: const EdgeInsets.all(16.0), primary: Theme.of(context).errorColor, textStyle: const TextStyle( fontSize: 14, fontWeight: FontWeight.bold), ), onPressed: () => onRemove(tr.id), child: const Text('Excluir'), ), IconButton( icon: Icon(Icons.delete), color: Theme.of(context).errorColor, onPressed: () => onRemove(tr.id), ), ], ), ) : IconButton( icon: Icon(Icons.delete), color: Theme.of(context).errorColor, onPressed: () => onRemove(tr.id), ), A: Giving a negative value to spacing did the trick: trailing: Wrap( spacing: -16, children: [ IconButton( icon: const Icon(Icons.edit), onPressed: () {}, ), IconButton( icon: const Icon( Icons.delete, color: Colors.redAccent, ), onPressed: () {}, ), ], ),
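For reference, the accepted one-line fix applied to the question's own widget — a minimal sketch rather than code from the original thread:

ListTile(
  leading: Icon(Icons.play_arrow),
  title: Text("This is a title"),
  subtitle: Text("This is subtitle"),
  trailing: Row(
    mainAxisSize: MainAxisSize.min, // shrink-wrap the Row instead of letting it fill the tile
    children: <Widget>[
      Icon(Icons.flight),
      Icon(Icons.flight_land),
    ],
  ),
)

Without mainAxisSize: MainAxisSize.min, the Row asks for unbounded width inside the tile's trailing slot, which is what breaks the layout.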
Placing two trailing icons in ListTile
I want to place two icons, side by side on the "trailing" side of a ListTile. I tried adding a Row widget with the two icons inside, but it completely messed up the layout of the entire ListTile, making it unusable. Is there any way to expand the space allocated for the trailing part? Here's the code: import 'package:flutter/material.dart'; void main() => runApp(new MyApp()); class MyApp extends StatelessWidget { @override Widget build(BuildContext context) { return MaterialApp( title: 'Welcome to Flutter', home: Scaffold( appBar: AppBar( title: Text('Welcome to Flutter'), ), body: ListView( children: <Widget>[ ListTile( leading: Icon(Icons.play_arrow,), title: Text("This is a title"), subtitle: Text("This is subtitle"), trailing: Row( children: <Widget>[ Icon(Icons.flight), Icon(Icons.flight_land) ]), ) ] ), ), ); } } This is what it looks like:
[ "Adding mainAxisSize: MainAxisSize.min to the Row() instance fixes the issue.\n\n", "You can simply use Wrap in trailing\nListTile(\n title: Text(\"This is my ListTile\"),\n trailing: Wrap(\n spacing: 12, // space between two icons\n children: <Widget>[\n Icon(Icons.call), // icon-1\n Icon(Icons.message), // icon-2\n ],\n ),\n)\n\n\n", "Try this code. I think this is working correctly:\ntrailing: FittedBox(\n fit: BoxFit.fill,\n child: Row(\n children: <Widget>[\n Icon(Icons.flight),\n Icon(Icons.flight_land),\n ],\n ),\n ),\n\n", "I took advantage of the FittedBox solution left above and solved my problem by displaying a TextButton and an IconButton when the screen is in landscape and when in portrait mode, only IconButton\ntrailing: MediaQuery.of(context).size.width > 480\n ? FittedBox(\n fit: BoxFit.fill,\n child: Row(\n children: <Widget>[\n TextButton(\n style: TextButton.styleFrom(\n // padding: const EdgeInsets.all(16.0),\n primary: Theme.of(context).errorColor,\n textStyle: const TextStyle(\n fontSize: 14,\n fontWeight: FontWeight.bold),\n ),\n onPressed: () => onRemove(tr.id),\n child: const Text('Excluir'),\n ),\n IconButton(\n icon: Icon(Icons.delete),\n color: Theme.of(context).errorColor,\n onPressed: () => onRemove(tr.id),\n ),\n ],\n ),\n )\n : IconButton(\n icon: Icon(Icons.delete),\n color: Theme.of(context).errorColor,\n onPressed: () => onRemove(tr.id),\n ),\n\n", "Given negative value to spacing did the trick:\n trailing: Wrap(\n spacing: -16,\n children: [\n IconButton(\n icon: const Icon(Icons.edit),\n onPressed: () {},\n ),\n IconButton(\n icon: const Icon(\n Icons.delete,\n color: Colors.redAccent,\n ),\n onPressed: () {},\n ),\n ],\n ),\n\n\n" ]
[ 182, 85, 26, 1, 0 ]
[]
[]
[ "flutter", "flutter_layout" ]
stackoverflow_0054548853_flutter_flutter_layout.txt
Q: python formulas returning 0s So I have basic formulas set up to receive numbers and then convert them, but when running the program the converted formulas aren't calculating dollars = 0 pounds = 0 tempF = 0 tempC = 0 globe = "\U0001F30D" euros = dollars*.95 kilograms = pounds/2.2 tempF = tempC* 9/5+32 print ("How many U.S dollars can you afford to spend on your trip?: ") dollars = float(input()) print("How many pounds of chocoloate will you be buying?:") pounds = float(input()) print("What is the tempature in degrees Celsius on the European news?:") tempC = float(input()) print ("ITINERARY NOTES") print ("------------------------------------------------------") print (globe + " you have {:.2f} euros to spend." .format(euros)) print (globe + " Plan to buy {:.2f} of chocolate for family and friends".format(kilograms)) print (globe + " The tempature in Europe is {} degrees F, So dress appropriately.".format(tempF)) How many U.S dollars can you afford to spend on your trip?: 100 How many pounds of chocoloate will you be buying?: 5 What is the tempature in degrees Celsius on the European news?: 15 ITINERARY NOTES ------------------------------------------------------ you have 0.00 euros to spend. Plan to buy 0.00 of chocolate for family and friends The temperature in Europe is 32.0 degrees F, So dress appropriately. A: Try calculating the results after you input the data, not before that A: How I would do it def get_input(): print ("How many U.S dollars can you afford to spend on your trip?: ") dollars = float(input()) print("How many pounds of chocoloate will you be buying?:") pounds = float(input()) print("What is the tempature in degrees Celsius on the European news?:") tempC = float(input()) return [dollars, pounds, tempC] def dollars_to_euros(dollars): return 0.95 * dollars def pounds_to_kilograms(pounds): return pounds / 2.2 def tempC_to_tempF(tempC): return tempC * 9 / 5 + 32 globe = "\U0001F30D" [dollars, pounds, tempC] = get_input() #move formula calculation after receiving the input euros = dollars_to_euros(dollars) kilograms = pounds_to_kilograms(pounds) tempF = tempC_to_tempF(tempC) print ("ITINERARY NOTES") print ("------------------------------------------------------") print (globe + " you have {:.2f} euros to spend." .format(euros)) print (globe + " Plan to buy {:.2f} of chocolate for family and friends".format(kilograms)) print (globe + " The tempature in Europe is {} degrees F, So dress appropriately.".format(tempF)) Minimal changes to your existing snippet to make it work: dollars = 0 pounds = 0 tempF = 0 tempC = 0 globe = "\U0001F30D" print ("How many U.S dollars can you afford to spend on your trip?: ") dollars = float(input()) print("How many pounds of chocoloate will you be buying?:") pounds = float(input()) print("What is the tempature in degrees Celsius on the European news?:") tempC = float(input()) #move formula calculation after receiving the input euros = dollars*.95 kilograms = pounds/2.2 tempF = tempC* 9/5+32 print ("ITINERARY NOTES") print ("------------------------------------------------------") print (globe + " you have {:.2f} euros to spend." .format(euros)) print (globe + " Plan to buy {:.2f} of chocolate for family and friends".format(kilograms)) print (globe + " The tempature in Europe is {} degrees F, So dress appropriately.".format(tempF))
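The underlying rule, shown as a tiny standalone example (variable names are illustrative): a plain assignment evaluates its right-hand side once, at the moment it runs — it does not create a formula that re-computes when its inputs change later.

pounds = 0
kilograms = pounds / 2.2   # evaluated right now: kilograms is 0.0
pounds = 5                 # reassigning pounds does NOT recompute kilograms
print(kilograms)           # still 0.0

This is why the conversions must run after the input() calls, as both answers do.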
python formulas returning 0s
So I have basic formulas set up to receive numbers and then convert them, but when running the program the converted formulas aren't calculating dollars = 0 pounds = 0 tempF = 0 tempC = 0 globe = "\U0001F30D" euros = dollars*.95 kilograms = pounds/2.2 tempF = tempC* 9/5+32 print ("How many U.S dollars can you afford to spend on your trip?: ") dollars = float(input()) print("How many pounds of chocoloate will you be buying?:") pounds = float(input()) print("What is the tempature in degrees Celsius on the European news?:") tempC = float(input()) print ("ITINERARY NOTES") print ("------------------------------------------------------") print (globe + " you have {:.2f} euros to spend." .format(euros)) print (globe + " Plan to buy {:.2f} of chocolate for family and friends".format(kilograms)) print (globe + " The tempature in Europe is {} degrees F, So dress appropriately.".format(tempF)) How many U.S dollars can you afford to spend on your trip?: 100 How many pounds of chocoloate will you be buying?: 5 What is the tempature in degrees Celsius on the European news?: 15 ITINERARY NOTES ------------------------------------------------------ you have 0.00 euros to spend. Plan to buy 0.00 of chocolate for family and friends The temperature in Europe is 32.0 degrees F, So dress appropriately.
[ "Try calculating the results after you input the data, not before that\n", "How I would do it\ndef get_input():\n print (\"How many U.S dollars can you afford to spend on your trip?: \")\n dollars = float(input())\n\n print(\"How many pounds of chocoloate will you be buying?:\")\n pounds = float(input())\n\n print(\"What is the tempature in degrees Celsius on the European news?:\")\n tempC = float(input())\n return [dollars, pounds, tempC]\n\ndef dollars_to_euros(dollars):\n return 0.95 * dollars\n\ndef pounds_to_kilograms(pounds):\n return pounds / 2.2\n\ndef tempC_to_tempF(tempC):\n return tempC * 9 / 5 + 32\n\nglobe = \"\\U0001F30D\"\n\n[dollars, pounds, tempC] = get_input()\n\n#move formula calculation after receiving the input\neuros = dollars_to_euros(dollars)\nkilograms = pounds_to_kilograms(pounds)\ntempF = tempC_to_tempF(tempC)\n\n\nprint (\"ITINERARY NOTES\")\nprint (\"------------------------------------------------------\")\n\nprint (globe + \" you have {:.2f} euros to spend.\" .format(euros))\nprint (globe + \" Plan to buy {:.2f} of chocolate for family and friends\".format(kilograms))\nprint (globe + \" The tempature in Europe is {} degrees F, So dress appropriately.\".format(tempF))\n\nMinimal changes to your existing snippet to make it work:\ndollars = 0\npounds = 0\ntempF = 0\ntempC = 0\nglobe = \"\\U0001F30D\"\n\n\nprint (\"How many U.S dollars can you afford to spend on your trip?: \")\ndollars = float(input())\n\nprint(\"How many pounds of chocoloate will you be buying?:\")\npounds = float(input())\n\nprint(\"What is the tempature in degrees Celsius on the European news?:\")\ntempC = float(input())\n\n#move formula calculation after receiving the input\neuros = dollars*.95\nkilograms = pounds/2.2\ntempF = tempC* 9/5+32 \n\n\nprint (\"ITINERARY NOTES\")\nprint (\"------------------------------------------------------\")\n\nprint (globe + \" you have {:.2f} euros to spend.\" .format(euros))\nprint (globe + \" Plan to buy {:.2f} of chocolate for family and friends\".format(kilograms))\nprint (globe + \" The tempature in Europe is {} degrees F, So dress appropriately.\".format(tempF))\n\n" ]
[ 1, 0 ]
[]
[]
[ "python", "python_3.x" ]
stackoverflow_0074679904_python_python_3.x.txt
Q: How to pass a json object into a test controller in springboot? I am trying to create a controller test in springboot that tests whether the controller method adds a new line into my database (I have configured a h2 in memory database). I keep getting errors. I want to pass the data into the controller in json form like this: {"type":"epidemics", "questionIndex":21, "choiceNum":4, "question":"second question", "choiceA": "no1", "choiceB":"yes2", "choiceC":"no3", "choiceD":"yes4", "correct":"no1", "hint":"second answer" } but I keep getting errors. My code is below: controller @PostMapping("/adminadd") public QuizDTO addQuestion(@RequestBody QuizDTO quizDTO) { return quizRepo.addQuestion(quizDTO); } jdbc repo private static final String INSERT_QUESTION = "INSERT INTO Quiz(type,questionIndex,choiceNum,question,choiceA,choiceB,choiceC,choiceD,correct,hint) values(?,?,?,?,?,?,?,?,?,?)"; @Override public QuizDTO addQuestion(QuizDTO quizDTO) { jdbcTemplate.update(INSERT_QUESTION, quizDTO.getType(), quizDTO.getquestionIndex(), quizDTO.getChoiceNum(), quizDTO.getQuestion(), quizDTO.getChoiceA(), quizDTO.getChoiceB(), quizDTO.getChoiceC(), quizDTO.getChoiceD(), quizDTO.getCorrect(), quizDTO.getHint()); return quizDTO; } Test @RunWith(SpringRunner.class) @WebMvcTest(QuizController.class) //@AutoConfigureMockMvc public class QuizControllerTest { @MockBean private QuizRepository quizRepository; @Autowired private MockMvc mockMvc; @Test void shouldAddQuestion() throws Exception { QuizDTO quizDTO = new QuizDTO(104, "epidemics", 21, 4, "Test Question?", "A", "B", "C", "D", "hint"); mockMvc.perform(post("/adminadd").contentType(MediaType.APPLICATION_JSON_VALUE) .param("ID", "104") .param("Type", "Epidemics") .param("questionIndex", "21") .param("choiceNum", "4") .param("question", "Test Q") .param("choiceA", "A") .param("choiceB", "B") .param("choiceC", "C") .param("choiceD", "D") .param("correct", "B") .param("Hint", "hint")) .andExpect(status().isOk()) .andExpect(content().string(containsString("Test Q")));; } } error: MockHttpServletRequest: HTTP Method = POST Request URI = /adminadd Parameters = {ID=[104], Type=[Epidemics], questionIndex=[21], choiceNum=[4], question=[Test Q], choiceA=[A], choiceB=[B], choiceC=[C], choiceD=[D], correct=[B], Hint=[hint]} Headers = [Content-Type:"application/json;charset=UTF-8"] Body = null Session Attrs = {org.springframework.security.web.csrf.HttpSessionCsrfTokenRepository.CSRF_TOKEN=org.springframework.security.web.csrf.DefaultCsrfToken@22899683} Handler: Type = null Async: Async started = false Async result = null Resolved Exception: Type = null ModelAndView: View name = null View = null Model = null FlashMap: Attributes = null MockHttpServletResponse: Status = 403 Error message = Forbidden Headers = [X-Content-Type-Options:"nosniff", X-XSS-Protection:"1; mode=block", Cache-Control:"no-cache, no-store, max-age=0, must-revalidate", Pragma:"no-cache", Expires:"0", X-Frame-Options:"DENY"] Content type = null Body = Forwarded URL = null Redirected URL = null Cookies = [] Status expected:<200> but was:<403> A: To pass a JSON object to a test controller in Spring Boot, you can use the @RequestBody annotation along with a POST request. 
Here is an example: @PostMapping("/test") public ResponseEntity<String> test(@RequestBody MyJsonObject json) { // Do something with the JSON object return new ResponseEntity<String>("Success", HttpStatus.OK); } In this example, the MyJsonObject class should have fields that match the keys in your JSON object, and the method will be called when you send a POST request to the /test endpoint with a JSON object as the request body. You can then use the ObjectMapper class from the Jackson library to convert the JSON object into a Java object. For example: ObjectMapper mapper = new ObjectMapper(); MyJsonObject json = mapper.readValue(jsonString, MyJsonObject.class); Alternatively, you can use the @JsonProperty annotation on the fields in your MyJsonObject class to specify the keys in the JSON object that should be mapped to each field. For example: public class MyJsonObject { @JsonProperty("field1") private String field1; @JsonProperty("field2") private String field2; // Getters and setters for the fields } You can then use the ObjectMapper class as before to convert the JSON object into an instance of the MyJsonObject class. ObjectMapper mapper = new ObjectMapper(); MyJsonObject json = mapper.readValue(jsonString, MyJsonObject.class);
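That said, the 403 in the question's own test output has a more direct cause: the DTO is sent as request params instead of a JSON body, and Spring Security's CSRF filter rejects the unprotected POST. A hedged sketch of the test fixed along those lines — it assumes Jackson's ObjectMapper, spring-security-test on the classpath, and the usual static imports (MockMvcRequestBuilders.post, SecurityMockMvcRequestPostProcessors.csrf, MockMvcResultMatchers.*, Mockito.when/any):

ObjectMapper objectMapper = new ObjectMapper();
QuizDTO quizDTO = new QuizDTO(104, "epidemics", 21, 4, "Test Question?",
        "A", "B", "C", "D", "hint");

// Stub the mocked repository so the controller echoes the DTO back.
when(quizRepository.addQuestion(any(QuizDTO.class)))
        .thenAnswer(invocation -> invocation.getArgument(0));

mockMvc.perform(post("/adminadd")
                .with(csrf()) // satisfies the CSRF check behind the 403
                .contentType(MediaType.APPLICATION_JSON)
                .content(objectMapper.writeValueAsString(quizDTO)))
        .andExpect(status().isOk())
        .andExpect(jsonPath("$.question").value("Test Question?"));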
How to pass a json object into a test controller in springboot?
I am trying to create a controller test in springboot that tests whether the controller method adds a new line into my database (I have configured a h2 in memory database). I keep getting errors. I want to pass the data into the controller in json form like this: {"type":"epidemics", "questionIndex":21, "choiceNum":4, "question":"second question", "choiceA": "no1", "choiceB":"yes2", "choiceC":"no3", "choiceD":"yes4", "correct":"no1", "hint":"second answer" } but I keep getting errors. My code is below: controller @PostMapping("/adminadd") public QuizDTO addQuestion(@RequestBody QuizDTO quizDTO) { return quizRepo.addQuestion(quizDTO); } jdbc repo private static final String INSERT_QUESTION = "INSERT INTO Quiz(type,questionIndex,choiceNum,question,choiceA,choiceB,choiceC,choiceD,correct,hint) values(?,?,?,?,?,?,?,?,?,?)"; @Override public QuizDTO addQuestion(QuizDTO quizDTO) { jdbcTemplate.update(INSERT_QUESTION, quizDTO.getType(), quizDTO.getquestionIndex(), quizDTO.getChoiceNum(), quizDTO.getQuestion(), quizDTO.getChoiceA(), quizDTO.getChoiceB(), quizDTO.getChoiceC(), quizDTO.getChoiceD(), quizDTO.getCorrect(), quizDTO.getHint()); return quizDTO; } Test @RunWith(SpringRunner.class) @WebMvcTest(QuizController.class) //@AutoConfigureMockMvc public class QuizControllerTest { @MockBean private QuizRepository quizRepository; @Autowired private MockMvc mockMvc; @Test void shouldAddQuestion() throws Exception { QuizDTO quizDTO = new QuizDTO(104, "epidemics", 21, 4, "Test Question?", "A", "B", "C", "D", "hint"); mockMvc.perform(post("/adminadd").contentType(MediaType.APPLICATION_JSON_VALUE) .param("ID", "104") .param("Type", "Epidemics") .param("questionIndex", "21") .param("choiceNum", "4") .param("question", "Test Q") .param("choiceA", "A") .param("choiceB", "B") .param("choiceC", "C") .param("choiceD", "D") .param("correct", "B") .param("Hint", "hint")) .andExpect(status().isOk()) .andExpect(content().string(containsString("Test Q")));; } } error: MockHttpServletRequest: HTTP Method = POST Request URI = /adminadd Parameters = {ID=[104], Type=[Epidemics], questionIndex=[21], choiceNum=[4], question=[Test Q], choiceA=[A], choiceB=[B], choiceC=[C], choiceD=[D], correct=[B], Hint=[hint]} Headers = [Content-Type:"application/json;charset=UTF-8"] Body = null Session Attrs = {org.springframework.security.web.csrf.HttpSessionCsrfTokenRepository.CSRF_TOKEN=org.springframework.security.web.csrf.DefaultCsrfToken@22899683} Handler: Type = null Async: Async started = false Async result = null Resolved Exception: Type = null ModelAndView: View name = null View = null Model = null FlashMap: Attributes = null MockHttpServletResponse: Status = 403 Error message = Forbidden Headers = [X-Content-Type-Options:"nosniff", X-XSS-Protection:"1; mode=block", Cache-Control:"no-cache, no-store, max-age=0, must-revalidate", Pragma:"no-cache", Expires:"0", X-Frame-Options:"DENY"] Content type = null Body = Forwarded URL = null Redirected URL = null Cookies = [] Status expected:<200> but was:<403>
[ "To pass a JSON object to a test controller in Spring Boot, you can use the @RequestBody annotation along with a POST request. Here is an example:\n@PostMapping(\"/test\")\npublic ResponseEntity<String> test(@RequestBody MyJsonObject json) {\n // Do something with the JSON object\n return new ResponseEntity<String>(\"Success\", HttpStatus.OK);\n}\n\nIn this example, the MyJsonObject class should have fields that match the keys in your JSON object, and the method will be called when you send a POST request to the /test endpoint with a JSON object as the request body.\nYou can then use the ObjectMapper class from the Jackson library to convert the JSON object into a Java object. For example:\nObjectMapper mapper = new ObjectMapper();\nMyJsonObject json = mapper.readValue(jsonString, MyJsonObject.class);\n\nAlternatively, you can use the @JsonProperty annotation on the fields in your MyJsonObject class to specify the keys in the JSON object that should be mapped to each field. For example:\npublic class MyJsonObject {\n @JsonProperty(\"field1\")\n private String field1;\n \n @JsonProperty(\"field2\")\n private String field2;\n \n // Getters and setters for the fields\n}\n\nYou can then use the ObjectMapper class as before to convert the JSON object into an instance of the MyJsonObject class.\nObjectMapper mapper = new ObjectMapper();\nMyJsonObject json = mapper.readValue(jsonString, MyJsonObject.class);\n\n" ]
[ 1 ]
[]
[]
[ "controller", "java", "json", "spring_boot", "testing" ]
stackoverflow_0074679982_controller_java_json_spring_boot_testing.txt
Q: Getting "Failed to convert a NumPy array to a Tensor (Unsupported object type list)." From the whole week I'm training my AI model but it is facing some this issue of Failed to convert Numpy array to a tensor my I'm using the dataset I created for this model containing 100k+ movie plots but again and again its showing the same issue when I call "model.fit(...)" Error This is the code I'm using # Importing the dataset filename = "MoviePlots.csv" data = pd.read_csv(filename, encoding= 'unicode_escape') # Keeping only the neccessary columns data = data[['Plot']] # Keep only rows where 'Plot' is a string data = data[data['Plot'].apply(lambda x: isinstance(x, str))] # Clean the data data['Plot'] = data['Plot'].apply(lambda x: x.lower()) data['Plot'] = data['Plot'].apply((lambda x: re.sub('[^a-zA-z0-9\s]', '', x))) # Create the tokenizer tokenizer = Tokenizer(num_words=5000, split=" ") tokenizer.fit_on_texts(data['Plot'].values) # Save the tokenizer with open('tokenizer.pickle', 'wb') as handle: pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL) # Create the sequences X = tokenizer.texts_to_sequences(data['Plot'].values) Y = pad_sequences(X) # Create the model model = Sequential() model.add(Embedding(5000, 256, input_length=Y.shape[1])) model.add(Bidirectional(LSTM(256, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))) model.add(LSTM(256, return_sequences=True, dropout=0.1, recurrent_dropout=0.1)) model.add(LSTM(256, dropout=0.1, recurrent_dropout=0.1)) model.add(Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.01))) model.add(Dense(5000, activation='softmax')) # Compile the model model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01), metrics=['accuracy']) # Train the model model.fit(X, X, epochs=500, batch_size=256, verbose=1) I have tried several other methods but the issue remains the same epochs=500 model.fit(X, X, verbose=2) Any help will be really appreciated! Thanks!!! A: there are many possible ways one of them is to create as a dataset as your error message indicated a mismatched datatype for model.fit() Sample: Transform input word by vocab and match their string bytes, or tokenize them. 
import tensorflow as tf import tensorflow_text as tft import json input_word = tf.constant(' \'Cause it\'s easy as an ice cream sundae Slipping outta your hand into the dirt Easy as an ice cream sundae Every dancer gets a little hurt Easy as an ice cream sundae Slipping outta your hand into the dirt Easy as an ice cream sundae Every dancer gets a little hurt Easy as an ice cream sundae Oh, easy as an ice cream sundae ') vocab = [ "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "_", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"] layer = tf.keras.layers.StringLookup(vocabulary=vocab) sequences_mapping_string = layer(tf.strings.bytes_split(input_word)) """"""""""""""""""""""""""""""""""""""""""""""""""""""""" : Method 1 create label from map it with vocaburary """"""""""""""""""""""""""""""""""""""""""""""""""""""""" print( 'input_word: ' + str(input_word) ) print( " " ) print( tf.strings.bytes_split(input_word) ) print( sequences_mapping_string ) """"""""""""""""""""""""""""""""""""""""""""""""""""""""" : Method 2 create label from it tokenizer """"""""""""""""""""""""""""""""""""""""""""""""""""""""" text = "Cause its easy as an ice cream sundae Slipping outta your hand" tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=10000, oov_token='oov', filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True,) tokenizer.fit_on_texts([text]) i_count = tf.strings.split([text])[0].shape[0] + 1 aDict = json.loads(tokenizer.to_json()) text_input = tf.constant([''], shape=()) """"""""""""""""""""""""""""""""""""""""""""""""""""""""" : Class / Functions """"""""""""""""""""""""""""""""""""""""""""""""""""""""" def auto_paddings( data, max_sequences=15 ): data = tf.constant( data, shape=(data.shape[0], 1) ) paddings = tf.constant([[1, 15 - data.shape[0] - 1], [0, 0]]) padd_data = tf.pad( data, paddings, "CONSTANT" ) padd_data = tf.constant( padd_data, shape=(15, 1) ).numpy() return padd_data input_word = tf.zeros([1, 15, 1], dtype=tf.int64) input_label = tf.ones([1, 1, 1], dtype=tf.int64) for i in range(i_count): word = json.loads(aDict['config']['index_word'])[str(i + 1)] i_word = layer(tf.strings.bytes_split(word)) padd_data = tf.constant(auto_paddings( i_word, 15 ), shape=(1, 15, 1)) index = json.loads(aDict['config']['word_index'])[word] if i > 0: input_word = tf.experimental.numpy.vstack([input_word, padd_data]) input_label = tf.experimental.numpy.vstack([input_label, tf.constant(index, shape=(1, 1, 1))]) dataset = tf.data.Dataset.from_tensors(( input_word, input_label )) for d in dataset: print(d) print( " ==================================================== " ) Output: Input word as a string input_word: tf.Tensor(b" 'Cause it's easy as an ice cream sundae Slipping outta your hand into the dirt Easy as an ice cream sundae Every dancer gets a little hurt Easy as an ice cream sundae Slipping outta your hand into the dirt Easy as an ice cream sundae Every dancer gets a little hurt Easy as an ice cream sundae Oh, easy as an ice cream sundae ", shape=(), dtype=string) Output: String to bytes as a splitters. tf.Tensor( [b' ' b"'" b'C' b'a' b'u' b's' b'e' b' ' b'i' b't' b"'" b's' b' ' b'e' b'a' b's' b'y' b' ' b'a' b's' b' ' b'a' b'n' b' ' b'i' b'c' b'e' b' ' ... b'n' b'd' b'a' b'e' b' '], shape=(327,), dtype=string) Output: Sequence mapping a string to phones. 
tf.Tensor( [ 0 0 30 1 21 19 5 0 9 20 0 19 0 5 1 19 25 0 1 19 0 1 14 0 9 3 5 0 3 18 5 1 13 0 19 21 14 4 1 5 0 46 12 9 16 16 9 14 ... 5 0 3 18 5 1 13 0 19 21 14 4 1 5 0], shape=(327,), dtype=int64) Output: A string input, required of list conversion or array-like none repeats. Cause its easy as an ice cream sundae Slipping outta your hand Output: A dataset creates from input_word and name label. (<tf.Tensor: shape=(13, 15, 1), dtype=int64, numpy= array([[[ 0], [ 0], ... [ 0]]], dtype=int64)>, <tf.Tensor: shape=(13, 1, 1), dtype=int64, numpy= array([[[ 1]], [[[ 2]] ... [[13]]], dtype=int64)>) ==================================================== Application: Word input compares process from slide X windows channel. dataset = tf.data.Dataset.from_tensors( tf.strings.bytes_split(input_word) ) window_size = 6 dataset = dataset.map(lambda x: tft.sliding_window(x, width=window_size, axis=0)).flat_map(tf.data.Dataset.from_tensor_slices) Application: Wireless breaks. mapping_vocab = [ "_", "I", "l", "o", "v", "e", "c", "a", "t", "s" ] string_matching = [ 27, 9, 12, 15, 22, 5, 3, 1, 20, 19 ] string_matching_reverse = [ 1/27, 1/9, 1/12, 1/15, 1/22, 1/5, 1/3, 1/1, 1/20, 1/19 ] print( tf.math.multiply( tf.constant(string_matching, dtype=tf.float32), tf.constant(string_matching_reverse, dtype=tf.float32 ), name=None ) ) Output: encode and decodes, each number represents bytes you may replace with trained parameters. encode: tf.Tensor([[27 27 27 9 12 15 22 5 3 1 20 19]], shape=(1, 12), dtype=int64) decode: tf.Tensor([[b'_' b'_' b'_' b'I' b'l' b'o' b'v' b'e' b'c' b'a' b't' b's']], shape=(1, 12), dtype=string) tf.Tensor([1. 1. 1. 1. 1. 1. 1. 1. 1. 1.], shape=(10,), dtype=float32)
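Whatever target design is chosen, the direct cause of the question's error is simpler than the long answer above suggests: texts_to_sequences returns a ragged list of lists, and fit() cannot turn that into a tensor. A minimal standalone repro of the conversion rule (not taken from the question's code):

import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences

ragged = [[1, 2, 3], [4, 5]]             # rows of different length, like texts_to_sequences output
# tf.convert_to_tensor(ragged)           # -> ValueError: non-rectangular / unsupported object type
rect = pad_sequences(ragged)             # array([[1, 2, 3], [0, 4, 5]], dtype=int32)
print(tf.convert_to_tensor(rect).shape)  # (2, 3) — convertible once padded

In the question's code the padded array is already computed (Y = pad_sequences(X)), but the raw ragged X is what gets passed to fit(); feeding the padded array (with a target shaped to match the loss) avoids the conversion error.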
Getting "Failed to convert a NumPy array to a Tensor (Unsupported object type list)."
For the whole week I have been training my AI model, but it keeps failing with this "Failed to convert a NumPy array to a Tensor" issue. I'm using a dataset I created for this model containing 100k+ movie plots, but again and again it shows the same issue when I call "model.fit(...)" Error This is the code I'm using # Importing the dataset filename = "MoviePlots.csv" data = pd.read_csv(filename, encoding= 'unicode_escape') # Keeping only the necessary columns data = data[['Plot']] # Keep only rows where 'Plot' is a string data = data[data['Plot'].apply(lambda x: isinstance(x, str))] # Clean the data data['Plot'] = data['Plot'].apply(lambda x: x.lower()) data['Plot'] = data['Plot'].apply((lambda x: re.sub('[^a-zA-z0-9\s]', '', x))) # Create the tokenizer tokenizer = Tokenizer(num_words=5000, split=" ") tokenizer.fit_on_texts(data['Plot'].values) # Save the tokenizer with open('tokenizer.pickle', 'wb') as handle: pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL) # Create the sequences X = tokenizer.texts_to_sequences(data['Plot'].values) Y = pad_sequences(X) # Create the model model = Sequential() model.add(Embedding(5000, 256, input_length=Y.shape[1])) model.add(Bidirectional(LSTM(256, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))) model.add(LSTM(256, return_sequences=True, dropout=0.1, recurrent_dropout=0.1)) model.add(LSTM(256, dropout=0.1, recurrent_dropout=0.1)) model.add(Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.01))) model.add(Dense(5000, activation='softmax')) # Compile the model model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01), metrics=['accuracy']) # Train the model model.fit(X, X, epochs=500, batch_size=256, verbose=1) I have tried several other methods, but the issue remains the same: epochs=500 model.fit(X, X, verbose=2) Any help will be really appreciated! Thanks!!!
[ "there are many possible ways one of them is to create as a dataset as your error message indicated a mismatched datatype for model.fit()\nSample: Transform input word by vocab and match their string bytes, or tokenize them.\nimport tensorflow as tf\nimport tensorflow_text as tft\n\nimport json\n\ninput_word = tf.constant(' \\'Cause it\\'s easy as an ice cream sundae Slipping outta your hand into the dirt Easy as an ice cream sundae Every dancer gets a little hurt Easy as an ice cream sundae Slipping outta your hand into the dirt Easy as an ice cream sundae Every dancer gets a little hurt Easy as an ice cream sundae Oh, easy as an ice cream sundae ')\nvocab = [ \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\", \"_\", \n\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\"]\nlayer = tf.keras.layers.StringLookup(vocabulary=vocab)\nsequences_mapping_string = layer(tf.strings.bytes_split(input_word))\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n: Method 1 create label from map it with vocaburary\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nprint( 'input_word: ' + str(input_word) )\nprint( \" \" )\nprint( tf.strings.bytes_split(input_word) )\nprint( sequences_mapping_string )\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n: Method 2 create label from it tokenizer\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\ntext = \"Cause its easy as an ice cream sundae Slipping outta your hand\"\ntokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=10000, oov_token='oov', filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n', lower=True,)\ntokenizer.fit_on_texts([text])\n\ni_count = tf.strings.split([text])[0].shape[0] + 1\naDict = json.loads(tokenizer.to_json())\ntext_input = tf.constant([''], shape=())\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n: Class / Functions\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\ndef auto_paddings( data, max_sequences=15 ):\n data = tf.constant( data, shape=(data.shape[0], 1) )\n paddings = tf.constant([[1, 15 - data.shape[0] - 1], [0, 0]])\n padd_data = tf.pad( data, paddings, \"CONSTANT\" )\n padd_data = tf.constant( padd_data, shape=(15, 1) ).numpy()\n return padd_data\n\n\ninput_word = tf.zeros([1, 15, 1], dtype=tf.int64)\ninput_label = tf.ones([1, 1, 1], dtype=tf.int64)\n\nfor i in range(i_count):\n word = json.loads(aDict['config']['index_word'])[str(i + 1)]\n i_word = layer(tf.strings.bytes_split(word))\n padd_data = tf.constant(auto_paddings( i_word, 15 ), shape=(1, 15, 1))\n \n index = json.loads(aDict['config']['word_index'])[word]\n\n if i > 0:\n input_word = tf.experimental.numpy.vstack([input_word, padd_data])\n input_label = tf.experimental.numpy.vstack([input_label, tf.constant(index, shape=(1, 1, 1))])\n\n\ndataset = tf.data.Dataset.from_tensors(( input_word, input_label ))\nfor d in dataset:\n print(d)\n\nprint( \" ==================================================== \" 
)\n\nOutput: Input word as a string\ninput_word: tf.Tensor(b\" 'Cause it's easy as an ice cream sundae Slipping outta your hand into the dirt Easy as an ice cream sundae Every dancer gets a little hurt Easy as an ice cream sundae Slipping outta your hand into the dirt Easy as an ice cream sundae Every dancer gets a little hurt Easy as an ice cream sundae Oh, easy as an ice cream sundae \", shape=(), dtype=string)\n\nOutput: String to bytes as a splitters.\ntf.Tensor(\n[b' ' b\"'\" b'C' b'a' b'u' b's' b'e' b' ' b'i' b't' b\"'\" b's' b' ' b'e'\n b'a' b's' b'y' b' ' b'a' b's' b' ' b'a' b'n' b' ' b'i' b'c' b'e' b' '\n ...\n b'n' b'd' b'a' b'e' b' '], shape=(327,), dtype=string)\n\nOutput: Sequence mapping a string to phones.\ntf.Tensor(\n[ 0 0 30 1 21 19 5 0 9 20 0 19 0 5 1 19 25 0 1 19 0 1 14 0\n 9 3 5 0 3 18 5 1 13 0 19 21 14 4 1 5 0 46 12 9 16 16 9 14\n ...\n 5 0 3 18 5 1 13 0 19 21 14 4 1 5 0], shape=(327,), dtype=int64)\n\nOutput: A string input, required of list conversion or array-like none repeats.\nCause its easy as an ice cream sundae Slipping outta your hand\n\nOutput: A dataset creates from input_word and name label.\n(<tf.Tensor: shape=(13, 15, 1), dtype=int64, numpy=\n array([[[ 0],\n [ 0],\n ...\n [ 0]]], dtype=int64)>, <tf.Tensor: shape=(13, 1, 1), dtype=int64, numpy=\n array([[[ 1]],\n [[[ 2]]\n ...\n [[13]]], dtype=int64)>)\n ====================================================\n\nApplication: Word input compares process from slide X windows channel.\ndataset = tf.data.Dataset.from_tensors( tf.strings.bytes_split(input_word) )\nwindow_size = 6\ndataset = dataset.map(lambda x: tft.sliding_window(x, width=window_size, axis=0)).flat_map(tf.data.Dataset.from_tensor_slices)\n\nApplication: Wireless breaks.\nmapping_vocab = [ \"_\", \"I\", \"l\", \"o\", \"v\", \"e\", \"c\", \"a\", \"t\", \"s\" ]\nstring_matching = [ 27, 9, 12, 15, 22, 5, 3, 1, 20, 19 ]\nstring_matching_reverse = [ 1/27, 1/9, 1/12, 1/15, 1/22, 1/5, 1/3, 1/1, 1/20, 1/19 ]\n\nprint( tf.math.multiply( tf.constant(string_matching, dtype=tf.float32), tf.constant(string_matching_reverse, dtype=tf.float32 ), name=None ) )\n\nOutput: encode and decodes, each number represents bytes you may replace with trained parameters.\nencode: tf.Tensor([[27 27 27 9 12 15 22 5 3 1 20 19]], shape=(1, 12), dtype=int64)\ndecode: tf.Tensor([[b'_' b'_' b'_' b'I' b'l' b'o' b'v' b'e' b'c' b'a' b't' b's']], shape=(1, 12), dtype=string)\ntf.Tensor([1. 1. 1. 1. 1. 1. 1. 1. 1. 1.], shape=(10,), dtype=float32)\n\n" ]
[ 0 ]
[]
[]
[ "artificial_intelligence", "deep_learning", "neural_network", "python", "tensorflow" ]
stackoverflow_0074677664_artificial_intelligence_deep_learning_neural_network_python_tensorflow.txt
Q: Force all execution to pause using JavaScript I am making a Chrome extension that aims to pause execution of a tab if it is inactive after some time, in order to stop it from consuming resources. By "Pause", I mean something like debugger; however, debugger does not seem to work unless the developer inspector tool is open. Is there any way to stop a tab from consuming resources until the user goes to that tab and clicks on something (like the play button on debugger)? PS: I want to make this because there is a synonym/dictionary website that slowly consumes 5GB of RAM even if it is left inactive for half an hour. A: The only way is using alert() or prompt(). Enter may be used to confirm and continue executing. A: Chrome extensions have an API for this: tab discard. chrome.tabs.discard( tabId?: number, callback?: function, ) Add the tabs permission if required.
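Building on the chrome.tabs.discard answer, a hedged Manifest V3 sketch of the "discard after N idle minutes" idea — the timing logic and constants are illustrative, and it assumes the "tabs" and "alarms" permissions in the manifest:

// background.js (service worker)
const IDLE_MINUTES = 30;

// Whenever focus changes, cancel the countdown for the newly active tab
// and (re)start the countdown for every non-active tab in the window.
chrome.tabs.onActivated.addListener(async ({ tabId }) => {
  await chrome.alarms.clear(`discard-${tabId}`);
  const inactive = await chrome.tabs.query({ active: false, currentWindow: true });
  for (const tab of inactive) {
    chrome.alarms.create(`discard-${tab.id}`, { delayInMinutes: IDLE_MINUTES });
  }
});

chrome.alarms.onAlarm.addListener((alarm) => {
  if (alarm.name.startsWith("discard-")) {
    // Chrome unloads the tab's process; the tab reloads when clicked again.
    chrome.tabs.discard(Number(alarm.name.slice("discard-".length)));
  }
});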
Force all execution to pause using JavaScript
I am making a Chrome extension that aims to pause execution of a tab if it is inactive after some time, in order to stop it from consuming resources. By "Pause", I mean something like debugger; however, debugger does not seem to work unless the developer inspector tool is open. Is there any way to stop a tab from consuming resources until the user goes to that tab and clicks on something (like the play button on debugger)? PS: I want to make this because there is a synonym/dictionary website that slowly consumes 5GB of RAM even if it is left inactive for half an hour.
[ "The only way is using alert() or prompt(). Enter may be used to confirm and continue executing.\n", "Chrome extension has API for this.\nTab discard\nchrome.tabs.discard(\n tabId?: number,\n callback?: function,\n) \n\nAdd permission tabs if required.\n" ]
[ 0, 0 ]
[]
[]
[ "browser", "google_chrome", "google_chrome_extension", "javascript" ]
stackoverflow_0074672848_browser_google_chrome_google_chrome_extension_javascript.txt
Q: Add Label and Remove a Bar from Bar Chart I have a data frame results_df with models and their respective performance metrics. There are 9 models shown on the bar plot below. I would like to remove the AR(1) model (First bar) and add labels over each bar to count the y-axis (MAPE) values. Is there a way to do this? results_df: dput(results_df) structure(list(Model = c("Naive (baseline)", "Naive (baseline)", "Holt-Winter's Smoothing", "Holt-Winter's Smoothing", "Exponential Smoothing (AAN)", "Exponential Smoothing (AAN)", "Exponential Smoothing (AAN)", "Exponential Smoothing (AAN)", "Exponential Smoothing (MMN)", "Exponential Smoothing (MMN)", "Exponential Smoothing (MMdN)", "Exponential Smoothing (MMdN)", "Simple Regression", "Simple Regression", "AR(1)", "AR(1)", "ARIMA", "ARIMA", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression"), Set = c("Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test"), ME = c(783.614908859554, 261.373668023744, 14.3109110454195, 195.100466156422, 26.3876287540668, 463.951556368907, 18.0130216685751, 359.483123328568, -22.1643363307684, -339.196046699131, 12.6912377584583, 518.09851679099, 0.0000000000000174739705362564, -529.41545982092, -1.22546154090165, 3925.73996146196, 8.85383136321011, 124.533565617977, 0.284309224688646, -6.72677362104004, 0.284309224688646, -6.72677362104004, 0.284309224688646, -6.72677362104004, 0.284309224688646, -6.72677362104004, 0.284309224688646, -6.72677362104004), RMSE = c(1217.13789794065, 751.642354127382, 500.820891022085, 477.824050637917, 519.473902002702, 642.857862958026, 519.161630651347, 557.229990542789, 523.067637426936, 589.028868073331, 528.35952997731, 686.44615027821, 524.569099190936, 735.240889009488, 454.850494992402, 3949.12093581915, 512.863753849202, 533.856079481684, 135.490259727469, 485.401392424817, 135.490259727469, 485.401392424817, 135.490259727469, 485.401392424817, 135.490259727469, 485.401392424817, 135.490259727469, 485.401392424817), MAE = c(996.051963196766, 506.736876037775, 260.159300687626, 353.225967700696, 294.653723455236, 540.046166565811, 295.649955005283, 444.757424142394, 309.824947101753, 423.457221068304, 306.092832486747, 589.143961366613, 392.62157043408, 583.197541495569, 287.909371174162, 3925.73996146196, 248.525867346098, 371.164249425045, 84.7526369385231, 293.994074536976, 84.7526369385231, 293.994074536976, 84.7526369385231, 293.994074536976, 84.7526369385231, 293.994074536976, 84.7526369385231, 293.994074536976), MPE = c(17.1491069887359, 4.32046155430935, -6.74519182268278, 2.87120046577417, -6.82283325669752, 9.90612395387752, -7.20214793423834, 7.23484722049863, -8.92406901313896, -10.618241704038, -7.35920566787723, 11.316397549041, -8.15817902292851, -16.2120961149314, 278.150530756537, 100.940759473326, -4.78085408038733, 0.903132637273637, -1.0450388130686, -3.16256781033905, -1.0450388130686, -3.16256781033905, -1.0450388130686, -3.16256781033905, -1.0450388130686, -3.16256781033905, -1.0450388130686, -3.16256781033905), MAPE = c(37.6646414975729, 14.2674058209844, 13.4253935410948, 10.2678719351011, 
17.2343529926111, 14.5662838896106, 17.3299734983734, 12.2848516454307, 18.1093841365566, 12.5514842599768, 17.8614665736195, 15.7476258421098, 21.2960874782265, 17.3861366179269, 328.041997124712, 100.940759473326, 12.5829038948982, 10.9566576520148, 3.39520095739983, 9.9164317537263, 3.39520095739983, 9.9164317537263, 3.39520095739983, 9.9164317537263, 3.39520095739983, 9.9164317537263, 3.39520095739983, 9.9164317537263), MASE = c(1, 0.508745421686069, NaN, NaN, 0.295821638169924, 0.542186739768643, 0.2968218184686, 0.446520302730967, 0.311052995776836, 0.425135672349106, 0.307306088232949, 0.591479142790696, 0.39417779889112, 0.585509153180959, 0.374562870610186, 5.10728922520835, 0.2495109457427, 0.372635427808221, 0.0850885697434046, 0.295159374610759, 0.0850885697434046, 0.295159374610759, 0.0850885697434046, 0.295159374610759, 0.0850885697434046, 0.295159374610759, 0.0850885697434046, 0.295159374610759), ACF1 = c(0.477101737419842, -0.236641331303488, -0.0688752655501213, 0.0486772478154593, 0.121398483630841, 0.085203686984427, 0.124889761137534, 0.00218743715991606, 0.191776077581811, 0.186812955247135, 0.209332741368281, 0.105740160073441, 0.470691853904666, 0.0555995310408417, -0.00100961596465889, 0.0152620209423001, -0.0743813163863567, 0.0591555324997216, 0.0603986214142024, 0.00539281074890409, 0.0603986214142024, 0.00539281074890409, 0.0603986214142024, 0.00539281074890409, 0.0603986214142024, 0.00539281074890409, 0.0603986214142024, 0.00539281074890409), Theil.s.U = c(NA, 0.679390982324587, NA, 0.483244519716164, NA, 0.658305710036264, NA, 0.572169692732178, NA, 0.554481369845331, NA, 0.704434769895274, NA, 0.724366674060867, NA, 4.02609591299822, NA, 0.512019993546249, NA, 0.403328477993201, NA, 0.403328477993201, NA, 0.403328477993201, NA, 0.403328477993201, NA, 0.403328477993201)), row.names = c("Training set", "Test set", "Training set1", "Test set1", "Training set2", "Test set2", "Training set3", "Test set3", "Training set4", "Test set4", "Training set5", "Test set5", "Training set6", "Test set6", "Training set7", "Test set7", "Training set8", "Test set8", "Training set9", "Test set9", "Training set10", "Test set10", "Training set11", "Test set11", "Training set12", "Test set12", "Training set13", "Test set13"), class = "data.frame") Code: results_df_reshape = reshape2::melt(results_df, c("MAPE", "Model")) ggplot(results_df_reshape) + geom_bar(aes(x = Model, y = MAPE, fill = Model), stat = "identity", position = "dodge") + labs(title = "MAPE of All Models", x = "Model", y = "MAPE", bty = "l") A: library(dplyr) library(ggplot2) results_df_reshape <- reshape2::melt(results_df, c("MAPE", "Model")) %>% #Removal of AR(1) filter(Model != "AR(1)") results_df_reshape %>% ggplot() + geom_bar(aes(x = Model, y = MAPE, fill = Model), stat = "identity", position = "dodge") + #Label for MAPE geom_text( data = results_df_reshape %>% group_by(Model) %>% summarise(MAPE = max(MAPE),label_mape = round(sum(MAPE,na.rm= TRUE),2)), aes(x = Model, y = MAPE,label = label_mape), nudge_y = 1)+ labs(title = "MAPE of All Models", x = "Model", y = "MAPE", bty = "l") A: I have used the sum: library(tidyverse) df %>% pivot_longer(-c(Model, MAPE, Set)) %>% filter(Model != "AR(1)") %>% group_by(Model) %>% summarise(Mape_sum= sum(MAPE, na.rm = TRUE)) %>% ggplot(aes(x = Model, y = Mape_sum, fill = Model))+ geom_col()+ geom_text(aes(label=round(Mape_sum, 1)), size=3.5, vjust=-1)+ labs(title = "MAPE of All Models", x = "Model", y = "MAPE", bty = "l")
Add Label and Remove a Bar from Bar Chart
I have a data frame results_df with models and their respective performance metrics. There are 9 models shown on the bar plot below. I would like to remove the AR(1) model (First bar) and add labels over each bar to count the y-axis (MAPE) values. Is there a way to do this? results_df: dput(results_df) structure(list(Model = c("Naive (baseline)", "Naive (baseline)", "Holt-Winter's Smoothing", "Holt-Winter's Smoothing", "Exponential Smoothing (AAN)", "Exponential Smoothing (AAN)", "Exponential Smoothing (AAN)", "Exponential Smoothing (AAN)", "Exponential Smoothing (MMN)", "Exponential Smoothing (MMN)", "Exponential Smoothing (MMdN)", "Exponential Smoothing (MMdN)", "Simple Regression", "Simple Regression", "AR(1)", "AR(1)", "ARIMA", "ARIMA", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression", "Neural Network Autoregression"), Set = c("Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test", "Train", "Test"), ME = c(783.614908859554, 261.373668023744, 14.3109110454195, 195.100466156422, 26.3876287540668, 463.951556368907, 18.0130216685751, 359.483123328568, -22.1643363307684, -339.196046699131, 12.6912377584583, 518.09851679099, 0.0000000000000174739705362564, -529.41545982092, -1.22546154090165, 3925.73996146196, 8.85383136321011, 124.533565617977, 0.284309224688646, -6.72677362104004, 0.284309224688646, -6.72677362104004, 0.284309224688646, -6.72677362104004, 0.284309224688646, -6.72677362104004, 0.284309224688646, -6.72677362104004), RMSE = c(1217.13789794065, 751.642354127382, 500.820891022085, 477.824050637917, 519.473902002702, 642.857862958026, 519.161630651347, 557.229990542789, 523.067637426936, 589.028868073331, 528.35952997731, 686.44615027821, 524.569099190936, 735.240889009488, 454.850494992402, 3949.12093581915, 512.863753849202, 533.856079481684, 135.490259727469, 485.401392424817, 135.490259727469, 485.401392424817, 135.490259727469, 485.401392424817, 135.490259727469, 485.401392424817, 135.490259727469, 485.401392424817), MAE = c(996.051963196766, 506.736876037775, 260.159300687626, 353.225967700696, 294.653723455236, 540.046166565811, 295.649955005283, 444.757424142394, 309.824947101753, 423.457221068304, 306.092832486747, 589.143961366613, 392.62157043408, 583.197541495569, 287.909371174162, 3925.73996146196, 248.525867346098, 371.164249425045, 84.7526369385231, 293.994074536976, 84.7526369385231, 293.994074536976, 84.7526369385231, 293.994074536976, 84.7526369385231, 293.994074536976, 84.7526369385231, 293.994074536976), MPE = c(17.1491069887359, 4.32046155430935, -6.74519182268278, 2.87120046577417, -6.82283325669752, 9.90612395387752, -7.20214793423834, 7.23484722049863, -8.92406901313896, -10.618241704038, -7.35920566787723, 11.316397549041, -8.15817902292851, -16.2120961149314, 278.150530756537, 100.940759473326, -4.78085408038733, 0.903132637273637, -1.0450388130686, -3.16256781033905, -1.0450388130686, -3.16256781033905, -1.0450388130686, -3.16256781033905, -1.0450388130686, -3.16256781033905, -1.0450388130686, -3.16256781033905), MAPE = c(37.6646414975729, 14.2674058209844, 13.4253935410948, 10.2678719351011, 17.2343529926111, 14.5662838896106, 17.3299734983734, 
12.2848516454307, 18.1093841365566, 12.5514842599768, 17.8614665736195, 15.7476258421098, 21.2960874782265, 17.3861366179269, 328.041997124712, 100.940759473326, 12.5829038948982, 10.9566576520148, 3.39520095739983, 9.9164317537263, 3.39520095739983, 9.9164317537263, 3.39520095739983, 9.9164317537263, 3.39520095739983, 9.9164317537263, 3.39520095739983, 9.9164317537263), MASE = c(1, 0.508745421686069, NaN, NaN, 0.295821638169924, 0.542186739768643, 0.2968218184686, 0.446520302730967, 0.311052995776836, 0.425135672349106, 0.307306088232949, 0.591479142790696, 0.39417779889112, 0.585509153180959, 0.374562870610186, 5.10728922520835, 0.2495109457427, 0.372635427808221, 0.0850885697434046, 0.295159374610759, 0.0850885697434046, 0.295159374610759, 0.0850885697434046, 0.295159374610759, 0.0850885697434046, 0.295159374610759, 0.0850885697434046, 0.295159374610759), ACF1 = c(0.477101737419842, -0.236641331303488, -0.0688752655501213, 0.0486772478154593, 0.121398483630841, 0.085203686984427, 0.124889761137534, 0.00218743715991606, 0.191776077581811, 0.186812955247135, 0.209332741368281, 0.105740160073441, 0.470691853904666, 0.0555995310408417, -0.00100961596465889, 0.0152620209423001, -0.0743813163863567, 0.0591555324997216, 0.0603986214142024, 0.00539281074890409, 0.0603986214142024, 0.00539281074890409, 0.0603986214142024, 0.00539281074890409, 0.0603986214142024, 0.00539281074890409, 0.0603986214142024, 0.00539281074890409), Theil.s.U = c(NA, 0.679390982324587, NA, 0.483244519716164, NA, 0.658305710036264, NA, 0.572169692732178, NA, 0.554481369845331, NA, 0.704434769895274, NA, 0.724366674060867, NA, 4.02609591299822, NA, 0.512019993546249, NA, 0.403328477993201, NA, 0.403328477993201, NA, 0.403328477993201, NA, 0.403328477993201, NA, 0.403328477993201)), row.names = c("Training set", "Test set", "Training set1", "Test set1", "Training set2", "Test set2", "Training set3", "Test set3", "Training set4", "Test set4", "Training set5", "Test set5", "Training set6", "Test set6", "Training set7", "Test set7", "Training set8", "Test set8", "Training set9", "Test set9", "Training set10", "Test set10", "Training set11", "Test set11", "Training set12", "Test set12", "Training set13", "Test set13"), class = "data.frame") Code: results_df_reshape = reshape2::melt(results_df, c("MAPE", "Model")) ggplot(results_df_reshape) + geom_bar(aes(x = Model, y = MAPE, fill = Model), stat = "identity", position = "dodge") + labs(title = "MAPE of All Models", x = "Model", y = "MAPE", bty = "l")
[ "library(dplyr)\nlibrary(ggplot2)\n\nresults_df_reshape <-\n reshape2::melt(results_df, c(\"MAPE\", \"Model\")) %>% \n #Removal of AR(1)\n filter(Model != \"AR(1)\")\n\nresults_df_reshape %>%\n ggplot() +\n geom_bar(aes(x = Model, y = MAPE, fill = Model),\n stat = \"identity\",\n position = \"dodge\") +\n #Label for MAPE\n geom_text(\n data = results_df_reshape %>% \n group_by(Model) %>% \n summarise(MAPE = max(MAPE),label_mape = round(sum(MAPE,na.rm= TRUE),2)),\n aes(x = Model, y = MAPE,label = label_mape),\n nudge_y = 1)+\n labs(title = \"MAPE of All Models\",\n x = \"Model\",\n y = \"MAPE\",\n bty = \"l\")\n\n\n", "I have used the sum:\nlibrary(tidyverse)\n\ndf %>% \n pivot_longer(-c(Model, MAPE, Set)) %>% \n filter(Model != \"AR(1)\") %>% \n group_by(Model) %>% \n summarise(Mape_sum= sum(MAPE, na.rm = TRUE)) %>% \n ggplot(aes(x = Model, y = Mape_sum, fill = Model))+\n geom_col()+\n geom_text(aes(label=round(Mape_sum, 1)), size=3.5, vjust=-1)+\n labs(title = \"MAPE of All Models\", x = \"Model\", y = \"MAPE\", bty = \"l\")\n\n\n" ]
[ 2, 0 ]
[]
[]
[ "bar_chart", "ggplot2", "r" ]
stackoverflow_0074679802_bar_chart_ggplot2_r.txt
Q: Neural Networks Extending Learning Domain I have a simple function f : R->R, f(x) = x² + a, and would like to create a neural network to learn that function as completely as it can. Currently, I have a PyTorch implementation that takes in inputs over a limited range, of course, from x₀ to xₙ, with a particular number of points. Each epoch, the training data is randomly perturbed, in an effort to avoid learning the relationship only on the same grid points each time. Currently, it does a great job of learning the function on the range it is trained on, but is it at all feasible to train in such a way that the network can extend this learning beyond what it is trained on? Currently the behavior outside the training range seems dependent on the activation function. For example, with ReLU, the true function (orange) compared to the network's prediction (blue) is shown below: I understand that if I transform the input vector to higher dimensions that contain higher powers of x, it may work out pretty well, but for a generalized case, and for how I plan to implement this in the future, it won't work as well on non-polynomial functions. One thought that came to mind is from support vector machines and the choice of a kernel, and how the radial basis kernel gets around this generalization issue, but I'm not sure if this can be applied here without the inner product properties of SVMs. A: What you want is called extrapolation (as opposed to interpolation, which is predicting a value that is inside the trained domain / range). There is never a good solution for extrapolation, and using higher powers can give you a better fit for a specific problem, but if you change the fitted curve slightly (either change its x and y-intercept, one of the powers, etc.) the extrapolation will be pretty bad again. This is also why neural networks use a large data set (to maximize their input range and rely on interpolation) and why over-training / overfitting (which is what you're trying to do) is a bad idea; it never works well in the general case.
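The question's own idea — lifting the input to higher powers — can be made concrete with a small sketch (hypothetical, not the asker's code): a linear layer over the hand-chosen basis [x, x²] extrapolates x² + a exactly, but only because the basis already contains the answer, which is exactly the caveat the answer raises.

import torch

x = torch.linspace(-3, 3, 200).unsqueeze(1)   # training range
y = x**2 + 1.0                                # target: f(x) = x^2 + a, with a = 1

features = torch.cat([x, x**2], dim=1)        # hand-chosen basis [x, x^2]
model = torch.nn.Linear(2, 1)                 # linear in the lifted features
opt = torch.optim.SGD(model.parameters(), lr=0.01)

for _ in range(2000):
    opt.zero_grad()
    loss = torch.nn.functional.mse_loss(model(features), y)
    loss.backward()
    opt.step()

x_far = torch.tensor([[10.0]])                # far outside the training range
print(model(torch.cat([x_far, x_far**2], dim=1)))  # ~101, because x^2 is in the basis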
Neural Networks Extending Learning Domain
I have a simple function f : R->R, f(x) = x² + a, and would like to create a neural network to learn that function as completely as it can. Currently, I have a PyTorch implementation that takes in inputs over a limited range, of course, from x₀ to xₙ, with a particular number of points. Each epoch, the training data is randomly perturbed, in an effort to avoid learning the relationship only on the same grid points each time. Currently, it does a great job of learning the function on the range it is trained on, but is it at all feasible to train in such a way that the network can extend this learning beyond what it is trained on? Currently the behavior outside the training range seems dependent on the activation function. For example, with ReLU, the true function (orange) compared to the network's prediction (blue) is shown below: I understand that if I transform the input vector to higher dimensions that contain higher powers of x, it may work out pretty well, but for a generalized case, and for how I plan to implement this in the future, it won't work as well on non-polynomial functions. One thought that came to mind is from support vector machines and the choice of a kernel, and how the radial basis kernel gets around this generalization issue, but I'm not sure if this can be applied here without the inner product properties of SVMs.
[ "What you want is called extrapolation (as opposed to interpolation which is predicting a value that is inside the trained domain / range). There is never a good solution for extrapolation and using higher powers can give you a better fit for a specific problem, but if you change the fitted curve slightly (either change its x and y-intercept, one of the powers, etc) the extrapolation will be pretty bad again.\nThis is also why neural networks use a large data set (to maximize their input range and rely on interpolation) and why over-training / over fitting (which is what you're trying to do) is a bad idea; it never works well in the general case.\n" ]
[ 0 ]
[]
[]
[ "python", "pytorch" ]
stackoverflow_0074679929_python_pytorch.txt
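A minimal sketch of the input-augmentation idea mentioned in the question above, feeding the network [x, x^2] instead of x alone; the layer sizes, variable names and optimizer are illustrative assumptions, not taken from the original post:

import torch
import torch.nn as nn

# Hypothetical toy model; the widths are arbitrary choices.
model = nn.Sequential(nn.Linear(2, 32), nn.ReLU(), nn.Linear(32, 1))
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.MSELoss()

a = 3.0
x = torch.linspace(0.0, 5.0, 200).unsqueeze(1)   # training range
feats = torch.cat([x, x ** 2], dim=1)            # augmented input [x, x^2]
y = x ** 2 + a

for _ in range(2000):
    opt.zero_grad()
    loss = loss_fn(model(feats), y)
    loss.backward()
    opt.step()

# Outside the training range the x^2 feature lets the linear layers
# extrapolate, but (as the answer notes) only for this specific family.
x_out = torch.linspace(5.0, 10.0, 50).unsqueeze(1)
pred = model(torch.cat([x_out, x_out ** 2], dim=1))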
Q: How can I remove a part of a line of a big log file I have a 15000+ line log and I want to remove a part of every line. This is what my log looks like 15 {"level":"warn","message":"warn: Player: 819631980014075905 | Track has been started playing [\u001b[34m34mSomethingElse2\u001b[39m]"} 12 {"level":"warn","message":"warn: Player: 819631980014075905 | Track has been started playing [\u001b[34m34mSomething2\u001b[39m]"} 1 {"level":"warn","message":"warn: Player: 819631980014075905 | Track has been started playing [\u001b[34m34mSomethingElse\u001b[39m]"} 3 {"level":"warn","message":"warn: Player: 819631980014075905 | Track has been started playing [\u001b[34mSomething\u001b[39m]"} What I want the log to look like 15 SomethingElse2 12 Something2 1 SomethingElse 3 Something Thank you all. A: I didn't completely think about just using the find/replace function of almost any text editor. A: The obvious candidate is of course sed. It depends a bit on what you used to display the input-lines (especially the \u001b might give some trouble), but sed 's/{.*34m//;s/.u001b.*//' You may want to put it through the same filter that you used to display the lines.
How can I remove a part of a line of a big log file
I have a 15000+ line log and I want to remove a part of every line. This is what my log looks like 15 {"level":"warn","message":"warn: Player: 819631980014075905 | Track has been started playing [\u001b[34m34mSomethingElse2\u001b[39m]"} 12 {"level":"warn","message":"warn: Player: 819631980014075905 | Track has been started playing [\u001b[34m34mSomething2\u001b[39m]"} 1 {"level":"warn","message":"warn: Player: 819631980014075905 | Track has been started playing [\u001b[34m34mSomethingElse\u001b[39m]"} 3 {"level":"warn","message":"warn: Player: 819631980014075905 | Track has been started playing [\u001b[34mSomething\u001b[39m]"} What I want the log to look like 15 SomethingElse2 12 Something2 1 SomethingElse 3 Something Thank you all.
[ "I didn't completely think about just using the find/replace function of almost any text editor\n", "The obvious candidate is of course sed. It depends a bit on what you used to display the input-lines (especially the \\u001b might give some trouble), but\nsed 's/\\{.*34m34m//;s/.u001b.*//`\n\nYou may want to put it through the same filter that you used to display the lines.\n" ]
[ 0, 0 ]
[]
[]
[ "bash", "logging" ]
stackoverflow_0074679189_bash_logging.txt
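For reference, a quick way to sanity-check the sed expression above on a copy of the log before touching the original (the file name player.log is hypothetical):

# preview the rewritten lines without modifying the file
sed 's/{.*34m//;s/.u001b.*//' player.log | head -n 4

# once the output looks right, write the cleaned log to a new file
sed 's/{.*34m//;s/.u001b.*//' player.log > player_clean.log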
Q: How do I keep/store my azure credentials? I have this web app which accesses a keyvault stored in the Azure cloud. To access this KeyVault I use the IConfigurationBuilder Extension configuration.AddAzureKeyVault(new Uri(KeyvaultUri), new DefaultAzureCredential(true)); I have created a managed identity for all the users who need access to this, meaning they should be able to run the application and have access to the keyvault once they are logged in via SSO, which they currently are forced to do every time they start the application due to new DefaultAzureCredential(true) What I don't understand is why it needs to be requested every time, and not store the credentials somewhere after they have been entered once, and use that stored credential. Can I somehow locally store the required credentials after the initial login? It is sort of inconvenient to always log in when one starts their application, and debugging the application becomes a bit lengthy with the required login. Is it somehow possible to let the login happen in the background - or somehow store the credentials after the first login? I feel this is getting a bit off track - the solution I am seeking should be applicable for those running the solution via a terminal, outside of Visual Studio, such as frontend developers - who just need a backend to make requests to and nothing else. A: It makes no sense to cache the token since it is used only once at startup; what you are looking for is to exclude from your credentials all the wrong ways of grabbing the token to connect to your AKV, except the one you are really using. To configure it correctly and not wait 15 seconds at startup you should configure DefaultAzureCredential this way: DefaultAzureCredential credentials = new DefaultAzureCredential(new DefaultAzureCredentialOptions { ExcludeEnvironmentCredential = true, ExcludeInteractiveBrowserCredential = true, ExcludeAzurePowerShellCredential = true, ExcludeSharedTokenCacheCredential = true, ExcludeVisualStudioCodeCredential = true, ExcludeVisualStudioCredential = true, ExcludeAzureCliCredential = true, ExcludeManagedIdentityCredential = false, }); Exclude all possibilities to grab the token except the one you are using, in this case "Managed Identity" or in other cases AzureCliCredential. Regards. A: Can you share a small full code example? What about using DefaultAzureCredentialOptions, like: .ConfigureAppConfiguration((context, config) => { var appSettings = config.Build(); var credentialOptions = new DefaultAzureCredentialOptions(); var credential = new DefaultAzureCredential(credentialOptions); config.AddAzureKeyVault(new Uri(appSettings["Url:KeyVault"]), credential); }) A: You have configured your application to use an Azure managed identity to authenticate with Azure Key Vault, and this requires the user to sign in with their Azure credentials every time they start the application. To avoid having to sign in every time, you can use the AddAzureKeyVault method to specify a token cache that will store the authentication token so that it can be reused in future requests. This will allow the user to sign in once, and then their authentication token will be stored and used for subsequent requests to Azure Key Vault.
Here is an example of how you can configure the AddAzureKeyVault method to use a token cache: // Create a token cache to store the authentication token var tokenCache = new TokenCache(); // Configure the IConfigurationBuilder to use the token cache configuration.AddAzureKeyVault( new Uri(KeyvaultUri), new DefaultAzureCredential(true), tokenCache: tokenCache ); When using a token cache, the authentication process will happen in the background, so the user will not have to sign in every time they start the application. The token cache will store the authentication token, and the AddAzureKeyVault method will use this token to authenticate with Azure Key Vault. It's worth noting that the token cache will only work if the user is signed in with their Azure credentials on their computer. If the user is not signed in, they will still have to sign in every time they start the application.
How do I keep/store my azure credentials?
I have this web app which accesses a keyvault stored in the Azure cloud. To access this KeyVault I use the IConfigurationBuilder Extension configuration.AddAzureKeyVault(new Uri(KeyvaultUri), new DefaultAzureCredential(true)); I have created a managed identity for all the users who need access to this, meaning they should be able to run the application and have access to the keyvault once they are logged in via SSO, which they currently are forced to do every time they start the application due to new DefaultAzureCredential(true) What I don't understand is why it needs to be requested every time, and not store the credentials somewhere after they have been entered once, and use that stored credential. Can I somehow locally store the required credentials after the initial login? It is sort of inconvenient to always log in when one starts their application, and debugging the application becomes a bit lengthy with the required login. Is it somehow possible to let the login happen in the background - or somehow store the credentials after the first login? I feel this is getting a bit off track - the solution I am seeking should be applicable for those running the solution via a terminal, outside of Visual Studio, such as frontend developers - who just need a backend to make requests to and nothing else.
[ "It has no sense to cache the token since it is used only once at startup, what you are looking for is to exclude in your credentials all the wrong ways you are trying to grab the token to connect to your AKV except the one you are really using.\nTo configure it correcly and do not wait 15 seconds in your startup you should configure DefaultAzureCredentials this way:\n DefaultAzureCredential credentials = new DefaultAzureCredential(new DefaultAzureCredentialOptions\n {\n ExcludeEnvironmentCredential = true,\n ExcludeInteractiveBrowserCredential = true,\n ExcludeAzurePowerShellCredential = true,\n ExcludeSharedTokenCacheCredential = true,\n ExcludeVisualStudioCodeCredential = true,\n ExcludeVisualStudioCredential = true,\n ExcludeAzureCliCredential = true,\n ExcludeManagedIdentityCredential = false,\n });\n\nExclude all posibilities to grab the token except the one you are using, in this case \"Managed Identity\" or in other cases AzureCliCredentials.\nRegards.\n", "Can you share a small full code example ?\nWhat about using DefaultAzureCredentialOptions.\nLike:\n.ConfigureAppConfiguration((context, config) =>\n {\n var appSettings = config.Build();\n var credentialOptions = new DefaultAzureCredentialOptions();\n var credential = new DefaultAzureCredential(credentialOptions);\n config.AddAzureKeyVault(new Uri(appSettings[\"Url:KeyVault\"]), credential);\n })\n\n", "You have configured your application to use an Azure managed identity to authenticate with Azure Key Vault, and this requires the user to sign in with their Azure credentials every time they start the application.\nTo avoid having to sign in every time, you can use the AddAzureKeyVault method to specify a token cache that will store the authentication token so that it can be reused in future requests. This will allow the user to sign in once, and then their authentication token will be stored and used for subsequent requests to Azure Key Vault.\nHere is an example of how you can configure the AddAzureKeyVault method to use a token cache:\n// Create a token cache to store the authentication token\nvar tokenCache = new TokenCache();\n\n// Configure the IConfigurationBuilder to use the token cache\nconfiguration.AddAzureKeyVault(\n new Uri(KeyvaultUri), \n new DefaultAzureCredential(true),\n tokenCache: tokenCache\n);\n\nWhen using a token cache, the authentication process will happen in the background, so the user will not have to sign in every time they start the application. The token cache will store the authentication token, and the AddAzureKeyVault method will use this token to authenticate with Azure Key Vault.\nIt's worth noting that the token cache will only work if the user is signed in with their Azure credentials on their computer. If the user is not signed in, they will still have to sign in every time they start the application.\n" ]
[ 1, 0, 0 ]
[]
[]
[ ".net", "asp.net", "azure", "c#", "defaultazurecredential" ]
stackoverflow_0074103118_.net_asp.net_azure_c#_defaultazurecredential.txt
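If the app really only ever authenticates through its managed identity, an alternative to excluding every other credential type is to construct that single credential directly; a sketch, assuming the Azure.Identity package and the KeyvaultUri variable from the question:

using Azure.Identity;

// Skips the whole DefaultAzureCredential chain (and its interactive fallback)
// by using the managed identity alone.
configuration.AddAzureKeyVault(new Uri(KeyvaultUri), new ManagedIdentityCredential());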
Q: Not able to run this program unable to use features of java 8 I am trying to use Java 8 features in my code (attached) like Lambda, For-Each and Method Reference. But I am getting compile-time errors (attached). Even I am unable to use the Enhanced FOR-Loop. Find attached the error & code. I have java version '1.8.0_291'. Should I upgrade my JDK & JRE? If so, what is the recommended version and can you provide me the links to download the JDK & JRE. If not, what is the workaround to fix the above errors, so that I can use Java 8 features. (error image) (code image) Please check if there's any mistake in my code A: You can install as many javas as you want on a system, and in addition, there's the notion of targeting a given JDK version that can be different from the one you use. Therefore, for any java project, you have to configure the JVM version you target. Clearly you've set it to 6. Right click on the project, pick preferences, find the 'java build' stuff, and set the version target to 8, or 11, or 17. At that point, you'll need a JDK installed that can run that, but you presumably have it already.
Not able to run this program unable to use features of java 8
I am trying to use Java 8 features in my code (attached) like Lambda, For-Each and Method Reference. But I am getting compile-time errors (attached). Even I am unable to use the Enhanced FOR-Loop. Find attached the error & code. I have java version '1.8.0_291'. Should I upgrade my JDK & JRE? If so, what is the recommended version and can you provide me the links to download the JDK & JRE. If not, what is the workaround to fix the above errors, so that I can use Java 8 features. (error image) (code image) Please check if there's any mistake in my code
[ "You can install as many javas as you want on a system, and in addition, there's the notion of targeting a given JDK version that can be different from the one you use.\nTherefore, for any java project, you have to configure the JVM version you target. Clearly you've set it to 6. Right click on the project, pick preferences, find the 'java build' stuff, and set the version target to 8, or 11, or 17. At that point, you'll need a JDK installed that can run that, but you presumably have it already.\n" ]
[ 0 ]
[]
[]
[ "compiler_errors", "foreach", "java", "java_8" ]
stackoverflow_0074679731_compiler_errors_foreach_java_java_8.txt
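If the project is also built outside the IDE, the same language level can be set on the compiler itself; a sketch assuming a Maven build (for a plain javac invocation, javac --release 8 Main.java does the same):

<properties>
  <!-- compile for Java 8 so lambdas, method references and the
       enhanced for-loop are accepted -->
  <maven.compiler.source>1.8</maven.compiler.source>
  <maven.compiler.target>1.8</maven.compiler.target>
</properties>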
Q: Understanding how child elements inherit parent css properties I have a lack of understanding how the css settings of a parent restrict its children. Simple example: I have a container with three buttons. play button fast forward button fast backwards button My goal is the following: create a grid 1x3 (row x column) aling buttons inside of the grid With this I went to work and ended up here: .adaptations { margin-bottom: 20px; } .video-button-container { height: 100px; width: auto; display: grid; grid-template-columns: 33% 33% 33%; } .video-button { height: 100%; } .video-button img { height: 70%; } <div class="adaptations video-button-container"> <button type="button" class="video-button"> <img src="https://e7.pngegg.com/pngimages/552/785/png-clipart-smile-mouth-love-white.png"> </button> <button type="button" class="video-button middle-button"> <img src="https://e7.pngegg.com/pngimages/552/785/png-clipart-smile-mouth-love-white.png"> </button> <button type="button" class="video-button"> <img src="https://e7.pngegg.com/pngimages/552/785/png-clipart-smile-mouth-love-white.png"> </button> </div> Here's my problem: I managed to implement the said goal with fixed sizes (width, height). However, it is not desireable since screen sizes differ, hence, make it responsive and here lies my problem. What I learned is that if I set my parent-div to height: 100px; the child elements will adjust themselves inside that div. Therefore I said height: 100%. Which sounds like (as far as I understand this): "You can use all the height that is available to you, therefore max. 100px) Can you enlighten me on what I am missing? A: I hope this is what you are trying to achieve. .adaptations { margin-bottom: 20px; } .video-button-container { height: 100px; width: auto; display: grid; grid-template-columns: 33% 33% 33%; } .video-button { height: inherit; /* Use 'inherit' here instead of '100%' */ } .video-button img { height: 70%; } <div class="adaptations video-button-container"> <button type="button" class="video-button"> <img src="https://e7.pngegg.com/pngimages/552/785/png-clipart-smile-mouth-love-white.png"> </button> <button type="button" class="video-button middle-button"> <img src="https://e7.pngegg.com/pngimages/552/785/png-clipart-smile-mouth-love-white.png"> </button> <button type="button" class="video-button"> <img src="https://e7.pngegg.com/pngimages/552/785/png-clipart-smile-mouth-love-white.png"> </button> </div>
Understanding how child elements inherit parent css properties
I have a lack of understanding of how the css settings of a parent restrict its children. Simple example: I have a container with three buttons. play button fast forward button fast backwards button My goal is the following: create a grid 1x3 (row x column) align buttons inside of the grid With this I went to work and ended up here: .adaptations { margin-bottom: 20px; } .video-button-container { height: 100px; width: auto; display: grid; grid-template-columns: 33% 33% 33%; } .video-button { height: 100%; } .video-button img { height: 70%; } <div class="adaptations video-button-container"> <button type="button" class="video-button"> <img src="https://e7.pngegg.com/pngimages/552/785/png-clipart-smile-mouth-love-white.png"> </button> <button type="button" class="video-button middle-button"> <img src="https://e7.pngegg.com/pngimages/552/785/png-clipart-smile-mouth-love-white.png"> </button> <button type="button" class="video-button"> <img src="https://e7.pngegg.com/pngimages/552/785/png-clipart-smile-mouth-love-white.png"> </button> </div> Here's my problem: I managed to implement the said goal with fixed sizes (width, height). However, it is not desirable since screen sizes differ, hence, make it responsive and here lies my problem. What I learned is that if I set my parent-div to height: 100px; the child elements will adjust themselves inside that div. Therefore I said height: 100%. Which sounds like (as far as I understand this): "You can use all the height that is available to you, therefore max. 100px) Can you enlighten me on what I am missing?
[ "I hope this is what you are trying to achieve.\n\n\n.adaptations {\n margin-bottom: 20px;\n}\n\n.video-button-container {\n height: 100px;\n width: auto;\n\n display: grid;\n grid-template-columns: 33% 33% 33%;\n}\n\n.video-button {\n height: inherit; /* Use 'inherit' here instead of '100%' */\n}\n\n.video-button img {\n height: 70%;\n}\n<div class=\"adaptations video-button-container\">\n <button type=\"button\" class=\"video-button\">\n <img src=\"https://e7.pngegg.com/pngimages/552/785/png-clipart-smile-mouth-love-white.png\">\n </button>\n <button type=\"button\" class=\"video-button middle-button\">\n <img src=\"https://e7.pngegg.com/pngimages/552/785/png-clipart-smile-mouth-love-white.png\">\n </button>\n <button type=\"button\" class=\"video-button\">\n <img src=\"https://e7.pngegg.com/pngimages/552/785/png-clipart-smile-mouth-love-white.png\">\n </button>\n</div>\n\n\n\n" ]
[ 0 ]
[]
[]
[ "css", "html" ]
stackoverflow_0074668041_css_html.txt
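A stripped-down illustration of the percentage-height rule at play here: a child's height: 100% only resolves against a parent whose own height is definite, which is exactly what the container's 100px provides:

.video-button-container { height: 100px; } /* definite height */
.video-button           { height: 100%;  } /* resolves to 100px */
.video-button img       { height: 70%;   } /* 70% of the button, i.e. 70px */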
Q: How to estimate execution cost of function in pgsql? I am trying to define custom function and I wanted to find how can I calculate estimated cost of that function https://www.postgresql.org/docs/current/sql-createfunction.html I tried giving different values of cost function but unable to find to find how to estimate that cost. A: The unit of costs in PostgreSQL is not defined, but 1 is the default value of seq_page_cost, the cost of reading one 8kB page during a sequential scan. So you could measure the average duration of that operation and assign the cost of the function in comparison with that. A: You can use EXPLAIN to see the cost of CPU from each query on the computer its being executed. CREATE OR REPLACE FUNCTION a() RETURNS SET OF INTEGER AS $$ SELECT 1; $$ LANGUAGE SQL; EXPLAIN SELECT * FROM a() CROSS JOIN (Values(1),(2),(3)) as foo; Nested Loop (cost=0.25..47.80 rows=3000 width=8) -> Function Scan on a (cost=0.25..10.25 rows=1000 width=4) -> Materialize (cost=0.00..0.05 rows=3 width=4) -> Values Scan on "*VALUES*" (cost=0.00..0.04 rows=3 width=4) (4 rows) If two functions with COST 0.0001 AND 10000 get executed on the same time as the predicate of a SELECT statement the query planner will execute first the function of cost 0.0001 and only later the 10000 cost condition as you can see in this example below. EXPLAIN SELECT * FROM pg_language WHERE lanname ILIKE '%sql%' AND slow_ function(lanname)AND fast_function(lanname); QUERY PLAN ------------------------------------------------------------------------- Seq Scan on pg_language (cost=0.00..101.05 rows=1 width=114) Filter: (fast_function(lanname) AND (lanname ~~* '%sql%'::text) AND slow_function(lanname)) (2 rows) A: If I cared enough to bother, I would do it experimentally. For example, if your function takes double precision, you could compare: explain analyze select sqrt(x::double precision) from generate_series(1,1000000) f(x); to explain analyze select your_func(x::double precision) from generate_series(1,1000000) f(x); And then find the cost setting that makes the ratio of the cost estimates about match the ratio of the actual times. You could try to subtract the baseline costs of the generate_series and the cast, but if the added time of your function is so small that it warrants such precision, then it is probably small enough to just make the cost 1 and not worry about it.
How to estimate execution cost of function in pgsql?
I am trying to define a custom function and I wanted to find out how I can calculate the estimated cost of that function https://www.postgresql.org/docs/current/sql-createfunction.html I tried giving different values for the cost of the function but was unable to find how to estimate that cost.
[ "The unit of costs in PostgreSQL is not defined, but 1 is the default value of seq_page_cost, the cost of reading one 8kB page during a sequential scan. So you could measure the average duration of that operation and assign the cost of the function in comparison with that.\n", "You can use EXPLAIN to see the cost of CPU from each query on the computer its being executed.\n CREATE OR REPLACE FUNCTION a() RETURNS SET OF INTEGER AS $$\n SELECT 1;\n$$\nLANGUAGE SQL;\nEXPLAIN SELECT * FROM a() CROSS JOIN (Values(1),(2),(3)) as foo;\n\nNested Loop (cost=0.25..47.80 rows=3000 width=8)\n -> Function Scan on a (cost=0.25..10.25 rows=1000 width=4)\n -> Materialize (cost=0.00..0.05 rows=3 width=4)\n -> Values Scan on \"*VALUES*\" (cost=0.00..0.04 rows=3 width=4)\n(4 rows)\n\nIf two functions with COST 0.0001 AND 10000 get executed on the same time as the predicate of a SELECT statement the query planner will execute first the function of cost 0.0001 and only later the 10000 cost condition as you can see in this example below.\nEXPLAIN SELECT * FROM pg_language WHERE lanname ILIKE '%sql%' AND slow_\nfunction(lanname)AND fast_function(lanname);\n QUERY PLAN\n-------------------------------------------------------------------------\nSeq Scan on pg_language (cost=0.00..101.05 rows=1 width=114)\n Filter: (fast_function(lanname) AND (lanname ~~* '%sql%'::text) AND \nslow_function(lanname))\n(2 rows)\n\n", "If I cared enough to bother, I would do it experimentally.\nFor example, if your function takes double precision, you could compare:\nexplain analyze select sqrt(x::double precision) from generate_series(1,1000000) f(x);\n\nto\nexplain analyze select your_func(x::double precision) from generate_series(1,1000000) f(x);\n\nAnd then find the cost setting that makes the ratio of the cost estimates about match the ratio of the actual times.\nYou could try to subtract the baseline costs of the generate_series and the cast, but if the added time of your function is so small that it warrants such precision, then it is probably small enough to just make the cost 1 and not worry about it.\n" ]
[ 1, 0, 0 ]
[ "In PostgreSQL, you can define the estimated cost of a custom function using the COST parameter in the CREATE FUNCTION statement. The cost represents the estimated number of disk page fetches that the function will perform. This value is used by the PostgreSQL query planner to determine the most efficient execution plan for a query.\nFor example, if you have a custom function get_customer_orders(customer_id int) that retrieves all orders for a given customer, you can define the estimated cost of the function as follows:\nCREATE FUNCTION get_customer_orders(customer_id int)\nRETURNS SETOF orders\nAS $$\n SELECT * FROM orders WHERE customer_id = $1;\n$$ LANGUAGE SQL\nCOST 100;\n\nIn this example, the COST parameter is set to 100, which means that the query planner estimates that the function will perform 100 disk page fetches. You can adjust this value based on your own observations and tests of the function's performance.\nIt's important to note that the COST parameter is only an estimate, and it may not always accurately reflect the actual cost of the function. The query planner uses this value as a starting point, but it can adjust the execution plan based on other factors, such as the size and distribution of data in the tables that the function accesses.\n" ]
[ -1 ]
[ "plpgsql", "postgresql" ]
stackoverflow_0074674033_plpgsql_postgresql.txt
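Once a ratio has been estimated as the answers describe, the cost can be attached to an existing function without recreating it; standard PostgreSQL syntax, with a placeholder function name and signature:

-- e.g. the function was measured at roughly 25x the cost of reading one 8kB page
ALTER FUNCTION my_func(double precision) COST 25;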
Q: Get the first three human readable elements in a queryset I'm trying to render elements in a Django view. Every clinic object has many specialities, but for estetic reasons I only want the first three of them to be displayed in the template. I've tried: def clinics_index(request): clinics = Clinic.objects.all() for clinic in clinics: speciality = clinic.get_speciality_display context = { 'clinics' : clinics, 'speciality' : speciality, } return render(request, 'guide/clinic/clinic_directory.html', context) This now renders the human-readable name of the speciality field (which is a multiple choice field in the model). However, I can't use substraction to only get 3 elements like here: speciality = clinic.get_speciality_display[:3] As I get the following error: TypeError at /guide/clinics/ 'method' object is not subscriptable How can I render it? Edit: This is the Clinic model: class Clinic(models.Model): name = models.CharField(max_length=75, blank=True, null=True) speciality = MultiSelectField(choices=Speciality.choices, max_length=100, blank=True, null=True) city = models.CharField(max_length=20, choices=Cities.choices, blank=True, null=True) ward = models.CharField(max_length=20, choices=Wards.choices, blank=True, null=True) full_address = models.CharField(max_length=100, blank=True, null=True) maps_link = models.CharField(max_length=75, blank=True, null=True) train_access = models.CharField(max_length=50, blank=True, null=True) bus_access = models.CharField(max_length=50, blank=True, null=True) parking = models.CharField(_('Parking availability'), max_length=75, blank=True, null=True) phone_number = models.CharField(max_length=20, blank=True, null=True) english_support = models.BooleanField(default=False, blank=True, null=True) holiday_availability = models.BooleanField(_('Availability on weekends/holidays'), default=False, blank=True, null=True) slug = models.SlugField(blank=True, null=True) def __str__(self): return self.name def get_absolute_url(self): return reverse('guide:clinic_detail', kwargs={"slug" : self.slug}) And the template snippet: <tbody> {% for clinic in clinics %} <tr> <td>{{clinic.name}}</td> <td>{{clinic.city}}</td> <td>{{clinic.ward}}</td> <td>{{speciality}}</td> <td><a href="{{clinic.get_absolute_url}}">More...</a></td> </tr> {% endfor %} </tbody> A: I assume that in a loop you want to collect all the data. To do this, you need to save them to a list. But that's overkill, just pass clinics to a dictionary and iterate over all the values in the template. Also, for links, I used clinic.slug instead of clinic.get_absolute_url, since the model already returns the generated url through the get_absolute_url method. views.py def clinics_index(request): clinics = Clinic.objects.all()[:3] return render(request, 'guide/clinic/clinic_directory.html', {'context': clinics}) templates {% for clinic in context %} <p>{{ clinic }}</p> <tr> <td>{{ clinic.name }}</td> <td>{{ clinic.city }}</td> <td>{{ clinic.ward }}</td> <td>{{ clinic.speciality }}</td> <td><a href="{{ clinic.slug }}">More...</a></td> </tr> {% endfor %} </tbody>
Get the first three human readable elements in a queryset
I'm trying to render elements in a Django view. Every clinic object has many specialities, but for estetic reasons I only want the first three of them to be displayed in the template. I've tried: def clinics_index(request): clinics = Clinic.objects.all() for clinic in clinics: speciality = clinic.get_speciality_display context = { 'clinics' : clinics, 'speciality' : speciality, } return render(request, 'guide/clinic/clinic_directory.html', context) This now renders the human-readable name of the speciality field (which is a multiple choice field in the model). However, I can't use substraction to only get 3 elements like here: speciality = clinic.get_speciality_display[:3] As I get the following error: TypeError at /guide/clinics/ 'method' object is not subscriptable How can I render it? Edit: This is the Clinic model: class Clinic(models.Model): name = models.CharField(max_length=75, blank=True, null=True) speciality = MultiSelectField(choices=Speciality.choices, max_length=100, blank=True, null=True) city = models.CharField(max_length=20, choices=Cities.choices, blank=True, null=True) ward = models.CharField(max_length=20, choices=Wards.choices, blank=True, null=True) full_address = models.CharField(max_length=100, blank=True, null=True) maps_link = models.CharField(max_length=75, blank=True, null=True) train_access = models.CharField(max_length=50, blank=True, null=True) bus_access = models.CharField(max_length=50, blank=True, null=True) parking = models.CharField(_('Parking availability'), max_length=75, blank=True, null=True) phone_number = models.CharField(max_length=20, blank=True, null=True) english_support = models.BooleanField(default=False, blank=True, null=True) holiday_availability = models.BooleanField(_('Availability on weekends/holidays'), default=False, blank=True, null=True) slug = models.SlugField(blank=True, null=True) def __str__(self): return self.name def get_absolute_url(self): return reverse('guide:clinic_detail', kwargs={"slug" : self.slug}) And the template snippet: <tbody> {% for clinic in clinics %} <tr> <td>{{clinic.name}}</td> <td>{{clinic.city}}</td> <td>{{clinic.ward}}</td> <td>{{speciality}}</td> <td><a href="{{clinic.get_absolute_url}}">More...</a></td> </tr> {% endfor %} </tbody>
[ "I assume that in a loop you want to collect all the data. To do this, you need to save them to a list. But that's overkill, just pass clinics to a dictionary and iterate over all the values in the template. Also, for links, I used clinic.slug instead of clinic.get_absolute_url, since the model already returns the generated url through the get_absolute_url method.\nviews.py\ndef clinics_index(request):\n clinics = Clinic.objects.all()[:3]\n\n return render(request, 'guide/clinic/clinic_directory.html', {'context': clinics})\n\ntemplates\n{% for clinic in context %}\n<p>{{ clinic }}</p>\n<tr>\n <td>{{ clinic.name }}</td>\n <td>{{ clinic.city }}</td>\n <td>{{ clinic.ward }}</td>\n <td>{{ clinic.speciality }}</td>\n <td><a href=\"{{ clinic.slug }}\">More...</a></td>\n</tr>\n{% endfor %}\n</tbody>\n\n" ]
[ 0 ]
[]
[]
[ "django", "django_models", "django_templates", "django_views", "view" ]
stackoverflow_0074674035_django_django_models_django_templates_django_views_view.txt
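For what it's worth, the TypeError in the question comes from the missing call parentheses: get_speciality_display is a method, so it has to be called before the result can be sliced. A hedged sketch; with django-multiselectfield the display value is typically a comma-separated string, so splitting it is one way to keep only three labels (exact behaviour may depend on the package version):

for clinic in clinics:
    # call the method, then keep at most three human-readable labels
    labels = clinic.get_speciality_display().split(', ')[:3]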
Q: TypeError: 'list' object is not callable, on a function I am struggling to understand why is python throwing this error, to a function: Traceback (most recent call last): File "/home/arksdf/Repos/alura/Iesb_DeepLearning/tentativas/teste.py", line 40, in <module> model, train_loss, valid_loss = r.classificacao(optimizer, criterion) File "/home/arksdf/Repos/alura/Iesb_DeepLearning/tentativas/Runner.py", line 21, in classificacao model, train_loss, valid_loss = t.train(self.model, optimizer, criterion) TypeError: 'list' object is not callable I'm pretty sure the only thing I'm calling in this line is a function, and functions, as far as I know, are callable class Classificador(): def __init__(self, dataset, model, epochs = 2000, batch_size = 25, early_stopping_epochs = 60): self.dataset = dataset self.model = model self.epochs = epochs self.early_stopping_epochs = early_stopping_epochs # quantas épocas sem melhoria serão toleradas antes de parar o treinamento self.batch_size = batch_size def classificacao(self, optimizer, criterion): t = Treinamento(self.dataset, self.epochs, self.batch_size, self.early_stopping_epochs) model, train_loss, valid_loss = t.train(self.model, optimizer, criterion) return model, train_loss, valid_loss So why is it throwing this specific error? What bugs me is that it just don't send me to any specific line inside t.train it just says that the call is wrong This is the whole Treinamento class, in case there is something that might help find what I missed (is quite big tho) import torch import numpy as np from tqdm import tqdm from datetime import datetime from sklearn.model_selection import KFold from Reader import * class Treinamento(): def __init__(self, dataset, n_epochs=10, batch_size=1, early_stopping_epochs=10): read = Reader(dataset) self.train, self.valid = read.read() self.X_train, self.X_test, self.y_train, self.y_test = self.train self.X_valid, self.X_test, self.y_valid, self.y_test = self.valid self.n_epochs = n_epochs self.batch_size = batch_size self.early_stopping_epochs = early_stopping_epochs # UTILS def get_batches(self, data, batch_size=1): batches = [] data_size = len(data) for start_idx in range(0, data_size, batch_size): end_idx = min(data_size, start_idx + batch_size) batches.append(data[start_idx:end_idx]) return batches def load_best_model(self, model, best_epoch, best_valid_loss, best_train_loss, epochs_without_improv): # Load best model model.load_state_dict(torch.load('best_model')) model.eval() # Print logs if epochs_without_improv >= self.early_stopping_epochs: print('Training interrupted by early stopping!') else: print('Training finished by epochs!') print(f'Total epochs run: {epoch + 1}') print(f'Best model found at epoch {best_epoch + 1} with valid loss {best_valid_loss} and training loss {best_train_loss}') ############################################################################################################################### #################################### LOSSES ################################################################################### ############################################################################################################################### def train_loss(self, X_train, y_train, optimizer, criterion, model): model.train() acc_train_loss = 0.0 for index, (original_data, original_target) in enumerate(zip(self.get_batches(X_train, self.batch_size), self.get_batches(y_train, self.batch_size))): # Format data to tensor target = (original_target == 1).nonzero(as_tuple=True)[1] data = 
original_data.float() # Esse '.float()' é necessário para arrumar o tipo do dado # target = target.cuda() # data = data.cuda() optimizer.zero_grad() # model.forward(data) predicted = model(data) loss = criterion(predicted, target) # Backprop loss.backward() optimizer.step() acc_train_loss += loss.item() return acc_train_loss def valid_loss(self, X_valid, y_valid, criterion, model): model.eval() acc_valid_loss = 0.0 for index, (original_data, original_target) in enumerate(zip(self.get_batches(X_valid, self.batch_size), self.get_batches(y_valid, self.batch_size))): # Format data to tensor target = (original_target == 1).nonzero(as_tuple=True)[1] data = original_data.float() # Esse '.float()' é necessário para arrumar o tipo do dado # target = target.cuda() # data = data.cuda() # model.forward(data) predicted = model(data) loss = criterion(predicted, target) acc_valid_loss += loss.item() return acc_valid_loss ############################################################################################################################### #################################### TREINAMENTOS ############################################################################# ############################################################################################################################### def train(self, model, optimizer, criterion): init = datetime.now() best_epoch = None best_valid_loss = np.Inf best_train_loss = None epochs_without_improv = 0 train_loss = [] valid_loss = [] for epoch in tqdm(range(self.n_epochs)): ################### # early stopping? # ################### if epochs_without_improv >= self.early_stopping_epochs: break ################### # train the model # ################### acc_train_loss = self.train_loss(torch.from_numpy(self.X_train), torch.from_numpy(self.y_train.to_numpy()), optimizer, criterion, model) train_loss.append(acc_train_loss) ################### # valid the model # ################### acc_valid_loss = self.valid_loss(torch.from_numpy(self.X_valid), torch.from_numpy(self.y_valid.to_numpy()), criterion, model) valid_loss.append(acc_valid_loss) ##################### # Update best model # ##################### if acc_valid_loss < best_valid_loss: torch.save(model.state_dict(), 'best_model') # save best model best_epoch = epoch best_valid_loss = acc_valid_loss best_train_loss = acc_train_loss epochs_without_improv = 0 else: epochs_without_improv += 1 self.load_best_model(model, best_epoch, best_valid_loss, best_train_loss, epochs_without_improv) end = datetime.now() print(f'Total training time: {end - init}') return model, train_loss, valid_loss def train_cross_validation(self, model, optimizer, criterion): init = datetime.now() best_epoch = None best_valid_loss = np.Inf best_train_loss = None epochs_without_improv = 0 train_loss = [] valid_loss = [] kf = KFold(n_splits=4, random_state=1, shuffle=True) split = kf.split(self.train) for idx, (train_idx, valid_idx) in enumerate(split): print('Index {}'.format(idx + 1)) y_cros_train, y_cros_valid = y_train.iloc[train_idx], y_test.iloc[valid_idx] X_cros_train, X_cros_valid = X_train[train_idx,:], X_test[valid_idx,:] for epoch in tqdm(range(self.n_epochs)): if epochs_without_improv >= self.early_stopping_epochs: break ################### # train the model # ################### acc_train_loss = self.cross_train(torch.from_numpy(X_cros_train), torch.from_numpy(y_cros_train.to_numpy()), optimizer, criterion, model) train_loss.append(acc_train_loss) ################### # valid the model # ################### acc_valid_loss 
= self.cross_valid(torch.from_numpy(X_cros_valid), torch.from_numpy(y_cros_valid.to_numpy()), criterion, model) valid_loss.append(acc_valid_loss) if acc_valid_loss < best_valid_loss: torch.save(model.state_dict(), 'best_model') # save best model best_epoch = epoch best_valid_loss = acc_valid_loss best_train_loss = acc_train_loss epochs_without_improv = 0 else: epochs_without_improv += 1 self.load_best_model(model, best_epoch, best_valid_loss, best_train_loss, epochs_without_improv) end = datetime.now() print(f'Total training time: {end - init}') return model, train_loss, valid_loss A: As @Michael Butcher answered there was a variable with the same name as my function, train, renaming the function fixed the issue.
TypeError: 'list' object is not callable, on a function
I am struggling to understand why is python throwing this error, to a function: Traceback (most recent call last): File "/home/arksdf/Repos/alura/Iesb_DeepLearning/tentativas/teste.py", line 40, in <module> model, train_loss, valid_loss = r.classificacao(optimizer, criterion) File "/home/arksdf/Repos/alura/Iesb_DeepLearning/tentativas/Runner.py", line 21, in classificacao model, train_loss, valid_loss = t.train(self.model, optimizer, criterion) TypeError: 'list' object is not callable I'm pretty sure the only thing I'm calling in this line is a function, and functions, as far as I know, are callable class Classificador(): def __init__(self, dataset, model, epochs = 2000, batch_size = 25, early_stopping_epochs = 60): self.dataset = dataset self.model = model self.epochs = epochs self.early_stopping_epochs = early_stopping_epochs # quantas épocas sem melhoria serão toleradas antes de parar o treinamento self.batch_size = batch_size def classificacao(self, optimizer, criterion): t = Treinamento(self.dataset, self.epochs, self.batch_size, self.early_stopping_epochs) model, train_loss, valid_loss = t.train(self.model, optimizer, criterion) return model, train_loss, valid_loss So why is it throwing this specific error? What bugs me is that it just don't send me to any specific line inside t.train it just says that the call is wrong This is the whole Treinamento class, in case there is something that might help find what I missed (is quite big tho) import torch import numpy as np from tqdm import tqdm from datetime import datetime from sklearn.model_selection import KFold from Reader import * class Treinamento(): def __init__(self, dataset, n_epochs=10, batch_size=1, early_stopping_epochs=10): read = Reader(dataset) self.train, self.valid = read.read() self.X_train, self.X_test, self.y_train, self.y_test = self.train self.X_valid, self.X_test, self.y_valid, self.y_test = self.valid self.n_epochs = n_epochs self.batch_size = batch_size self.early_stopping_epochs = early_stopping_epochs # UTILS def get_batches(self, data, batch_size=1): batches = [] data_size = len(data) for start_idx in range(0, data_size, batch_size): end_idx = min(data_size, start_idx + batch_size) batches.append(data[start_idx:end_idx]) return batches def load_best_model(self, model, best_epoch, best_valid_loss, best_train_loss, epochs_without_improv): # Load best model model.load_state_dict(torch.load('best_model')) model.eval() # Print logs if epochs_without_improv >= self.early_stopping_epochs: print('Training interrupted by early stopping!') else: print('Training finished by epochs!') print(f'Total epochs run: {epoch + 1}') print(f'Best model found at epoch {best_epoch + 1} with valid loss {best_valid_loss} and training loss {best_train_loss}') ############################################################################################################################### #################################### LOSSES ################################################################################### ############################################################################################################################### def train_loss(self, X_train, y_train, optimizer, criterion, model): model.train() acc_train_loss = 0.0 for index, (original_data, original_target) in enumerate(zip(self.get_batches(X_train, self.batch_size), self.get_batches(y_train, self.batch_size))): # Format data to tensor target = (original_target == 1).nonzero(as_tuple=True)[1] data = original_data.float() # Esse '.float()' é necessário para arrumar 
o tipo do dado # target = target.cuda() # data = data.cuda() optimizer.zero_grad() # model.forward(data) predicted = model(data) loss = criterion(predicted, target) # Backprop loss.backward() optimizer.step() acc_train_loss += loss.item() return acc_train_loss def valid_loss(self, X_valid, y_valid, criterion, model): model.eval() acc_valid_loss = 0.0 for index, (original_data, original_target) in enumerate(zip(self.get_batches(X_valid, self.batch_size), self.get_batches(y_valid, self.batch_size))): # Format data to tensor target = (original_target == 1).nonzero(as_tuple=True)[1] data = original_data.float() # Esse '.float()' é necessário para arrumar o tipo do dado # target = target.cuda() # data = data.cuda() # model.forward(data) predicted = model(data) loss = criterion(predicted, target) acc_valid_loss += loss.item() return acc_valid_loss ############################################################################################################################### #################################### TREINAMENTOS ############################################################################# ############################################################################################################################### def train(self, model, optimizer, criterion): init = datetime.now() best_epoch = None best_valid_loss = np.Inf best_train_loss = None epochs_without_improv = 0 train_loss = [] valid_loss = [] for epoch in tqdm(range(self.n_epochs)): ################### # early stopping? # ################### if epochs_without_improv >= self.early_stopping_epochs: break ################### # train the model # ################### acc_train_loss = self.train_loss(torch.from_numpy(self.X_train), torch.from_numpy(self.y_train.to_numpy()), optimizer, criterion, model) train_loss.append(acc_train_loss) ################### # valid the model # ################### acc_valid_loss = self.valid_loss(torch.from_numpy(self.X_valid), torch.from_numpy(self.y_valid.to_numpy()), criterion, model) valid_loss.append(acc_valid_loss) ##################### # Update best model # ##################### if acc_valid_loss < best_valid_loss: torch.save(model.state_dict(), 'best_model') # save best model best_epoch = epoch best_valid_loss = acc_valid_loss best_train_loss = acc_train_loss epochs_without_improv = 0 else: epochs_without_improv += 1 self.load_best_model(model, best_epoch, best_valid_loss, best_train_loss, epochs_without_improv) end = datetime.now() print(f'Total training time: {end - init}') return model, train_loss, valid_loss def train_cross_validation(self, model, optimizer, criterion): init = datetime.now() best_epoch = None best_valid_loss = np.Inf best_train_loss = None epochs_without_improv = 0 train_loss = [] valid_loss = [] kf = KFold(n_splits=4, random_state=1, shuffle=True) split = kf.split(self.train) for idx, (train_idx, valid_idx) in enumerate(split): print('Index {}'.format(idx + 1)) y_cros_train, y_cros_valid = y_train.iloc[train_idx], y_test.iloc[valid_idx] X_cros_train, X_cros_valid = X_train[train_idx,:], X_test[valid_idx,:] for epoch in tqdm(range(self.n_epochs)): if epochs_without_improv >= self.early_stopping_epochs: break ################### # train the model # ################### acc_train_loss = self.cross_train(torch.from_numpy(X_cros_train), torch.from_numpy(y_cros_train.to_numpy()), optimizer, criterion, model) train_loss.append(acc_train_loss) ################### # valid the model # ################### acc_valid_loss = self.cross_valid(torch.from_numpy(X_cros_valid), 
torch.from_numpy(y_cros_valid.to_numpy()), criterion, model) valid_loss.append(acc_valid_loss) if acc_valid_loss < best_valid_loss: torch.save(model.state_dict(), 'best_model') # save best model best_epoch = epoch best_valid_loss = acc_valid_loss best_train_loss = acc_train_loss epochs_without_improv = 0 else: epochs_without_improv += 1 self.load_best_model(model, best_epoch, best_valid_loss, best_train_loss, epochs_without_improv) end = datetime.now() print(f'Total training time: {end - init}') return model, train_loss, valid_loss
[ "As @Michael Butcher answered there was a variable with the same name as my function, train, renaming the function fixed the issue.\n" ]
[ 1 ]
[]
[]
[ "function", "list", "python" ]
stackoverflow_0074679583_function_list_python.txt
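The pitfall in one minimal, self-contained example: an instance attribute assigned in __init__ shadows the class method of the same name, so the later 'call' hits the list instead of the function:

class Demo:
    def __init__(self):
        self.train = [1, 2, 3]   # attribute named like the method below

    def train(self):             # unreachable: the instance attribute wins
        return "training"

d = Demo()
d.train()                        # TypeError: 'list' object is not callable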
Q: In Webflux-WebFilter, how can I identify whether an inbound HTTP request has a body or not In WebFilter, how can I identify whether an inbound HTTP request has a body or not? I am aware of the content-length header presence check, but it's a hack at best. Besides, it will not catch all the cases (some of the clients are sending a body without a content-length header). Note: I just need to identify whether the body is there or not; reading the body is a whole different question! Server is netty, if that helps A: To check if an inbound HTTP request has a body, you can use the ServerHttpRequest.getBody() method provided by the WebFlux framework. This method will return an empty Flux if the request has no body, or a Flux containing the request body if it does have a body. Here is an example of how you can use this method: public class MyWebFilter implements WebFilter { public Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain) { ServerHttpRequest request = exchange.getRequest(); Flux<DataBuffer> body = request.getBody(); // Check if the request has a body (note that count().block() is blocking) if (body == null || body.count().block() == 0) { // The request has no body } else { // The request has a body } return chain.filter(exchange); } } This approach is more reliable than checking the presence of the Content-Length header, as it directly checks the request body itself rather than relying on potentially unreliable headers.
In Webflux-WebFilter, how can I identify whether an inbound HTTP request has a body or not
In WebFilter, how can I identify whether an inbound HTTP request has a body or not? I am aware of the content-length header presence check, but it's a hack at best. Besides, it will not catch all the cases (some of the clients are sending a body without a content-length header). Note: I just need to identify whether the body is there or not; reading the body is a whole different question! Server is netty, if that helps
[ "To check if an inbound HTTP request has a body, you can use the HttpRequest.getBody() method provided by the Webflux framework. This method will return an empty Flux if the request has no body, or a Flux containing the request body if it does have a body. Here is an example of how you can use this method:\n public class MyWebFilter implements WebFilter {\n public Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain) {\n HttpRequest request = exchange.getRequest();\n Flux<DataBuffer> body = request.getBody();\n \n // Check if the request has a body\n if (body == null || body.count().block() == 0) {\n // The request has no body\n } else {\n // The request has a body\n }\n }\n}\n\nThis approach is more reliable than checking the presence of the Content-Length header, as it directly checks the request body itself rather than relying on potentially unreliable headers.\n" ]
[ 0 ]
[]
[]
[ "netty", "reactor", "spring", "spring_boot", "spring_webflux" ]
stackoverflow_0074598220_netty_reactor_spring_spring_boot_spring_webflux.txt
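A non-blocking variant of the same check, as a sketch only; the big caveat is that subscribing to getBody() inside a filter consumes the stream, so downstream handlers would no longer see the body unless it is cached or the request is decorated first:

public Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain) {
    return exchange.getRequest().getBody()
            .hasElements()                     // Mono<Boolean>, no block()
            .flatMap(hasBody -> {
                exchange.getAttributes().put("hasBody", hasBody);
                return chain.filter(exchange); // body is consumed by now!
            });
}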
Q: I try to install my dependencies but I got an error I tried to install the dependencies of my program and I got an error when executing yarn install on Linux. I execute "yarn install" or "sudo yarn install" and I get the next error: An unexpected error occurred: "https://registry.yarnpkg.com/eslint/-/eslint-7.32.0.tgz: connect EACCES 2606:4700::6810:1523:443" any ideas? A: Turn off the strict-ssl on yarn/npm config, and try again yarn config set "strict-ssl" false -g or npm config set "strict-ssl" false -g or yarn config set registry https://registry.npmjs.org Try these and then tell me if it worked or not, and if it solved the issue please accept the answer A: Finally, it was only a firewall rule of Ubuntu. sudo ufw allow 443
I try to install my dependencies but I got an error
I tried to install the dependencies of my program and I got an error when executing yarn install on Linux. I execute "yarn install" or "sudo yarn install" and I get the next error: An unexpected error occurred: "https://registry.yarnpkg.com/eslint/-/eslint-7.32.0.tgz: connect EACCES 2606:4700::6810:1523:443" any ideas?
[ "Turn off the strict-ssl on yarn/npm config, and try again\nyarn config set \"strict-ssl\" false -g\n\nor\nnpm config set \"strict-ssl\" false -g\n\nor\nyarn config set registry https://registry.npmjs.org\n\ntry these and then tell me if it worked or not and if solved please accept the answer\n", "finnaly was only firewall rule of ubuntu.\nsudo ufw allow 443\n\n" ]
[ 0, 0 ]
[]
[]
[ "yarnpkg" ]
stackoverflow_0074667078_yarnpkg.txt
Q: express body parser is changing date field in body of post request When a date field sent in a POST request in the client's local time zone is received by body-parser, the body parser changes the date (left side: client payload; right side: what is parsed on the server side): My guess is it has something to do with the Express time zone but I don't know how. I tried to set the time zone of Node.js by setting process.env.TZ but the result is the same
express body parser is changing date field in body of post request
When a date field sent in a POST request in the client's local time zone is received by body-parser, the body parser changes the date (left side: client payload; right side: what is parsed on the server side): My guess is it has something to do with the Express time zone but I don't know how. I tried to set the time zone of Node.js by setting process.env.TZ but the result is the same
[]
[]
[ "So the problem was not on the backend it was on the frontend JSON.stringify was the problem it was changing the date so rather then sending the raw date object I'm converting date in to local String and sending that and this is working fine\n" ]
[ -1 ]
[ "body_parser", "express", "javascript", "timezone" ]
stackoverflow_0074525005_body_parser_express_javascript_timezone.txt
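What the frontend fix above is working around, in two lines: JSON.stringify serializes a Date as a UTC ISO string, which looks like a changed date to anyone comparing it with the local representation:

const d = new Date(2022, 11, 4, 23, 30);  // local time
JSON.stringify({ when: d });              // '{"when":"2022-12-04T..."}' shifted to UTC
d.toLocaleString();                       // the local representation the client meant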
Q: Disable (C# 10) Implicit usings on a specific class C# 10 brought implicit usings. I globally like them, but they're causing me a conflict on a particular class called Region, as there's a conflict with the Microsoft.Identity.Client.Region class. There's a nice thread here explaining how to disable implicit usings everywhere: C# 10: Disable Global Using I don't want to do it globally, I want to do it just on specific classes. Alternatively, a solution that would allow me to disable a particular implicit using (Microsoft.Identity.Client.Region) would also serve my needs. A: To remove an implicit namespace from the project completely you can use the Using XML element (see also) with the Remove attribute: <ItemGroup> <Using Remove="Microsoft.Identity.Client" /> </ItemGroup> It also provides an option to specify a Condition attribute but I have not found docs on it for this specific use case.
Disable (C# 10) Implicit usings on a specific class
C# 10 brought implicit usings. I globally like them, but they're causing me a conflict on a particular class called Region, as there's a conflict with the Microsoft.Identity.Client.Region class. There's a nice thread here explaining how to disable implicit usings everywhere: C# 10: Disable Global Using I don't want to do it globally, I want to do it just on specific classes. Alternatively, a solution that would allow me to disable a particular implicit using (Microsoft.Identity.Client.Region) would also serve my needs.
[ "To remove implicit namespace for project completely you can use Using xml element (see also) with Remove attribute:\n<ItemGroup>\n <Using Remove=\"Microsoft.Identity.Client\" />\n</ItemGroup>\n\nIt also provides option to specify Condition attribute but I have not found docs on it for this specific use case.\n" ]
[ 1 ]
[]
[]
[ "c#", "c#_10.0", "implicit_using", "using" ]
stackoverflow_0074679856_c#_c#_10.0_implicit_using_using.txt
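For completeness, an alternative that stays at file scope: a using alias in just the files where the conflict occurs, so the implicit usings stay enabled everywhere else (the namespace below is a hypothetical stand-in for wherever your own Region lives):

// top of the affected file only
using Region = MyApp.Models.Region;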
Q: How to edit commit author without changing date? I already know how to change the author of the commit (author and commit field), git rebase --root --exec "git commit --amend --reset-author --no-edit" but with the change of the author the dates (author date and commit date) are changed as of the current date. How do I save the old dates and change the author at the same time? A: This is not a complete solution to your question, as the commit date is still updated (this does change the commit after all), but it might be suitable for anyone that just wants to keep the author date unchanged. Rather than using --reset-author with also updates the author date, you can just set the author explicitly. git rebase --root --exec "git commit --amend --author=John --no-edit" You can specify what you want as the author explicitly, or use a use a search pattern (which is what the example above does). --author= Override the commit author. Specify an explicit author using the standard A U Thor format. Otherwise is assumed to be a pattern and is used to search for an existing commit by that author (i.e. rev-list --all -i --author=); the commit author is then copied from the first such commit found. Source A: None of the answers above worked properly for me here. Instead I used git filter-repo with the --mailmap option. After installing the git extension, follow these steps: Create a my_mailmap file like this: New Name <new-mail@address.com> Old Name <old-mail@address.com> Run the following command in your repos root directory: git filter-repo --mailmap my_mailmap (I additionally also needed the --force option) Be warned though, this replaces the mail address and name in the entire history, make sure you know what you're doing. You can confirm the author and committer dates of a commit separately with git show -s --format=fuller: Author: Author Name <mail@address.com> AuthorDate: Wed Dec 30 10:27:44 2020 +0100 Commit: Commiter Name <mail@address.com> CommitDate: Wed Dec 30 10:27:44 2020 +0100 A: I answer this here. In short: git -c rebase.instructionFormat='%s%nexec GIT_COMMITTER_DATE="%cD" GIT_AUTHOR_DATE="%aD" git commit --amend --no-edit --reset-author' rebase -i <commit before wrong author and email> A: Use the --ignore-date flag or --committer-date-is-author-date git rebase --ignore-date
How to edit commit author without changing date?
I already know how to change the author of the commit (author and commit field), git rebase --root --exec "git commit --amend --reset-author --no-edit" but with the change of the author the dates (author date and commit date) are changed as of the current date. How do I save the old dates and change the author at the same time?
[ "This is not a complete solution to your question, as the commit date is still updated (this does change the commit after all), but it might be suitable for anyone that just wants to keep the author date unchanged.\nRather than using --reset-author with also updates the author date, you can just set the author explicitly.\ngit rebase --root --exec \"git commit --amend --author=John --no-edit\"\n\nYou can specify what you want as the author explicitly, or use a use a search pattern (which is what the example above does).\n\n--author=\nOverride the commit author. Specify an explicit author using the standard A U Thor format. Otherwise is assumed to be a pattern and is used to search for an existing commit by that author (i.e. rev-list --all -i --author=); the commit author is then copied from the first such commit found.\n\nSource\n", "None of the answers above worked properly for me here. Instead I used git filter-repo with the --mailmap option. After installing the git extension, follow these steps:\n\nCreate a my_mailmap file like this:\nNew Name <new-mail@address.com> Old Name <old-mail@address.com>\n\n\nRun the following command in your repos root directory:\ngit filter-repo --mailmap my_mailmap\n\n(I additionally also needed the --force option)\n\n\nBe warned though, this replaces the mail address and name in the entire history, make sure you know what you're doing. You can confirm the author and committer dates of a commit separately with git show -s --format=fuller:\nAuthor: Author Name <mail@address.com>\nAuthorDate: Wed Dec 30 10:27:44 2020 +0100\nCommit: Commiter Name <mail@address.com>\nCommitDate: Wed Dec 30 10:27:44 2020 +0100\n\n", "I answer this here. In short:\ngit -c rebase.instructionFormat='%s%nexec GIT_COMMITTER_DATE=\"%cD\" GIT_AUTHOR_DATE=\"%aD\" git commit --amend --no-edit --reset-author' rebase -i <commit before wrong author and email>\n\n", "Use the --ignore-date flag or --committer-date-is-author-date\ngit rebase --ignore-date\n\n" ]
[ 4, 3, 3, 1 ]
[ "git rebase -r --root --exec 'git commit --amend --author=\"UserName <author@gmail.com>\" --no-edit'\n\n" ]
[ -1 ]
[ "commit", "git", "rebase" ]
stackoverflow_0057280172_commit_git_rebase.txt
Q: JavaFX animation is choppy JavaFX animation is choppy. And when I use stage.initStyle(StageStyle.TRANSPARENT) animations become even choppier. package com.test; import javafx.animation.Interpolator; import javafx.animation.KeyFrame; import javafx.animation.KeyValue; import javafx.animation.Timeline; import javafx.application.Application; import javafx.scene.Scene; import javafx.scene.layout.Pane; import javafx.scene.shape.Rectangle; import javafx.stage.Stage; import javafx.stage.StageStyle; import javafx.util.Duration; import java.io.IOException; public class HelloApplication extends Application { @Override public void start(Stage stage) throws IOException { Pane root = new Pane(); Rectangle rect = new Rectangle(50,50,10,10); Duration cycleDuration = Duration.millis(5000); Timeline timeline = new Timeline( new KeyFrame(cycleDuration, new KeyValue(rect.widthProperty(),500,Interpolator.LINEAR)) ); timeline.setCycleCount(2000); timeline.play(); root.getChildren().add(rect); Scene scene = new Scene(root, 600, 650); stage.initStyle(StageStyle.TRANSPARENT); stage.setScene(scene); stage.show(); } public static void main(String[] args) { launch(); } } How it works: https://youtu.be/UwoNByw-HYE Intel(R) Core(TM) i5-6500 CPU @ 3.20GHz 3.19 GHz 16,0 Gb Windows 10 Java 19 Javafx 19 NVIDIA GeForce GTX 950 2Gb I tried to make the same app with Qt QML, and it works really smoothly and without ANY choppiness. Here's the comparison with Qt QML (bottom) and JavaFX (top): https://youtu.be/YAcI05NkjAE. JavaFX is jerky compared to Qt QML. A: There definitely are visible glitches in the upper bar of the second video at irregular intervals of approximately one second length. Just concentrate on the right moving border of the bar. Some people just don't see or ignore them but others feel extremely disturbed by them. It seems to depend a bit on what you are used to. People who have a mobile or gaming background seem to be much more sensitive to such glitches than people with a primarily office PC background. Android once had project butter to overcome such glitches but JavaFX never did the same and still suffers from such glitches. (I know I won't make friends with this statement but it's true.) I also don't see anything obvious in your code that could directly improve the situation. Edit: I have run your code on my Mac now and I have to admit that I do not see any glitches there. But I have seen the behaviour you describe very often myself. Of course it will always depend on your machine too.
JavaFX animation is choppy
JavaFX animation is choppy. And when i use stage.initStyle(StageStyle.TRANSPARENT) animations become more choppy. package com.test; import javafx.animation.Interpolator; import javafx.animation.KeyFrame; import javafx.animation.KeyValue; import javafx.animation.Timeline; import javafx.application.Application; import javafx.scene.Scene; import javafx.scene.layout.Pane; import javafx.scene.shape.Rectangle; import javafx.stage.Stage; import javafx.stage.StageStyle; import javafx.util.Duration; import java.io.IOException; public class HelloApplication extends Application { @Override public void start(Stage stage) throws IOException { Pane root = new Pane(); Rectangle rect = new Rectangle(50,50,10,10); Duration cycleDuration = Duration.millis(5000); Timeline timeline = new Timeline( new KeyFrame(cycleDuration, new KeyValue(rect.widthProperty(),500,Interpolator.LINEAR)) ); timeline.setCycleCount(2000); timeline.play(); root.getChildren().add(rect); Scene scene = new Scene(root, 600, 650); stage.initStyle(StageStyle.TRANSPARENT); stage.setScene(scene); stage.show(); } public static void main(String[] args) { launch(); } } How it works: https://youtu.be/UwoNByw-HYE Intel(R) Core(TM) i5-6500 CPU @ 3.20GHz 3.19 GHz 16,0 Gb Windows 10 Java 19 Javafx 19 NVIDIA GeForce GTX 950 2Gb I tried to make the same app with Qt QML, and it works really smooth and without ANY chopping. Here's the comparation with Qt QML (bottom) and JavaFX (top) https://youtu.be/YAcI05NkjAE. JavaFX is jerky compared to Qt QML
[ "There definitely are visible glitches in the upper bar of the second video at irregular intervals of approximately one second length. Just concentrate on the right moving border of the bar. Some people just don't see or ignore them but others feel extremely disturbed by them. It seems to depend a bit on what you are used to. People who have a mobile or gaming background seem to be much more sensitive to such glitches than people with a primarily office PC background. Android once had project butter to overcome such glitches but JavaFX never did the same and still suffers from such glitches. (I know I won't make friends with this statement but it's true.) I also don't see anything obvious in your code that could directly improve the situation.\nEdit: I have run your code on my Mac now and I have to admit that I do not see any glitches there. But I have seen the behaviour you describe very often myself. Of course it will always depend on your machine too.\n" ]
[ 0 ]
[]
[]
[ "javafx" ]
stackoverflow_0074679174_javafx.txt
Q: Excel VBA - For Loop IS taking far far too long to execute First question ever here, I am the newbiest newbie.. So.. what I am trying to get is: to find if in sheet1 and sheet2 there are cells with the same value on column E from sheet1 and column F from sheet2. If there are, then copy the value from sheet2 column A row x to sheet1 column P row y. Rows x and y are where the identical values are on each sheet. This is my code: Sub ccopiazanrfact() Dim camion As Worksheet Dim facturi As Worksheet Set camion = ThisWorkbook.Sheets("B816RUS") Set facturi = ThisWorkbook.Sheets("EVIDENTA FACTURI") Dim nrcomanda As String Dim nrfactura As String For a = 2 To facturi.Range("F" & Rows.Count).End(xlUp).Row nrcomanda = facturi.Range("F" & a).Value For b = 4 To camion.Range("E" & Rows.Count).End(xlUp).Row If camion.Range("E" & b).Value = facturi.Range("F" & a).Value Then camion.Range("P" & b) = facturi.Range("A" & a).Value Exit For End If Next b Next a End Sub A: I would recommend using arrays to achieve what you want. Nested looping over ranges can make it very slow. Is this what you are trying? (UNTESTED). As I have not tested it, I would recommend making a backup of your data before you test this code. I have commented the code. But if you still have a question or find an error/bug in the below code then simply ask. Option Explicit Sub ccopiazanrfact() Dim Camion As Worksheet Dim Facturi As Worksheet Set Camion = ThisWorkbook.Sheets("B816RUS") Set Facturi = ThisWorkbook.Sheets("EVIDENTA FACTURI") '~~> Declare 2 arrays Dim ArCamion As Variant Dim ArFacturi As Variant Dim LRow As Long '~~> Find last row in Col E of Sheets("B816RUS") LRow = Camion.Range("E" & Camion.Rows.Count).End(xlUp).Row '~~> Store Values from E4:P last row in the array. We have taken E:P '~~> because we are replacing the value in P if match found ArCamion = Camion.Range("E4:P" & LRow).Value '~~> Find last row in Col F of Sheets("EVIDENTA FACTURI") LRow = Facturi.Range("F" & Facturi.Rows.Count).End(xlUp).Row '~~> Store Values from A2:F last row in the array. We have taken A:F '~~> because we are replacing the value in P with A ArFacturi = Facturi.Range("A2:F" & LRow).Value Dim i As Long, j As Long For i = 1 To UBound(ArFacturi) '~~> Array row 1 is worksheet row 2 For j = 1 To UBound(ArCamion) '~~> Array row 1 is worksheet row 4 '~~> Compare Col E of B816RUS (array col 1) with Col F of EVIDENTA FACTURI (array col 6) If ArCamion(j, 1) = ArFacturi(i, 6) Then '~~> Replace the Col P value (array col 12) with the Col A value (array col 1) ArCamion(j, 12) = ArFacturi(i, 1) Exit For End If Next j Next i '~~> Write the array back to the worksheet in one go Camion.Range("E4").Resize(UBound(ArCamion), 12).Value = ArCamion End Sub A: Please, test the next code. It should be very fast, using arrays and the Find function: Sub ccopiazaNrfact() Dim camion As Worksheet, facturi As Worksheet, cellMatch As Range, rngE As Range Set camion = ThisWorkbook.Sheets("B816RUS") Set facturi = ThisWorkbook.Sheets("EVIDENTA FACTURI") Set rngE = camion.Range("E4:E" & camion.Range("E" & camion.Rows.Count).End(xlUp).Row) Dim a As Long, arrFact, arrP, nrComanda As String arrP = camion.Range("P1:P" & camion.Range("E" & camion.Rows.Count).End(xlUp).Row).Value arrFact = facturi.Range("A2:F" & facturi.Range("F" & facturi.Rows.Count).End(xlUp).Row).Value For a = 1 To UBound(arrFact) nrComanda = arrFact(a, 6) Set cellMatch = rngE.Find(What:=nrComanda, After:=rngE.Cells(1, 1), LookIn:=xlValues, lookAt:=xlWhole) If Not cellMatch Is Nothing Then arrP(cellMatch.Row, 1) = arrFact(a, 1) End If Next a camion.Range("P1").Resize(UBound(arrP), 1).Value = arrP MsgBox "Ready..." End Sub Please, send some feedback after testing it...
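Whichever version is used, a general VBA pattern (not specific to this workbook) that usually buys extra speed is suspending screen redraws and recalculation around the slow routine; a hedged sketch:
Sub RunFast()
    Application.ScreenUpdating = False
    Application.Calculation = xlCalculationManual
    On Error GoTo CleanUp           ' restore settings even if the routine errors

    ccopiazanrfact                  ' the routine from the question/answers

CleanUp:
    Application.Calculation = xlCalculationAutomatic
    Application.ScreenUpdating = True
End Sub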
Excel VBA - For Loop IS taking far far too long to execute
First question ever here, I am the newbiest newbie.. So.. what I am trying to get is: to find if in sheet1 and sheet2 there are cells with the same value on column E from sheet1 and column F from sheet2. if there are, then copy the value from sheet2 column A row x to sheet2 column P row y. rows x and y are where the identical values are on each sheet. this is my code: Sub ccopiazanrfact() Dim camion As Worksheet Dim facturi As Worksheet Set camion = ThisWorkbook.Sheets("B816RUS") Set facturi = ThisWorkbook.Sheets("EVIDENTA FACTURI") Dim nrcomanda As String Dim nrfactura As String For a = 2 To facturi.Range("F" & Rows.Count).End(xlUp).Row nrcomanda = facturi.Range("F" & a).Value For b = 4 To camion.Range("E" & Rows.Count).End(xlUp).Row If camion.Range("E" & b).Value = facturi.Range("F" & a).Value Then camion.Range("P" & b) = facturi.Range("A" & a).Value Exit For End If Next b Next a End Sub
[ "I would recommend using arrays to achieve what you want. Nested looping over ranges can make it very slow. Is this what you are trying? (UNTESTED). As I have not tested it, I would recommend making a backup of your data before you test this code.\nI have commented the code. But if you still have a question or find an error/bug in the below code then simply ask.\nOption Explicit\n\nSub ccopiazanrfact()\n Dim Camion As Worksheet\n Dim Facturi As Worksheet\n \n Set Camion = ThisWorkbook.Sheets(\"B816RUS\")\n Set Facturi = ThisWorkbook.Sheets(\"EVIDENTA FACTURI\")\n \n '~~> Declare 2 arrays\n Dim ArCamion As Variant\n Dim ArFacturi As Variant\n Dim LRow As Long\n \n '~~> Find last row in Col E of Sheets(\"B816RUS\")\n LRow = Camion.Range(\"E\" & Camion.Rows.Count).End(xlUp).Row\n '~~> Store Values from E4:P last row in the array. We have taken E:P\n '~~> because we are replacing the value in P if match found\n ArCamion = Camion.Range(\"E4:P\" & LRow).Value\n \n '~~> Find last row in Col E of Sheets(\"EVIDENTA FACTURI\")\n LRow = ArFacturi.Range(\"F\" & ArFacturi.Rows.Count).End(xlUp).Row\n '~~> Store Values from A2:F last row in the array. We have taken A:F\n '~~> because we are replacing the value in P with A\n ArFacturi = Facturi.Range(\"A2:F\" & LRow).Value\n \n Dim i As Long, j As Long\n \n For i = 2 To UBound(ArFacturi)\n For j = 4 To UBound(ArCamion)\n '~~> Checking if camion.Range(\"E\" & j) = facturi.Range(\"F\" & i)\n If ArCamion(j, 1) = ArFacturi(i, 6) Then\n '~~> Replacing camion.Range(\"P\" & j) with facturi.Range(\"A\" & i)\n ArCamion(j, 12) = ArFacturi(i, 1)\n Exit For\n End If\n Next j\n Next i\n\n '~~> Write the array back to the worksheet in one go\n Camion.Range(\"E4:P\" & LRow).Resize(UBound(ArCamion), 12).Value = ArCamion\nEnd Sub\n\n", "Please, test the next code. It should be very fast, using arrays and Find function:\nSub ccopiazaNrfact()\n Dim camion As Worksheet, facturi As Worksheet, cellMatch As Range, rngE As Range\n Set camion = ThisWorkbook.Sheets(\"B816RUS\")\n Set facturi = ThisWorkbook.Sheets(\"EVIDENTA FACTURI\")\n \n Set rngE = camion.Range(\"E4:E\" & camion.Range(\"E\" & camion.rows.count).End(xlUp).row)\n Dim a As Long, arrFact, arrP, nrComanda As String\n \n arrP = camion.Range(\"P1:P\" & camion.Range(\"E\" & rows.count).End(xlUp).row).Value\n arrFact = facturi.Range(\"A2:F\" & facturi.Range(\"F\" & rows.count).End(xlUp).row).Value\n Debug.Print UBound(arrP): Stop\n For a = 1 To UBound(arrFact)\n nrComanda = arrFact(a, 6)\n Set cellMatch = rngE.Find(What:=nrComanda, After:=rngE.cells(1, 1), LookIn:=xlValues, lookAt:=xlWhole)\n \n If Not cellMatch Is Nothing Then\n arrP(cellMatch.row, 1) = arrFact(a, 1)\n End If\n Next a\n \n camion.Range(\"P1\").Resize(UBound(arrP), 1).Value = arrP\n MsgBox \"Ready...\"\n End Sub\n\nPlease, send some feedback after testing it...\n" ]
[ 1, 1 ]
[]
[]
[ "excel", "vba" ]
stackoverflow_0074679465_excel_vba.txt
Q: How to perform aggregate query in mongoose I have the following data, [ { type: 'sale' , amount: 2000 }, { type: 'expenditure' , amount: 1300 }, { type: 'sale' , amount: 4090 }, { type: 'expenditure' , amount: 3000 }] The output expected, [ { type: 'sale' , amount: 6090 }, { type: 'expenditure' , amount: 4300 }] I mean I want to perform the summation of the 2 types 'sale' and 'expenditure', and I tried as below: ` store.aggregate( [ { $group: { type: "sale", total: { $sum: "amount" } } } ], function(err, result) { if (err) { res.send(err); } else { res.json(result); } } ); }); ` A: // You can use a combination of the Array.prototype.reduce() and Object.entries() methods to achieve the desired outcome. // Here's an example of how you could do this: const data = [ { type: 'sale', amount: 2000 }, { type: 'expenditure', amount: 1300 }, { type: 'sale', amount: 4090 }, { type: 'expenditure', amount: 3000 }, ]; const result = Object.entries( data.reduce((acc, { type, amount }) => { // Initialize the property on the accumulator if it doesn't exist if (!acc[type]) acc[type] = 0; // Add the amount to the property on the accumulator acc[type] += amount; // Return the accumulator return acc; }, {}) ).map(([type, amount]) => ({ type, amount })); console.log(result); // This code will first use Array.prototype.reduce() to iterate over the input data and sum up the amounts for each type of transaction. The result will be an object with the transaction types as keys and the sums as values. // Next, the Object.entries() method is used to convert the object into an array of key-value pairs. This array is then mapped to an array of objects with the desired format. // The final result will be an array with the following structure: [ { type: 'sale', amount: 6090 }, { type: 'expenditure', amount: 4300 }, ] // I hope this helps! Let me know if you have any questions.
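For completeness, since the existing answer sums the data in JavaScript rather than in the database, here is a hedged sketch of what the aggregation itself could look like, assuming store is a Mongoose model with the field names from the question:
// $group requires an _id key to group on; $sum takes a field path string ("$amount").
store.aggregate([
  { $group: { _id: "$type", amount: { $sum: "$amount" } } },
  { $project: { _id: 0, type: "$_id", amount: 1 } },
])
  .then((result) => res.json(result))
  .catch((err) => res.send(err));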
How to perform aggregate query in mongoose
I have the following data, [ { type: 'sale' , amount: 2000 }, { type: 'expenditure' , amount: 1300 }, { type: 'sale' , amount: 4090 }, { type: 'expenditure' , amount: 3000 }] The output expected, [ { type: 'sale' , amount: 6090 }, { type: 'expenditure' , amount: 4300 }] I mean I want to perform the summation of 2 types 'sale' and 'expenditure' and I tried as bellow, ` store.aggregate( [ { $group: { type: "sale", total: { $sum: "amount" } } } ], function(err, result) { if (err) { res.send(err); } else { res.json(result); } } ); }); ` store.aggregate( [ { $group: { type: "sale", total: { $sum: "amount" } } } ], function(err, result) { if (err) { res.send(err); } else { res.json(result); } } ); }); ` [ { type: 'sale' , amount: 2000 }, { type: 'expenditure' , amount: 1300 }, { type: 'sale' , amount: 4090 }, { type: 'expenditure' , amount: 3000 }] The output expected, [ { type: 'sale' , amount: 6090 }, { type: 'expenditure' , amount: 4300 }] I mean I want to perform the summation of 2 types 'sale' and 'expenditure' and I tried as bellow,
[ "// You can use a combination of the Array.prototype.reduce() and Object.entries() methods to achieve the desired outcome.\n\n// Here's an example of how you could do this:\n\nconst data = [\n { type: 'sale', amount: 2000 },\n { type: 'expenditure', amount: 1300 },\n { type: 'sale', amount: 4090 },\n { type: 'expenditure', amount: 3000 },\n];\n\nconst result = Object.entries(\n data.reduce((acc, { type, amount }) => {\n // Initialize the property on the accumulator if it doesn't exist\n if (!acc[type]) acc[type] = 0;\n\n // Add the amount to the property on the accumulator\n acc[type] += amount;\n\n // Return the accumulator\n return acc;\n }, {})\n).map(([type, amount]) => ({ type, amount }));\n\nconsole.log(result);\n\n// This code will first use Array.prototype.reduce() to iterate over the input data and sum up the amounts for each type of transaction. The result will be an object with the transaction types as keys and the sums as values.\n\n// Next, the Object.entries() method is used to convert the object into an array of key-value pairs. This array is then mapped to an array of objects with the desired format.\n\n// The final result will be an array with the following structure:\n\n[\n { type: 'sale', amount: 6090 },\n { type: 'expenditure', amount: 4300 },\n]\n\n// I hope this helps! Let me know if you have any questions.\n\n" ]
[ 0 ]
[]
[]
[ "javascript", "mongoose", "node.js" ]
stackoverflow_0074680060_javascript_mongoose_node.js.txt
Q: Align a 3D line A to the line B I want to align a line A (blue), which is defined with a 3D start (S) and 3D end point (E), to the other 3D line B (red), so that the line A (no matter how it is originally positioned) is parallel to the line B, as shown in Fig.B. I know that I have to calculate the angle between the two; for that I do: def calcAngleBtw2Lines(self, vec1S, vec1E, vec2S, vec2E): # Subtract the start point (S) from the end point (E) of the line vec1 = np.subtract(vec1E, vec1S) vec2 = np.subtract(vec2E, vec2S) # Dot product to get the cosine of the rotation angle dotProduct = np.dot(vec1, vec2) # Norms (lengths) of the vectors vec1Unit = np.linalg.norm(vec1) vec2Unit = np.linalg.norm(vec2) # Find the angle between vectors angle = np.degrees(np.arccos(dotProduct / (vec1Unit * vec2Unit))) print("angle: ", angle) return np.round(angle, 1) But I am not sure whether the steps are correct. If they are parallel to each other, the angle between them should be 0. Edit: The length of both lines is equal. The line B is stationary. To make line A parallel to B, the S and E of A can be moved at the same time. A: Ok, here's a working answer. Note that if A and B have the same lengths, part of the code is unnecessary (but I'll leave it anyway to make it more portable): import numpy as np def makeAparalleltoB(pointSA, pointEA, pointSB, pointEB): # pointSA... are np.arrays of the 3 coordinates # Calculating the coordinates of the vectors vecA = pointEA - pointSA vecB = pointEB - pointSB # Calculating the lengths of the vectors # Unnecessary if we know that A and B have the same lengths vecANorm = np.linalg.norm(vecA) vecBNorm = np.linalg.norm(vecB) # Calculating the coordinates of a vector collinear to B, of the same length as A newvecA = vecB * vecANorm/vecBNorm # Returning new coordinates for the endpoint of A return pointSA + newvecA Example: a = np.array([1,1,1]) b = np.array([2,3,4]) c = np.array([0,0,0]) d = np.array([1,1,1]) print(makeAparalleltoB(a, b, c, d)) # [3.1602469 3.1602469 3.1602469] If we know that A and B have the same length, then it's even simpler: we simply make it so SB, EB, EA, SA is a parallelogram: newpointEA = pointSA + pointEB - pointSB
Align a 3D line A to the line B
I want to align a line A (blue), which is defined with a 3D start (S) and 3D end point (E) to the other 3D line B (red), so that the line A (does not matter, how it is originally positioned) is parallel to the line B, as shown in Fig.B I know that I have to calculate the angle between two them for that I do: def calcAngleBtw2Lines(self, vec1S, vec1E, vec2S, vec2E): # Substract the end point (E) from the start point (S) of the line vec1 = np.subtract(vec1E, vec1S) vec2 = np.subtract(vec2E, vec2S) # Dot product to get the cosine of the rotation angle dotProduct = np.dot(vec1, vec2) # Normalize the vectors to find the unit vectors vec1Unit = np.linalg.norm(vec1) vec2Unit = np.linalg.norm(vec2) # Find the angle between vectors angle = np.degrees(np.arccos(dotProduct / (vec1Unit * vec2Unit))) print("angle: ", angle) return np.round(angle, 1) But I am not sure, whether the steps are correct. If they are parallel to each other, the angle between them should be 0 Edit: The length of both lines are equal. The line B is stationary. To make line A parallel to the B, the S and E of A can be moved at the same time.
[ "Ok, here's a working answer. Note that if A and B have the same lengths, part of the code is unnecessary (but I'll leave it anyway to make it more portable):\nimport numpy as np\n\ndef makeAparalleltoB(pointSA, pointEA, pointSB, pointEB):\n# pointSA... are np.arrays of the 3 coordinates\n\n # Calculating the coordinates of the vectors\n vecA = pointEA - pointSA\n vecB = pointEB - pointSB\n\n # Calculating the lengths of the vectors\n # Unnecessary if we know that A and B have the same lengths\n vecANorm = np.linalg.norm(vecA)\n vecBNorm = np.linalg.norm(vecB)\n\n # Calculating the coordinates of a vector collinear to B, of the same length as A\n newvecA = vecB * vecANorm/vecBNorm\n\n # Returning new coordinates for the endpoint of A\n return pointSA + newvecA\n\nExample:\na = np.array([1,1,1])\nb = np.array([2,3,4])\nc = np.array([0,0,0])\nd = np.array([1,1,1])\n\nprint(makeAparalleltoB(a, b, c, d))\n\n# [3.1602469 3.1602469 3.1602469]\n\nIf we know that A and B have the same length, then it's even simpler: we simply make it so SB, EB, EA, SA is a parallelogram:\nnewpointEA = pointSA + pointEB - pointSB\n\n" ]
[ 1 ]
[]
[]
[ "math", "numpy", "python" ]
stackoverflow_0074679790_math_numpy_python.txt
Q: Initializing class member to emptyList in secondary constructor I'm very new to Kotlin and have to implement a data class TreeNode that resembles a generic tree of nodes. I'm trying to declare a secondary constructor that initializes the member children to be an empty list. Here's what I tried; but I'm not understanding the syntax quite well so not sure how to go about solving this. data class TreeNode<T> ( val value: T, val children: List<TreeNode<T>>, ) { constructor(value: T, children: emptyList<TreeNode<T>>): this(value){ } } A: You're getting confused by where to put the arguments. If you know the list is empty, you don't need to take it as an argument. constructor(value: T): this(value, emptyList()) {} A: In addition to Silvio's answer, if you want to be able to take a list of children but default to not doing so: data class TreeNode<T>(val value: T, val children: List<TreeNode<T>> = emptyList()) {} See the section on default values in constructors in the Kotlin documentation: https://kotlinlang.org/docs/classes.html#constructors
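A quick usage sketch of the defaulted-parameter version from the second answer (values hypothetical):
// children defaults to emptyList(), so both call sites compile:
val leaf = TreeNode(42)
val root = TreeNode(1, listOf(leaf))

// The secondary-constructor variant from the first answer yields the same call sites:
// constructor(value: T) : this(value, emptyList())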
Initializing class member to emptyList in secondary constructor
I'm very new to Kotlin and have to implement a data class TreeNode that resembles a generic tree of nodes. I'm trying to declare a secondary constructor that initializes the member children to be an empty list. Here's what I tried; but I'm not understanding the syntax quite well so not sure how to go about solving this. data class TreeNode<T> ( val value: T, val children: List<TreeNode<T>>, ) { constructor(value: T, children: emptyList<TreeNode<T>>): this(value){ } }
[ "You're getting confused by where to put the arguments. If you know the list is empty, you don't need to take it as an argument.\nconstructor(value: T): this(value, emptyList()) {}\n\n", "In addition to Silvio's answer, if you want to be able to take a list of children but default to not doing so:\ndata class TreeNode<T>(val value: T, val children: List<TreeNode<T>> = emptyList()) {}\n\nSee the section on default values in constructors in the Kotlin documentation: https://kotlinlang.org/docs/classes.html#constructors\n" ]
[ 2, 2 ]
[]
[]
[ "kotlin" ]
stackoverflow_0074680016_kotlin.txt
Q: Uncaught TypeError: Cannot read properties of undefined (reading 'includes') My question is that I am accessing an array from another class, but this error keeps showing; even after making many changes, it is still there. The array is updating but is not accessible from another class. window.addEventListener('load',function(){ const canvas = document.getElementById('canvas1'); const ctx = canvas.getContext('2d'); canvas.width = 1500; canvas.height= 500; class InputHandler{ constructor(game){ this.game = game; window.addEventListener('keydown', e =>{ if((( e.key === 'ArrowUp') || (e.key == 'ArrowDown')) && this.game.keys.indexOf(e.key) === -1 ){ this.game.keys.push(e.key); } console.log(this.game.keys); }); window.addEventListener('keyup',e=> { if(this.game.keys.indexOf(e.key) > -1){ this.game.keys.splice(this.game.keys.indexOf(e.key),1); } }); } } class player{ constructor(game){ this.game = game; this.width = 120; this.height = 190; this.x = 20; this.y = 100; this.speedY = 0; this.maxspeed = 2; } update(){ this.y += this.speedY; if(this.game.keys.includes('ArrowUp')) this.speedY = -1; // error on this line while executing } draw(context){ context.fillRect(this.x,this.y,this.width,this.height); } } class Game{ constructor(width,height){ this.width = width; this.height = height; this.player = new player(Game); this. Input = new InputHandler(this); this.keys = []; } update(){ this.player.update(); } draw(context){ this.player.draw(context); } } const game = new Game(canvas.width,canvas.height); // animation loop function animat(){ ctx.clearRect(0,0,canvas.width,canvas.height); game.update(); game.draw(ctx); requestAnimationFrame(animat); } animat(); }); I tried to implement the event handler class in the player.update() method but the error still shows. A: It looks like the problem is with the line if(this.game.keys.includes('ArrowUp')) this.speedY = -1;. It seems like this.game.keys is undefined when you try to access it. One possible reason for this is that this.game.keys is not defined in the player class, but you are trying to access it in the player.update() method. To fix this, you could define this.game.keys in the player class and pass it in as an argument to the player.update() method. Here's an example of how you could do this: class player { constructor(game, keys) { this.game = game; this.width = 120; this.height = 190; this.x = 20; this.y = 100; this.speedY = 0; this.maxspeed = 2; this.keys = keys; // add this line to save the keys } update() { this.y += this.speedY; if (this.keys.includes('ArrowUp')) this.speedY = -1; // access this.keys instead of this.game.keys // ... other code } // ... other code } Then, in the Game class, you would need to pass the keys array to the player class when you create a new player object: class Game { constructor(width, height) { this.width = width; this.height = height; this.keys = []; this.player = new player(this, this.keys); // pass in this.keys as an argument to the player class this.Input = new InputHandler(this); } // ... other code } This should fix the error you are seeing and allow you to access the keys array in the player.update() method. A: The error you're getting is because this.game.keys is undefined in the update method of the player class. This is because you're trying to access the game property of this directly, but this refers to the player instance, not the Game instance. To fix this, you can pass the game instance as an argument to the update method, and then use that argument to access the keys property. Here is an example of how you can do this: class player { // ... update(game) { this.y += this.speedY; if (game.keys.includes('ArrowUp')) this.speedY = -1; } // ... } You would need to make a similar change in the draw method of the player class, where you would also need to pass the game instance as an argument and use it to access the keys property. You would also need to update the update method of the Game class to pass the game instance as an argument to the update methods of the player and InputHandler classes: class Game { // ... update() { this.player.update(this); this.Input.update(this); } // ... }
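One further observation beyond the two answers above: in the question's Game constructor, new player(Game) passes the class itself rather than the instance, and this.keys is only assigned after the player is constructed. A minimal sketch of fixing just those lines, with the rest of the code unchanged:
class Game {
  constructor(width, height) {
    this.width = width;
    this.height = height;
    this.keys = [];                  // create the array before anything reads it
    this.player = new player(this);  // pass the Game *instance*, not the class
    this.Input = new InputHandler(this);
  }
  // ... update/draw unchanged
}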
Uncaught TypeError: Cannot read properties of undefined (reading 'includes')
my question is that the I am accessing an array from another class but continuously this error is showing after making so many changes this error is still showing. Array is updating but not accessible from another class window.addEventListener('load',function(){ const canvas = document.getElementById('canvas1'); const ctx = canvas.getContext('2d'); canvas.width = 1500; canvas.height= 500; class InputHandler{ constructor(game){ this.game = game; window.addEventListener('keydown', e =>{ if((( e.key === 'ArrowUp') || (e.key == 'ArrowDown')) && this.game.keys.indexOf(e.key) === -1 ){ this.game.keys.push(e.key); } console.log(this.game.keys); }); window.addEventListener('keyup',e=> { if(this.game.keys.indexOf(e.key) > -1){ this.game.keys.splice(this.game.keys.indexOf(e.key),1); } }); } } class player{ constructor(game){ this.game = game; this.width = 120; this.height = 190; this.x = 20; this.y = 100; this.speedY = 0; this.maxspeed = 2; } update(){ this.y += this.speedY; if(this.game.keys.includes('ArrowUp')) this.speedY = -1; // error on this line while executing } draw(context){ context.fillRect(this.x,this.y,this.width,this.height); } } class Game{ constructor(width,height){ this.width = width; this.height = height; this.player = new player(Game); this. Input = new InputHandler(this); this.keys = []; } update(){ this.player.update(); } draw(context){ this.player.draw(context); } } const game = new Game(canvas.width,canvas.height); // animation loop function animat(){ ctx.clearRect(0,0,canvas.width,canvas.height); game.update(); game.draw(ctx); requestAnimationFrame(animat); } animat(); }); i tried to implement eventhandler class in player.Update() method but still showing the error.
[ "It looks like the problem is with the line if(this.game.keys.includes('ArrowUp')) this.speedY = -1;. It seems like this.game.keys is undefined when you try to access it.\nOne possible reason for this is that this.game.keys is not defined in the player class, but you are trying to access it in the player.update() method. To fix this, you could define this.game.keys in the player class and pass it in as an argument to the player.update() method.\nHere's an example of how you could do this:\nclass player {\n constructor(game, keys) {\n this.game = game;\n this.width = 120;\n this.height = 190;\n this.x = 20;\n this.y = 100;\n this.speedY = 0;\n this.maxspeed = 2;\n this.keys = keys; // add this line to save the keys\n }\n\n update() {\n this.y += this.speedY;\n if (this.keys.includes('ArrowUp')) this.speedY = -1; // access this.keys instead of this.game.keys\n\n // ... other code\n }\n\n // ... other code\n}\n\nThen, in the Game class, you would need to pass the keys array to the player class when you create a new player object:\nclass Game {\n constructor(width, height) {\n this.width = width;\n this.height = height;\n this.keys = [];\n this.player = new player(this, this.keys); // pass in this.keys as an argument to the player class\n this.Input = new InputHandler(this);\n }\n\n // ... other code\n}\n\nThis should fix the error you are seeing and allow you to access the keys array in the player.update() method.\n", "The error you're getting is because this.game.keys is undefined in the update method of the player class. This is because you're trying to access the game property of this directly, but this refers to the player instance, not the Game instance.\nTo fix this, you can pass the game instance as an argument to the update method, and then use that argument to access the keys property. Here is an example of how you can do this:\nclass player {\n // ...\n\n update(game) {\n this.y += this.speedY;\n if (game.keys.includes('ArrowUp')) this.speedY = -1;\n }\n\n // ...\n}\n\nYou would need to make a similar change in the draw method of the player class, where you would also need to pass the game instance as an argument and use it to access the keys property.\nYou would also need to update the update method of the Game class to pass the game instance as an argument to the update methods of the player and InputHandler classes:\nclass Game {\n // ...\n\n update() {\n this.player.update(this);\n this.Input.update(this);\n }\n\n // ...\n}\n\n" ]
[ 0, 0 ]
[]
[]
[ "arrays", "class", "javascript", "oop" ]
stackoverflow_0074680051_arrays_class_javascript_oop.txt
Q: Android Studio: This file not part of the project, but the project builds successfully I have a strange problem that suddenly appeared in Android Studio. I created a new cpp file and included it in Android.mk. Then I synced the project. However, Android Studio still complains that the file is not part of the project and that I need to sync, BUT the whole project builds successfully. Likewise, if I remove one of the other older files from Android.mk that it did not complain about, and resync and then try to build the project, as expected the build fails, but Android Studio does NOT complain that that file is not part of the project anymore. So somehow, suddenly the Android Studio editor is not able to correctly identify which files have been synced and are part of the project, but during compilation everything works as expected. Does anyone know how to fix this annoying problem? I have tried clean project and invalidate caches/restart, as well as updating Android Studio, without luck (AS version 3.4). A: I used the Build > Refresh Linked C++ Projects menu and it worked. A: I had a similar problem. Like yourself, I have tried everything. Invalidate and Restart: Doesn't work. Manually deleting folders (.gradle, .idea, .ndkbuild etc.): Doesn't work. Clean, Rebuild, Link C++ Files: Doesn't work. One thing that kind of helped me was: I changed the NDK version. I compiled, then got a compilation error (didn't matter because it was the wrong version of NDK anyway), then I reverted to the original NDK. This appeared to solve the problem; however, it came back again. My solution was to reset Android Studio to factory settings. If you are on Linux, you can start by deleting these folders: rm -rf ~/.android rm -rf ~/.AndroidStudio3.4 Then you download and run your Android Studio and do not import anything from anywhere. I suspect the problem was caused by one of the plugins I've installed. It may be a good idea to back up those two folders from time to time and reload them from there if necessary. EDIT: It seems that my problem persisted after the above solution after adding more .cpp files. After seeing that, I searched where that popup came from. It follows that the "This file is not part of project..." popup is pushed from ndk-build. (Class name: NewCppSourceNotificationProvider - StaleCppProjectNotificationPanel). What I tried, and what has worked so far: I used Android Studio 3.5 Canary13 with NDK version r19c (stable version). I hope this helps you. A: Problem Environment Android Studio 3.5 RC 2 gradle-4.10-all com.android.tools.build:gradle:3.2.1 Solution Update to: - gradle-5.5.1-all - com.android.tools.build:gradle:3.4.2 Steps From the project root run (note this has to be done first): ./gradlew wrapper --gradle-version 5.5.1 --distribution-type all In the root build.gradle file: buildscript { //... dependencies { classpath("com.android.tools.build:gradle:3.4.2") //... } } A: I had a similar problem; it could be a compatibility issue between the Gradle version and the Gradle plugin version, because my solution was replacing the old configuration // build.gradle classpath 'com.android.tools.build:gradle:3.2.1' ... // gradle-wrapper.properties distributionUrl=https\://services.gradle.org/distributions/gradle-4.6-all.zip with the following new one by changing the Gradle version from 4.6 to 4.10.1. // build.gradle classpath 'com.android.tools.build:gradle:3.2.1' ... // gradle-wrapper.properties distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.1-all.zip After that, Android Studio can index and track my new cpp files in the project. A: I fixed this by updating my 'com.android.tools.build:gradle'. A: Had the same problem with Android Studio 4.0.1 and the latest Gradle at this time (6.1.1?). The problem went away after I exited Android Studio and deleted the .gradle and .idea folders in the project main directory, plus deleted the .cxx and build directories in the affected module directory. I'm not sure which really helped, but most probably deleting .gradle and/or .idea. A: I fixed it by updating 'com.android.tools.build:gradle' in the Arctic Fox version 2020.3.1. A: For those struggling with this: for me, I made a small change to CMakeLists.txt (I altered the version required). This forced CMake to regenerate, and all missing files were added. This is quicker than the other options listed here. This applied to NDK 21, so YMMV! A: Invalidate cache and restart and you should be good. However, make sure you checked the Clear file system cache and local history checkbox.
Android Studio: This file not part of the project, but the project builds successfully
I have a strange problem that suddenly appeared in android studio. I created a new cpp file, and included it in Android.mk. Then I synced the project. However, android studio still complains that the file is not part of the project and that I need to sync, BUT the whole project builds successfully. Likewise, if I remove one of the other older files from Android.mk that it did not complain about, and resyncs and then tries to build the project, as expected the build fails, but android studio does NOT complain that that file is not part of the project anymore. So somehow, suddenly the android studio editor is not able to correctly identify which files have been synced and are part of the project, but during compilation everything works as expected. Does anyone know how to fix this annoying problem? I have tried clean project, invalidate caches/restart as well as updating android studio without luck (AS version 3.4).
[ "I used Build > Refresh Linked C++ Projects menu and it worked.\n\n", "I had a similar problem. Like yourself, I have tried everything. \n\nInvalidate and Restart: Doesn't work\nManual deleting folders: .gradle .idea .ndkbuild etc. doesn't work\nClean, Rebuild, Link C++ Files: Doesn't work\n\nOne thing that kind of helped me was: I changed the NDK version. I compiled, then got a compilation error (didn't matter because it was the wrong version of NDK anyway), then I reverted to the original NDK. This appeared to solve the problem, however, it got back again.\nMy solution was to reset Android Studio to factory settings. If you are on Linux, you can start by deleting these folders:\nrm -rf ~/.android\n\nrm -rf ~/.AndroidStudio3.4\n\nThen you download and run your Android Studio and not import anything from anywhere.\nI suspect the problem was caused by one of the plugins I've installed.\nIt may be a good idea to backup those two folders from time to time and reload them from there if necessary.\nEDIT: It seems that my problem persisted after the above solution after adding more .cpp files. After seeing that, I searched where that popup came from. It follows that \"This file is not part of project...\" popup is pushed from ndk-build. (Class name: NewCppSourceNotificationProvider - StaleCppProjectNotificationPanel). What I tried, and what worked so far; I used Android Studio 3.5 Canary13 with NDK version r19c (Stable version). I hope this helps you.\n", "Problem Environment\n\nAndroid Studio 3.5 RC 2\ngradle-4.10-all \ncom.android.tools.build:gradle:3.2.1\n\nSolution\nUpdate to:\n- gradle-5.5.1-all\n- com.android.tools.build:gradle:3.4.2\nSteps\nFrom the project root run (note this has to be done first): \n./gradlew wrapper --gradle-version 5.5.1 --distribution-type all\n\nIn root build.gradle file:\nbuildscript {\n //...\n dependencies {\n classpath(\"com.android.tools.build:gradle:3.4.2\")\n //...\n }\n}\n\n", "I have similar problem, it could be the compatibility issue of gradle version and gradle plugin version, because my solution is replacing the old configuration\n// build.gradle\nclasspath 'com.android.tools.build:gradle:3.2.1'\n...\n// gradle-wrapper.properties\ndistributionUrl=https\\://services.gradle.org/distributions/gradle-4.6-all.zip\n\nwith the following new one by changing gradle version from 4.6 to 4.10.1.\n// build.gradle\nclasspath 'com.android.tools.build:gradle:3.2.1'\n...\n// gradle-wrapper.properties\ndistributionUrl=https\\://services.gradle.org/distributions/gradle-4.10.1-all.zip\n\nAfter that, Android studio can index and track my new cpp files in project.\n", "fix this by update my 'com.android.tools.build:gradle'\n", "Had the same problem with Android Studio 4.0.1 and the latest gradle at this time (6.1.1?). The problem went away after I exited Android Studio and deleted .gradle and .idea folders in the project main directory, plus deleted .cxx and build directories in the affected module directory. I'm not sure which really helped, but most probably deleting .grade and/or .idea\n", "I fixed with update 'com.android.tools.build:gradle' in the artic fox version 2020.3.1\n", "For those struggling with this for me I made a small change to CMakeLists.txt (I altered the version required). This forced the CMake to regenerate, and all missing files were added. This is quicker than the other options listed here.\nThis applied to NDK 21, so YMMV!\n", "Invalidate cache and restart and you should be good. 
However, make sure you checked the Clear file system cache and local history checkbox.\n" ]
[ 8, 4, 4, 2, 1, 1, 0, 0, 0 ]
[]
[]
[ "android_studio" ]
stackoverflow_0055826287_android_studio.txt
Q: Display multiple elements of an XML I have an XML file with multiple elements with the same name. I'm trying to concatenate the sub-elements but can only get the first occurrence. <?xml version="1.0" encoding="utf-8"?> <FOLDER JOBNAME="some Job" MAXWAIT="5"> <OTHER> <ELEMENTS> </ELEMENTS> </OTHER> </FOLDER> <FOLDER JOBNAME="some Other Job" MAXWAIT="15"> <OTHER> <ELEMENTS> </ELEMENTS> </OTHER> </FOLDER> Is there a way to use xmllint or some other tool to get output like: some Job 5 some Other Job 15 etc... When I try with xmllint --xpath, I get the following: me@myComp tmp $ xmllint --xpath 'concat(//@JOBNAME," ",//@MAXWAIT)' jobs.xml ADDRESS_VERIFICATION 5 me@myComp tmp $ xmllint --xpath 'concat(//JOBNAME[*]," ",//MAXWAIT[*])' jobs.xml me@myComp tmp $ Is there a way to concatenate multiple parameters with xmllint or any other tool on the command line? UPDATE - Yeah, it's a proper XML - Also, just noticed the repeated lines and removed them. A: Assuming you have valid XML (per comments by @GillesQuenot and @Cyrus) and are open to using XSLT, then the following transform might be an option: <?xml version="1.0" encoding="UTF-8" ?> <xsl:transform xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> <xsl:output method="text" encoding="UTF-8" /> <xsl:template match="FOLDER"> <xsl:value-of select="concat(@JOBNAME, ' ', @MAXWAIT)"/> </xsl:template> </xsl:transform> Sample usage: $ xsltproc abc.xslt abc.txt some Job 5 some Other Job 15 A: You may find the XML parser xidel interesting: $ xidel -s jobs.xml -e '//FOLDER/concat(@JOBNAME," ",@MAXWAIT)' $ xidel -s jobs.xml -e '//FOLDER/join((@JOBNAME,@MAXWAIT))' $ xidel -s jobs.xml -e '//FOLDER/x"{@JOBNAME} {@MAXWAIT}"' # Xidel's own extended-string-syntax. All resulting in: some Job 5 some Other Job 15
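One more command-line option, sketched under the same assumption as the answers above (a single wrapping root element, since the sample as shown has two top-level FOLDER elements and is not well-formed by itself): xmlstarlet can evaluate the same XPath per node:
$ xmlstarlet sel -t -m '//FOLDER' -v 'concat(@JOBNAME," ",@MAXWAIT)' -n jobs.xml
some Job 5
some Other Job 15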
Display multiple elements of an XML
I have an xml file with multiple elements with the same key elements with the same name. I'm trying to concatonate the sub elements but can only get the first occurrence. <?xml version="1.0" encoding="utf-8"?> <FOLDER JOBNAME="some Job" MAXWAIT="5"> <OTHER> <ELEMENTS> </ELEMENTS> </OTHER> </FOLDER> <FOLDER JOBNAME="some Other Job" MAXWAIT="15"> <OTHER> <ELEMENTS> </ELEMENTS> </OTHER> </FOLDER> Is there a way to use xmllint or some other tool to get output like: some Job 5 some Other Job 15 etc... when I try with xmllint --xpath, I get the following: me@myComp tmp $ xmllint --xpath 'concat(//@JOBNAME," ",//@MAXWAIT)' jobs.xml ADDRESS_VERIFICATION 5 me@myComp tmp $ xmllint --xpath 'concat(//JOBNAME[*]," ",//MAXWAIT[*])' jobs.xml me@myComp tmp $ Is there a way to concatenate multiple parameters with xmllint or any other tool on the command line? UPDATE - Yeah, it's a proper XML - Also, just notices the repeated lines and removed them.
[ "Assuming you have valid XML (per comments by @GillesQuenot and @Cyrus) and are open to using XSLT, then the following transform might be an option:\n<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<xsl:transform xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\" version=\"1.0\">\n <xsl:output method=\"text\" encoding=\"UTF-8\" />\n <xsl:template match=\"FOLDER\">\n <xsl:value-of select=\"concat(@JOBNAME, ' ', @MAXWAIT)\"/>\n </xsl:template>\n</xsl:transform>\n\nSample usage:\n$ xsltproc abc.xslt abc.txt\n\nsome Job 5\nsome Other Job 15\n\n", "You may find the XML parser xidel interesting:\n$ xidel -s jobs.xml -e '//FOLDER/concat(@JOBNAME,\" \",@MAXWAIT)'\n$ xidel -s jobs.xml -e '//FOLDER/join((@JOBNAME,@MAXWAIT))'\n$ xidel -s jobs.xml -e '//FOLDER/x\"{@JOBNAME} {@MAXWAIT}\"' # Xidel's own extended-string-syntax.\n\nAll resulting in:\nsome Job 5\nsome Other Job 15\n\n" ]
[ 0, 0 ]
[]
[]
[ "bash", "xml", "xmllint" ]
stackoverflow_0074621586_bash_xml_xmllint.txt
Q: Haskell FFI: stack run is ok, but GHCi does not link properly I am trying to learn how to structure a Haskell project/workflow that uses FFI. I am using stack, but I find myself unable to use GHCi when it comes to the imported foreign functions. Here is a simplified version of the problem. Let's say that I have the following two files in $PROJECT_ROOT/cbits: hello.h #ifndef HELLO_H #define HELLO_H extern "C" { int foo(); } #endif /* HELLO_H */ hello.cpp #include "hello.h" #include <iostream> int foo() { std::cout << "extremely dangerous side effect" << std::endl; return 42; } My Main.hs file: module Main where import Foreign.C foreign import ccall unsafe "foo" foo :: IO CInt -- this does side effects and prints '42' main = foo >>= print The relevant (C++ specific) section of my package.yaml is: include-dirs: - cbits cxx-sources: - cbits/*.cpp cxx-options: - -std=c++17 extra-libraries: - stdc++ I am using the souffle-haskell's package.yaml as a reference. Compiling and running with stack run is ok and I get the expected output: extremely dangerous side effect 42 But, in the GHCi session (run with stack ghci), calling main gives: ghc: ^^ Could not load 'foo', dependency unresolved. See top entry above. GHC.ByteCode.Linker: can't find label During interactive linking, GHCi couldn't find the following symbol: foo This may be due to you not asking GHCi to load extra object files, archives or DLLs needed by your current session. Restart GHCi, specifying the missing library using the -L/path/to/object/dir and -lmissinglibname flags, or simply by naming the relevant files on the GHCi command line. Alternatively, this link failure might indicate a bug in GHCi. If you suspect the latter, please report this as a GHC bug: https://www.haskell.org/ghc/reportabug The problem is not present if I compile hello.cpp beforehand: g++ -c cbits/hello.cpp -o cbits/hello.o And then run stack ghci --ghci-options cbits/hello.o, as suggested by the GHCi error message. Question is: do I really need to maintain a separate *.o file specifically for GHCi? Searching online I have found discussions addressing only the GHCi part or the stack/cabal part, but not both. The only useful answer that I have found is this one from 2013, which reaffirms the "solution" given by GHCi and does not mention stack or cabal. A: Question is: do I really need to maintain a separate *.o file specifically for GHCi? Answer is: no. After several tries, the only thing that I had to change was the name of an option: - cxx-sources: + c-sources: This left the behaviour of stack run unchanged, and allowed GHCi to link properly to the compiled code.
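For reference, a sketch of what the resulting package.yaml section could look like after the rename (hpack field names; whether cxx-options must also become cc-options once the sources move under c-sources is an assumption to verify against your hpack version):
include-dirs:
  - cbits
c-sources:        # was cxx-sources
  - cbits/*.cpp
cc-options:       # was cxx-options; applies to c-sources
  - -std=c++17
extra-libraries:
  - stdc++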
Haskell FFI: stack run is ok, but GHCi does not link properly
I am trying to learn how to structure a Haskell project/workflow that uses FFI. I am using stack, but I find myself unable to use GHCi when it comes to the imported foreign functions. Here is a simplified version of the problem. Let's say that I have the following two files in $PROJECT_ROOT/cbits: hello.h #ifndef HELLO_H #define HELLO_H extern "C" { int foo(); } #endif /* HELLO_H */ hello.cpp #include "hello.h" #include <iostream> int foo() { std::cout << "extremely dangerous side effect" << std::endl; return 42; } My Main.hs file: module Main where import Foreign.C foreign import ccall unsafe "foo" foo :: IO CInt -- this does side effects and prints '42' main = foo >>= print The relevant (C++ specific) section of my package.yaml is: include-dirs: - cbits cxx-sources: - cbits/*.cpp cxx-options: - -std=c++17 extra-libraries: - stdc++ I am using the souffle-haskell's package.yaml as a reference. Compiling and running with stack run is ok and I get the expected output: extremely dangerous side effect 42 But, in the GHCi session (run with stack ghci), calling main gives: ghc: ^^ Could not load 'foo', dependency unresolved. See top entry above. GHC.ByteCode.Linker: can't find label During interactive linking, GHCi couldn't find the following symbol: foo This may be due to you not asking GHCi to load extra object files, archives or DLLs needed by your current session. Restart GHCi, specifying the missing library using the -L/path/to/object/dir and -lmissinglibname flags, or simply by naming the relevant files on the GHCi command line. Alternatively, this link failure might indicate a bug in GHCi. If you suspect the latter, please report this as a GHC bug: https://www.haskell.org/ghc/reportabug The problem is not present if I compile hello.cpp beforehand: g++ -c cbits/hello.cpp -o cbits/hello.o And then run stack ghci --ghci-options cbits/hello.o, as suggested by the GHCi error message. Question is: do I really need to maintain a separate *.o file specifically for GHCi? Searching online I have found discussions addressing only the GHCi part or the stack/cabal part, but not both. The only useful answer that I have found is this one from 2013, which reaffirms the "solution" given by GHCi and does not mention stack or cabal.
[ "\nQuestion is: do I really need to maintain a separate *.o file specifically for GHCi?\n\nAnswer is: no.\nAfter several tries, the only thing that I had to change was the name of an option:\n- cxx-sources:\n+ c-sources:\n\nThis left the behaviour of stack run unchanged, and allowed GHCi to link properly to the compiled code.\n" ]
[ 1 ]
[]
[]
[ "ffi", "ghci", "haskell", "haskell_stack" ]
stackoverflow_0074678296_ffi_ghci_haskell_haskell_stack.txt
Q: Why on disabled Pseudo class background color of icon button is not changing The carousel is working fine. But the issue is that when there is no card on the left, the JS function adds a disabled class to the left-arrow; when disabled, I want the background color of the icon to be dim and disabled. I don't know how to fix it. Is there a way to change the bg color on disabled? html: <div class="left-arrow" id="left-arrow-rtl"> <div class="arrow" > <i class="fas fa-arrow-left"></i> </div> </div> Css: .left-arrow :disabled{ background-color: grey; } .left-arrow { width: 36px; height: 36px; background-color: #ACBBD1; border-radius: 60px; color: white; position: relative; cursor: pointer; } A: The :disabled pseudo-class only works on input fields, buttons, and select elements. By adding the background color to the .disabled class, it works. .disabled { background: #ACBBD1; opacity: 0.5; }
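A compact variant of the same idea, chaining the selector to the element's own class (pointer-events is an optional extra, not part of the original answer):
/* matches <div class="left-arrow disabled"> after the JS adds the class */
.left-arrow.disabled {
  background-color: grey;
  opacity: 0.5;
  pointer-events: none; /* also swallows further clicks */
}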
Why on disabled Pseudo class background color of icon button is not changing
Carousel is working fine. But the issue is when there is no card on left the js function add disabled class to the left-arrow but on disabled i want the background color of icon to be dim and disabled. Don't know how can I fix it. Is there a way to change the bg color on disabled. html: <div class="left-arrow" id="left-arrow-rtl"> <div class="arrow" > <i class="fas fa-arrow-left"></i> </div> </div> Css: .left-arrow :disabled{ background-color: grey; } .left-arrow { width: 36px; height: 36px; background-color: #ACBBD1; border-radius: 60px; color: white; position: relative; cursor: pointer; }
[ "Pseudo class disabled only works on input fields , buttons and select.\nBy adding the background color to disabled class it works.\n.disabled {\n background: #ACBBD1;\n opacity: 0.5;\n}\n\n" ]
[ 0 ]
[]
[]
[ "css", "html", "pseudo_class" ]
stackoverflow_0074680009_css_html_pseudo_class.txt
Q: BGR to RGB for CUB_200 images by Image.split() I am creating a PyTorch dataset and dataloader from CUB_200. When reading the images as PIL, I need to change the BGR channels to RGB, and I use the following code: def _read_images_from_list(imagefile_list): imgs = [] mean=[0.485, 0.456, 0.406] std= [0.229, 0.224, 0.225] Transformations = transforms.Compose([transforms.Resize([224, 224]), transforms.ToTensor(), transforms.Normalize(mean, std)]) for imagefile in imagefile_list: # read images as PIL instead of NUMPY img = Image.open(imagefile) b, g, r = img.split() img = Image.merge("RGB", (r, g, b)) img = Transformations(img) # ToTensor and between [0,1], then normalized using image net mean and std, then transposed into shape (C,H,W) imgs += [img] return imgs After going through a number of classes, I get the following error. ValueError: not enough values to unpack (expected 3, got 1) I wonder what I should do now; it means that one of the images has only one channel instead of three. Can this be the case, or is there a problem with my code? I had a different implementation before but it worked. The reason I changed this implementation was that I could not normalize my images. This is the old implementation: def _read_images_from_list(imagefile_list): imgs = [] for imagefile in imagefile_list: img = cv2.imread(imagefile).astype(np.float32) img = cv2.resize(img, (224, 224)) # Reverse channel order (BGR -> RGB) img_r, img_g, img_b = np.split(img, 3, axis=2) img = np.concatenate((img_b, img_g, img_r), axis=2) # Subtract mean img -= np.array((103.94,116.78,123.68), dtype=np.float32) # BGR mean # HWC -> CHW, compatible with pytorch img = np.transpose(img, [2, 0, 1]) imgs += [img] return imgs A: I would strongly recommend you use skimage.io to load your images, not opencv. It opens the images in RGB format by default, removing your shuffling overhead, but if you want to convert BGR to RGB you can use this: import numpy as np img = np.arange(27).reshape(3,3,3) b = img[:,:,0] g = img[:,:,1] r = img[:,:,2] rgb = np.dstack([r,g,b]) print(img) print("#"*20) print(rgb)
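A note on the ValueError itself, as an addition to the existing answer: PIL's Image.open already yields RGB order (BGR is an OpenCV convention), and CUB-200 is known to contain a few grayscale images, which is why split() sometimes returns a single channel. A minimal hedged fix inside the loop:
from PIL import Image

img = Image.open(imagefile).convert('RGB')  # forces 3 channels; grayscale is replicated
# No manual channel swap needed -- PIL is already in RGB order.
img = Transformations(img)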
BGR to RGB for CUB_200 images by Image.split()
I am creating a PyTorch dataset and dataloader from CUB_200. When reading the images as PIL, I need to change the BGR channels to RGB and I use the following code: def _read_images_from_list(imagefile_list): imgs = [] mean=[0.485, 0.456, 0.406] std= [0.229, 0.224, 0.225] Transformations = transforms.Compose([transforms.Resize([224, 224]), transforms.ToTensor(), transforms.Normalize(mean, std)]) for imagefile in imagefile_list: # read images as PIL instead of NUMPY img = Image.open(imagefile) b, g, r = img.split() img = Image.merge("RGB", (r, g, b)) img = Transformations(img) # ToTensor and between [0,1], then normalized using image net mean and std, then transposed into shape (C,H,W) imgs += [img] return imgs After going through a number of classes, I get the following error. ValueError: not enough values to unpack (expected 3, got 1) I wonder what I should do now. It means that one of the images has only one channel instead of three. Can this be the case, or is there a problem with my code? I had a different implementation before but it worked. The reason I changed this implementation was that I could not normalize my images. This is the old implementation: def _read_images_from_list(imagefile_list): imgs = [] for imagefile in imagefile_list: img = cv2.imread(imagefile).astype(np.float32) img = cv2.resize(img, (224, 224)) # Convert RGB to BGR img_r, img_g, img_b = np.split(img, 3, axis=2) img = np.concatenate((img_b, img_g, img_r), axis=2) # Extract mean img -= np.array((103.94,116.78,123.68), dtype=np.float32) # BGR mean # HWC -> CHW, compatible with pytorch img = np.transpose(img, [2, 0, 1]) imgs += [img] return imgs
[ "I would strongly recommend you use skimage.io to load your images, not opencv. It opens the images in RGB format by default, removing your shuffling overhead, but if you want to convert BGR to RGB you can use this:\nimport numpy as np\n\nimg = np.arange(27).reshape(3,3,3)\nb = img[:,:,0]\ng = img[:,:,1]\nr = img[:,:,2]\n\nrgb = np.dstack([r,g,b])\n\nprint(img)\nprint(\"#\"*20)\nprint(rgb)\n\n" ]
[ 1 ]
[]
[]
[ "image", "python", "pytorch", "pytorch_dataloader" ]
stackoverflow_0074679922_image_python_pytorch_pytorch_dataloader.txt
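The unpack error usually means one file in the dataset is grayscale (a single channel), so split() returns one band. A minimal sketch, assuming the PIL pipeline from the question: convert to RGB first, which also makes the manual channel swap unnecessary, because PIL already loads images in RGB order (the BGR shuffle is only needed with cv2.imread):

from PIL import Image

img = Image.open(imagefile).convert("RGB")  # forces 3 channels, even for grayscale files
img = Transformations(img)                  # no split/merge needed: PIL is already RGB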
Q: Navbar works only on the first click; the second click does not work and it won't go back So I'm building this website, and on the navbar if I click one item it works and sends me to that page, but if I click the other items nothing happens: it won't go to another section and it won't go back, and it goes this way for every item! I'm new to programming so I don't really know the mistake and I can't find an answer online. Here are the files: enter image description here here is the html code <nav> <a href="index.html"><img src="img/logo.png"></a> <div class="nav-links" id="navLinks"> <i class="fa fa-times" onclick="hideMenu()"></i> <ul> <li><a href="">HOME</a></li> <li><a href="">ABOUT</a></li> <li><a href="">COURSE</a></li> <li><a href="">BLOG</a></li> <li><a href="">CONTACT</a></li> </ul> </div> <i class="fa fa-bars" onclick="showMenu()"></i> </nav> <h1>Our Courses</h1> </section> <section class="course"> <h1>Courses We Offer</h1> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec eget iaculis dui, quis dapibus diam. Etiam tellus erat, consectetur eget eros sit amet, tincidunt consectetur erat.</p> <div class="row"> <div class="course-col"> <h3>Intermediate</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec eget iaculis dui, quis dapibus diam. Etiam tellus erat, consectetur eget eros sit amet, tincidunt consectetur erat. </p> </div> <div class="course-col"> <h3>Degree</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec eget iaculis dui, quis dapibus diam. Etiam tellus erat, consectetur eget eros sit amet, tincidunt consectetur erat. </p> </div> <div class="course-col"> <h3>Post Graduation</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec eget iaculis dui, quis dapibus diam. Etiam tellus erat, consectetur eget eros sit amet, tincidunt consectetur erat. </p> </div> </div> </section> <section class="facilities"> <h1>Our Facilities</h1> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent cursus nulla consequat, congue tellus in, dictum massa. Proin facilisis arcu erat, ut euismod ex fringilla at.</p> <div class="row"> <div class="facilities-col"> <img src="./img/library.png"> <h3>World Class Library</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent cursus nulla consequat, congue tellus in, dictum massa. Proin facilisis arcu erat, ut euismod ex fringilla at.</p> </div> <div class="facilities-col"> <img src="./img/basketball.png"> <h3>Largest Play Ground</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent cursus nulla consequat, congue tellus in, dictum massa. Proin facilisis arcu erat, ut euismod ex fringilla at.</p> </div> <div class="facilities-col"> <img src="./img/cafeteria.png"> <h3>Tasty and healthy food</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent cursus nulla consequat, congue tellus in, dictum massa. Proin facilisis arcu erat, ut euismod ex fringilla at.</p> </div> </div> </section> <section class="footer"> <h4>About Us</h4> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit.<br> Praesent cursus nulla consequat, congue tellus in, dictum massa <br>. 
Proin facilisis arcu erat, ut euismod ex fringilla at</p> <div class="icons"> <i class="fa-brands fa-square-instagram"></i> <i class="fa-brands fa-square-facebook"></i> <i class="fa-brands fa-linkedin"></i> <i class="fa-brands fa-square-twitter"></i> </div> </section> <script> var navLinks = document.getElementById("navLinks"); function showMenu() { navLinks.style.right = "0"; } function hideMenu() { navLinks.style.right = "-200px"; } </script> A: Here in the href attribute you must specify where the link points to: <ul> <li><a href="/">HOME</a></li> <li><a href="/about.html">ABOUT</a></li> <li><a href="/course.html">COURSE</a></li> <li><a href="/blog.html">BLOG</a></li> <li><a href="/contact.html">CONTACT</a></li> </ul>
Navbar works only on the first click; the second click does not work and it won't go back
So I'm building this website, and on the navbar if I click one item it works and sends me to that page, but if I click the other items nothing happens: it won't go to another section and it won't go back, and it goes this way for every item! I'm new to programming so I don't really know the mistake and I can't find an answer online. Here are the files: enter image description here here is the html code <nav> <a href="index.html"><img src="img/logo.png"></a> <div class="nav-links" id="navLinks"> <i class="fa fa-times" onclick="hideMenu()"></i> <ul> <li><a href="">HOME</a></li> <li><a href="">ABOUT</a></li> <li><a href="">COURSE</a></li> <li><a href="">BLOG</a></li> <li><a href="">CONTACT</a></li> </ul> </div> <i class="fa fa-bars" onclick="showMenu()"></i> </nav> <h1>Our Courses</h1> </section> <section class="course"> <h1>Courses We Offer</h1> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec eget iaculis dui, quis dapibus diam. Etiam tellus erat, consectetur eget eros sit amet, tincidunt consectetur erat.</p> <div class="row"> <div class="course-col"> <h3>Intermediate</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec eget iaculis dui, quis dapibus diam. Etiam tellus erat, consectetur eget eros sit amet, tincidunt consectetur erat. </p> </div> <div class="course-col"> <h3>Degree</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec eget iaculis dui, quis dapibus diam. Etiam tellus erat, consectetur eget eros sit amet, tincidunt consectetur erat. </p> </div> <div class="course-col"> <h3>Post Graduation</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec eget iaculis dui, quis dapibus diam. Etiam tellus erat, consectetur eget eros sit amet, tincidunt consectetur erat. </p> </div> </div> </section> <section class="facilities"> <h1>Our Facilities</h1> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent cursus nulla consequat, congue tellus in, dictum massa. Proin facilisis arcu erat, ut euismod ex fringilla at.</p> <div class="row"> <div class="facilities-col"> <img src="./img/library.png"> <h3>World Class Library</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent cursus nulla consequat, congue tellus in, dictum massa. Proin facilisis arcu erat, ut euismod ex fringilla at.</p> </div> <div class="facilities-col"> <img src="./img/basketball.png"> <h3>Largest Play Ground</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent cursus nulla consequat, congue tellus in, dictum massa. Proin facilisis arcu erat, ut euismod ex fringilla at.</p> </div> <div class="facilities-col"> <img src="./img/cafeteria.png"> <h3>Tasty and healthy food</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent cursus nulla consequat, congue tellus in, dictum massa. Proin facilisis arcu erat, ut euismod ex fringilla at.</p> </div> </div> </section> <section class="footer"> <h4>About Us</h4> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit.<br> Praesent cursus nulla consequat, congue tellus in, dictum massa <br>. Proin facilisis arcu erat, ut euismod ex fringilla at</p> <div class="icons"> <i class="fa-brands fa-square-instagram"></i> <i class="fa-brands fa-square-facebook"></i> <i class="fa-brands fa-linkedin"></i> <i class="fa-brands fa-square-twitter"></i> </div> </section> <script> var navLinks = document.getElementById("navLinks"); function showMenu() { navLinks.style.right = "0"; } function hideMenu() { navLinks.style.right = "-200px"; } </script>
[ "Here in the href attribute you must to specify where the link is going to:\n<ul>\n <li><a href=\"/\">HOME</a></li>\n <li><a href=\"/about.html\">ABOUT</a></li>\n <li><a href=\"/course.html\">COURSE</a></li>\n <li><a href=\"/blog.html\">BLOG</a></li>\n <li><a href=\"/contact.html\">CONTACT</a></li>\n</ul>\n\n" ]
[ 0 ]
[]
[]
[ "css", "html", "nav" ]
stackoverflow_0074679935_css_html_nav.txt
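If the intent is to jump to sections of the same page rather than to separate pages, a hedged alternative sketch is hash links pointing at element ids (the id values below are assumptions derived from the section class names in the question):

<li><a href="#course">COURSE</a></li>
...
<section id="course" class="course"> ... </section>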
Q: SQLite query error with Android 5.0 API 21 This error occurs with Android 5.0 API 21 but this code works perfectly with Android API 33 I'm using SimpleSQLiteQuery to create my select query [enter image description here] override fun getPropertyBySearch( type: String, city: String, minSurface: Int, maxSurface: Int, minPrice: Int, maxPrice: Int, onTheMarketLessALastWeek: Boolean, soldOn3LastMonth: Boolean, min3photos: Boolean, schools: Boolean, shops: Boolean ): LiveData<List<RealEstateDatabase>> { val iso = ISOChronology.getInstance() val today = LocalDate(iso) Log.e("today",today.toString()) val dateMinusThreeMonth = today.minusMonths(3) val dateMinus1Week = today.minusDays(7) Log.e("dateMinusThreeMonth",dateMinusThreeMonth.toString()) Log.e("dateMinus1Week",dateMinus1Week.toString()) val query = """SELECT * FROM RealEstateDatabase WHERE ('$type' ='' OR type LIKE '%$type%' ) AND ('$city' ='' OR city LIKE '%$city%' ) AND ($schools = false OR schoolsNear = $schools ) AND ($shops = false OR shopsNear = $shops ) AND ($min3photos = false OR count_photo >= 3 ) AND ($minSurface =0 AND $maxSurface = 0 OR area BETWEEN $minSurface AND $maxSurface ) AND ($minPrice =0 AND $maxPrice = 0 OR price BETWEEN $minPrice AND $maxPrice ) AND ($onTheMarketLessALastWeek = false OR dateOfEntry BETWEEN '$dateMinus1Week' AND '$today' ) AND ($soldOn3LastMonth = false OR dateOfSale BETWEEN '$dateMinusThreeMonth' AND '$today') """ Log.e("query", query) return realEstateDao.getPropertyBySearch(SimpleSQLiteQuery(query)) } Here is the table in question @Entity @Parcelize data class RealEstateDatabase( @PrimaryKey var id: String, var type: String? = null, var price: Int? = null, var area: Int? = null, var numberRoom: String? = null, var description: String? = null, var numberAndStreet: String? = null, var numberApartment: String? = null, var city: String? = null, var region: String? = null, var postalCode: String? = null, var country: String? = null, var status: String? = null, var dateOfEntry: String? = null, var dateOfSale: String? = null, var realEstateAgent: String? = null, var lat: Double ?=null, var lng: Double ?=null, var hospitalsNear : Boolean = false, var schoolsNear : Boolean = false, var shopsNear : Boolean = false, var parksNear : Boolean = false, @ColumnInfo(name = "listPhotoWithText") var listPhotoWithText : List<PhotoWithTextFirebase> ?=null, var count_photo : Int? = listPhotoWithText?.size, ) I can't explain why this code works on one API rather than another; I'm leaning towards an incompatibility in my SQLite query, but I'm not sure, nor how to fix it A: The constant false (as well as true) was introduced in version 3.23.0 of SQLite; as per :- 2018-04-02 (3.23.0) Add the sqlite3_serialize() and sqlite3_deserialize() interfaces when the SQLITE_ENABLE_DESERIALIZE compile-time option is used. Recognize TRUE and FALSE as constants. (For compatibility, if there exist columns named "true" or "false", then the identifiers refer to the columns rather than Boolean constants.) Support operators IS TRUE, IS FALSE, IS NOT TRUE, and IS NOT FALSE. https://www.sqlite.org/changes.html As such, only Android APIs 30+ support the use of true and false, as per https://developer.android.com/reference/android/database/sqlite/package-summary.html Instead you could use 0 instead of false (non zero, typically 1 for true). e.g.
val query = """SELECT * FROM RealEstateDatabase WHERE ('$type' ='' OR type LIKE '%$type%' ) AND ('$city' ='' OR city LIKE '%$city%' ) AND ($schools = 0 OR schoolsNear = $schools ) AND ($shops = 0 OR shopsNear = $shops ) AND ($min3photos = 0 OR count_photo >= 3 ) AND ($minSurface =0 AND $maxSurface = 0 OR area BETWEEN $minSurface AND $maxSurface ) AND ($minPrice =0 AND $maxPrice = 0 OR price BETWEEN $minPrice AND $maxPrice ) AND ($onTheMarketLessALastWeek = 0 OR dateOfEntry BETWEEN '$dateMinus1Week' AND '$today' ) AND ($soldOn3LastMonth = 0 OR dateOfSale BETWEEN '$dateMinusThreeMonth' AND '$today') """
SQLite query error with Android 5.0 API 21
This error occurs with Android 5.0 API 21 but this code works perfectly with Android API 33 I'm using SimpleSQLiteQuery to create my select query [enter image description here] override fun getPropertyBySearch( type: String, city: String, minSurface: Int, maxSurface: Int, minPrice: Int, maxPrice: Int, onTheMarketLessALastWeek: Boolean, soldOn3LastMonth: Boolean, min3photos: Boolean, schools: Boolean, shops: Boolean ): LiveData<List<RealEstateDatabase>> { val iso = ISOChronology.getInstance() val today = LocalDate(iso) Log.e("today",today.toString()) val dateMinusThreeMonth = today.minusMonths(3) val dateMinus1Week = today.minusDays(7) Log.e("dateMinusThreeMonth",dateMinusThreeMonth.toString()) Log.e("dateMinus1Week",dateMinus1Week.toString()) val query = """SELECT * FROM RealEstateDatabase WHERE ('$type' ='' OR type LIKE '%$type%' ) AND ('$city' ='' OR city LIKE '%$city%' ) AND ($schools = false OR schoolsNear = $schools ) AND ($shops = false OR shopsNear = $shops ) AND ($min3photos = false OR count_photo >= 3 ) AND ($minSurface =0 AND $maxSurface = 0 OR area BETWEEN $minSurface AND $maxSurface ) AND ($minPrice =0 AND $maxPrice = 0 OR price BETWEEN $minPrice AND $maxPrice ) AND ($onTheMarketLessALastWeek = false OR dateOfEntry BETWEEN '$dateMinus1Week' AND '$today' ) AND ($soldOn3LastMonth = false OR dateOfSale BETWEEN '$dateMinusThreeMonth' AND '$today') """ Log.e("query", query) return realEstateDao.getPropertyBySearch(SimpleSQLiteQuery(query)) } Here is the table in question @Entity @Parcelize data class RealEstateDatabase( @PrimaryKey var id: String, var type: String? = null, var price: Int? = null, var area: Int? = null, var numberRoom: String? = null, var description: String? = null, var numberAndStreet: String? = null, var numberApartment: String? = null, var city: String? = null, var region: String? = null, var postalCode: String? = null, var country: String? = null, var status: String? = null, var dateOfEntry: String? = null, var dateOfSale: String? = null, var realEstateAgent: String? = null, var lat: Double ?=null, var lng: Double ?=null, var hospitalsNear : Boolean = false, var schoolsNear : Boolean = false, var shopsNear : Boolean = false, var parksNear : Boolean = false, @ColumnInfo(name = "listPhotoWithText") var listPhotoWithText : List<PhotoWithTextFirebase> ?=null, var count_photo : Int? = listPhotoWithText?.size, ) I can't explain why this code works on one API rather than another; I'm leaning towards an incompatibility in my SQLite query, but I'm not sure, nor how to fix it
[ "The constant false (as well as true) was introduced in version 3.23.0 of SQLite; as per :-\n\n\n\n2018-04-02 (3.23.0)\n\n\n\n\n\nAdd the sqlite3_serialize() and sqlite3_deserialize() interfaces when the SQLITE_ENABLE_DESERIALIZE compile-time option is used.\n\n\n\n\n\n\nRecognize TRUE and FALSE as constants. (For compatibility, if there exist columns named \"true\" or \"false\", then the identifiers refer to the columns rather than Boolean constants.)\n\n\n\n\n\nSupport operators IS TRUE, IS FALSE, IS NOT TRUE, and IS NOT FALSE.\n\n\nhttps://www.sqlite.org/changes.html\n\n\n\n\nAs such only android API's of 30+ support the use of true and false, as per\n\nhttps://developer.android.com/reference/android/database/sqlite/package-summary.html\n\nInstead you could use 0 instead of false (non zero, typically 1 for true). e.g.\nval query = \"\"\"SELECT * FROM RealEstateDatabase WHERE \n ('$type' ='' OR type LIKE '%$type%' ) AND \n ('$city' ='' OR city LIKE '%$city%' ) AND\n ($schools = 0 OR schoolsNear = $schools ) AND \n ($shops = 0 OR shopsNear = $shops ) AND \n ($min3photos = 0 OR count_photo >= 3 ) AND\n ($minSurface =0 AND $maxSurface = 0 OR area BETWEEN $minSurface AND $maxSurface ) AND \n ($minPrice =0 AND $maxPrice = 0 OR price BETWEEN $minPrice AND $maxPrice ) AND \n ($onTheMarketLessALastWeek = 0 OR dateOfEntry BETWEEN '$dateMinus1Week' AND '$today' ) AND \n ($soldOn3LastMonth = 0 OR dateOfSale BETWEEN '$dateMinusThreeMonth' AND '$today') \"\"\"\n\n" ]
[ 0 ]
[]
[]
[ "android", "android_room", "sqlite" ]
stackoverflow_0074679764_android_android_room_sqlite.txt
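A minimal Kotlin sketch of the 0/1 workaround, converting the booleans once before interpolating them into the query string (variable names are illustrative):

// SQLite stores booleans as integers; map Kotlin Boolean -> 0/1 up front
val schoolsFlag = if (schools) 1 else 0
val shopsFlag = if (shops) 1 else 0

val query = """SELECT * FROM RealEstateDatabase WHERE
    ($schoolsFlag = 0 OR schoolsNear = $schoolsFlag) AND
    ($shopsFlag = 0 OR shopsNear = $shopsFlag)"""

This keeps the same query shape while avoiding the TRUE/FALSE literals that only SQLite 3.23.0+ (Android API 30+) understands.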
Q: How to print the dense layer values? I have created a CNN model. In it, I want to print the 1D array values of the dense layer. How can I print the dense layer output vector in Python? A: Please check: Keras, How to get the output of each layer? When creating your dense layer you can use the "name" argument, like: model.add(Dense(1024, name="my_dense_layer")) Then you get this layer's output using: model.get_layer("my_dense_layer").output
How to print the dense layer values?
I have created a CNN model. In it, I want to print the 1D array values of the dense layer. How can I print the dense layer output vector in Python?
[ "Please check:\nKeras, How to get the output of each layer?\nWhen creating your dense layer you can use \"name\" argument, like:\nmodel.add(Dense(1024, name=\"my_dense_layer\"))\nThan you get this layer output using:\nmodel.get_layer(\"my_dense_layer\").output\n" ]
[ 0 ]
[]
[]
[ "conv_neural_network", "deep_learning", "machine_learning", "neural_network", "recurrent_neural_network" ]
stackoverflow_0074679763_conv_neural_network_deep_learning_machine_learning_neural_network_recurrent_neural_network.txt
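A minimal Keras sketch of actually printing the dense activations for one input; model and sample_image are assumptions standing in for an existing trained CNN and a preprocessed input:

import numpy as np
from tensorflow import keras

# build a probe model that stops at the named dense layer
probe = keras.Model(inputs=model.input,
                    outputs=model.get_layer("my_dense_layer").output)

dense_values = probe.predict(np.expand_dims(sample_image, 0))  # shape (1, units)
print(dense_values[0])  # the 1D activation vector of the dense layer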
Q: Convert Float to Time I am trying to convert a DataFrame series with floats like "1200" into 12:00:00. My initial DataFrame is this one: import pandas as pd df = pd.DataFrame([1200.0, 0.0, 1536.0, 1530.0, 0.0], columns=['Occurred Time']) print(df) Occurred Time 0 1200.0 1 0.0 2 1536.0 3 1530.0 4 0.0 I am trying to convert the "Occurred Time" float from 1200.0 to 12:00:00. I used this code: import pandas as pd df = pd.DataFrame([1200.0, 0.0, 1536.0, 1530.0, 0.0], columns=['Occurred Time']) df['Occurred Time'] = pd.to_datetime(df['Occurred Time']) print(df) but it does not work and the output is this: Occurred Time 0 1970-01-01 00:00:00.000001200 1 1970-01-01 00:00:00.000000000 2 1970-01-01 00:00:00.000001536 3 1970-01-01 00:00:00.000001530 4 1970-01-01 00:00:00.000000000 I don't know what to do! A: This should work if you convert to strings, pad with zeros and provide a format to to_datetime: df['time'] = pd.to_datetime(df['Occurred Time'].astype(int) .astype(str).str.zfill(4), format='%H%M') Output: Occurred Time time 0 1200 1900-01-01 12:00:00 1 0 1900-01-01 00:00:00 2 1536 1900-01-01 15:36:00 Add .dt.time if you want only the time: df['time'] = pd.to_datetime(df['Occurred Time'].astype(int) .astype(str).str.zfill(4), format='%H%M').dt.time Output: Occurred Time time 0 1200 12:00:00 1 0 00:00:00 2 1536 15:36:00
Convert Float to Time
I am trying to convert a DataFrame series with floats like "1200" into 12:00:00. My initial DataFrame is this one: import pandas as pd df = pd.DataFrame([1200.0, 0.0, 1536.0, 1530.0, 0.0], columns=['Occurred Time']) print(df) Occurred Time 0 1200.0 1 0.0 2 1536.0 3 1530.0 4 0.0 I am trying to convert the "Occurred Time" float from 1200.0 to 12:00:00. I used this code: import pandas as pd df = pd.DataFrame([1200.0, 0.0, 1536.0, 1530.0, 0.0], columns=['Occurred Time']) df['Occurred Time'] = pd.to_datetime(df['Occurred Time']) print(df) but it does not work and the output is this: Occurred Time 0 1970-01-01 00:00:00.000001200 1 1970-01-01 00:00:00.000000000 2 1970-01-01 00:00:00.000001536 3 1970-01-01 00:00:00.000001530 4 1970-01-01 00:00:00.000000000 I don't know what to do!
[ "This should work if you convert to strings, pad with zeros and provide a format to to_datetime:\ndf['time'] = pd.to_datetime(df['Occurred Time'].astype(int)\n .astype(str).str.zfill(4),\n format='%H%M')\n\nOutput:\n Occurred Time time\n0 1200 1900-01-01 12:00:00\n1 0 1900-01-01 00:00:00\n2 1536 1900-01-01 15:36:00\n\nAdd .dt.time if you want only the time:\ndf['time'] = pd.to_datetime(df['Occurred Time'].astype(int)\n .astype(str).str.zfill(4),\n format='%H%M').dt.time\n\nOutput:\n Occurred Time time\n0 1200 12:00:00\n1 0 00:00:00\n2 1536 15:36:00\n\n" ]
[ 2 ]
[]
[]
[ "dataframe", "datetime", "pandas", "python" ]
stackoverflow_0074680074_dataframe_datetime_pandas_python.txt
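For HHMM-style floats like these, a plain-Python alternative sketch with divmod also works and avoids string padding (assuming every value encodes hours*100 + minutes):

import datetime

def float_to_time(x: float) -> datetime.time:
    hours, minutes = divmod(int(x), 100)   # 1536.0 -> (15, 36)
    return datetime.time(hour=hours, minute=minutes)

float_to_time(1536.0)  # datetime.time(15, 36)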
Q: Evaluation failed: ReferenceError: _MainBody is not defined - Puppeteer I am super, super new to this subject (today is my first day): end-to-end unit tests, and I discovered Puppeteer. I have my page where I launch a function to fetch some info from an API and then I display the info on the page. What I want to do is the following. I want to make this end-to-end test check if the page has a header and a footer, and if the function gets called and gives a response when called. Below I will attach my code. The question is: why does it say that _MainBody is not defined, since that is the name of the function and the file where everything happens: fetching the data and displaying it. I will attach it below so you can understand what I did and where the problem is. Thank you in advance to everyone who is willing to help. e2e.test.tsx import getRandomBeer from "./MainBody"; import puppeteer from "puppeteer"; describe("myTest", () => { let browser: puppeteer.Browser; let page: puppeteer.Page; beforeAll(async () => { browser = await puppeteer.launch(); page = await browser.newPage(); }); it('The function gets called', async () => { await page.goto('LINK'); console.log(await page.evaluate(() => typeof getRandomBeer === 'function')); }) afterAll(() => browser.close()); }); file where everything happens and where the function gets called to fetch the data import { render } from '@testing-library/react'; import React, { useState, useEffect } from 'react'; import './App.css'; import axios, { AxiosResponse } from 'axios'; import Beer from './BeerClass'; //Function that gets called in order to fetch the beers one by one async function getRandomBeer() { const req = await fetch('https://api.punkapi.com/v2/beers/random'); const data = await req.json(); console.log(data[0]); return data[0] as Beer; } const nBeers = 30; function MainBody() { const [beerData, setBeerData] = useState<Beer[]>([]); console.log(beerData); //----------------------------------------------------------------------------------------------------------------- //NOTE: Some of the beers come with NULL for the image link so some of the beers don't have a photo unfortunately. //----------------------------------------------------------------------------------------------------------------- //Saving all the beers inside an array that initially gets filled with zeros, then I map the beers into it and set the beerData so I can display it below //It waits until all the beers have been fetched useEffect(() => { Promise.all(new Array(nBeers).fill(0).map(getRandomBeer).reverse()).then(setBeerData); }, []) //Display the beer data, beer after beer return ( <div id="beers"> {beerData && beerData.map((beerData) => { return ( <div className="container"> <div className="image"> <img src={beerData.image_url} width={30} height={100}></img> <div className='text'> <h4>{beerData.name} </h4> <p>{beerData.tagline}</p> </div> </div> </div> ); } )} </div> ); }; export default { MainBody , getRandomBeer}; A: You need to expose the function after defining page; then you can use it inside the callback you pass to evaluate (evaluate returns whatever the callback returns): await page.exposeFunction("getRandomBeer", getRandomBeer);
Evaluation failed: ReferenceError: _MainBody is not defined - Puppeteer
I am super, super new to this subject (today is my first day): end-to-end unit tests, and I discovered Puppeteer. I have my page where I launch a function to fetch some info from an API and then I display the info on the page. What I want to do is the following. I want to make this end-to-end test check if the page has a header and a footer, and if the function gets called and gives a response when called. Below I will attach my code. The question is: why does it say that _MainBody is not defined, since that is the name of the function and the file where everything happens: fetching the data and displaying it. I will attach it below so you can understand what I did and where the problem is. Thank you in advance to everyone who is willing to help. e2e.test.tsx import getRandomBeer from "./MainBody"; import puppeteer from "puppeteer"; describe("myTest", () => { let browser: puppeteer.Browser; let page: puppeteer.Page; beforeAll(async () => { browser = await puppeteer.launch(); page = await browser.newPage(); }); it('The function gets called', async () => { await page.goto('LINK'); console.log(await page.evaluate(() => typeof getRandomBeer === 'function')); }) afterAll(() => browser.close()); }); file where everything happens and where the function gets called to fetch the data import { render } from '@testing-library/react'; import React, { useState, useEffect } from 'react'; import './App.css'; import axios, { AxiosResponse } from 'axios'; import Beer from './BeerClass'; //Function that gets called in order to fetch the beers one by one async function getRandomBeer() { const req = await fetch('https://api.punkapi.com/v2/beers/random'); const data = await req.json(); console.log(data[0]); return data[0] as Beer; } const nBeers = 30; function MainBody() { const [beerData, setBeerData] = useState<Beer[]>([]); console.log(beerData); //----------------------------------------------------------------------------------------------------------------- //NOTE: Some of the beers come with NULL for the image link so some of the beers don't have a photo unfortunately. //----------------------------------------------------------------------------------------------------------------- //Saving all the beers inside an array that initially gets filled with zeros, then I map the beers into it and set the beerData so I can display it below //It waits until all the beers have been fetched useEffect(() => { Promise.all(new Array(nBeers).fill(0).map(getRandomBeer).reverse()).then(setBeerData); }, []) //Display the beer data, beer after beer return ( <div id="beers"> {beerData && beerData.map((beerData) => { return ( <div className="container"> <div className="image"> <img src={beerData.image_url} width={30} height={100}></img> <div className='text'> <h4>{beerData.name} </h4> <p>{beerData.tagline}</p> </div> </div> </div> ); } )} </div> ); }; export default { MainBody , getRandomBeer};
[ "You need to use this after defining page\nnow you can use your function in the callback you use in evaluate.\nevaluate returns the element you define\nawait page.exposeFunction(\"getRandomBeer\", getRandomBeer);\n\n" ]
[ 0 ]
[]
[]
[ "e2e_testing", "javascript", "puppeteer", "typescript", "unit_testing" ]
stackoverflow_0071130953_e2e_testing_javascript_puppeteer_typescript_unit_testing.txt
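A slightly fuller sketch of the idea in the answer: Node-side functions are not visible inside page.evaluate, so they must be exposed first, and the exposed function is then reachable on window inside the page (the final assertion is illustrative):

// expose the Node function to the page context before evaluate
await page.exposeFunction("getRandomBeer", getRandomBeer);

const beer = await page.evaluate(async () => {
  // injected by exposeFunction; calls back into Node and returns a Promise
  return await (window as any).getRandomBeer();
});
console.log(typeof beer); // "object" if the fetch succeeded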
Q: Starting IIS Express cmd rather than VS2022 The ASP.Net Core 6 Web Api runs under IIS Express via VS2022 just fine and allows me to load the swagger page (http://localhost:8084/swagger/index.html): #Software: Microsoft Internet Information Services 10.0 #Version: 1.0 #Date: YYYY-DD-MM 17:49:22 #Fields: date time s-ip cs-method cs-uri-stem cs-uri-query s-port cs-username c-ip cs(User-Agent) cs(Referer) sc-status sc-substatus sc-win32-status time-taken YYYY-DD-MM 17:49:22 ::1 GET /_framework/aspnetcore-browser-refresh.js - 8084 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36 http://localhost:8084/swagger/index.html 200 0 0 6 YYYY-DD-MM 17:49:22 ::1 GET /swagger/index.html - 8084 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36 - 200 0 0 3664 YYYY-DD-MM 17:49:22 ::1 GET /_vs/browserLink - 8084 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36 http://localhost:8084/swagger/index.html 200 0 0 59 YYYY-DD-MM 17:49:22 ::1 GET /swagger/v1/swagger.json - 8084 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36 http://localhost:8084/swagger/index.html 200 0 0 149 When running it from the cmdline: "C:\Program Files (x86)\IIS Express\iisexpress" /config:applicationhost.config /site:TheWebApi Swagger does not load returning a HTTP 500: #Software: Microsoft Internet Information Services 10.0 #Version: 1.0 #Date: 2022-11-19 17:54:41 #Fields: date time s-ip cs-method cs-uri-stem cs-uri-query s-port cs-username c-ip cs(User-Agent) cs(Referer) sc-status sc-substatus sc-win32-status time-taken 2022-11-19 17:54:41 ::1 GET /swagger/index.html - 8084 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36 - 500 0 574 8 2022-11-19 17:54:41 ::1 GET /favicon.ico - 8084 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36 http://localhost:8084/swagger/index.html 500 0 574 0 Lex Li's blog How Visual Studio Launches IIS Express to Debug ASP.NET Core Apps was very helpful in me gaining more information on what is going on. Clearly, VS2022 is doing some magic to make things run in debug mode. In my case, I am trying to get a desktop app to launch the ASP.Net Core 6 application, so debugging is not a concern. When I looked into the event log, I found what might be the problem, but don't know the solution: Application 'C:\Users\sam\source\repos\WebApplication1\' failed to start. Exception message: Executable was not found at 'C:\Users\sam\source\repos\WebApplication1\bin\Debug\net5.0\WebApplication1.exe' The problem is the application is .Net 6, not .Net 5. It was originally .Net 5, but it has been upgraded to .Net 6. This has me thinking... I did just create a new .Net WebApi project (WebApplication2) to compare the applicationhost.conf generated vs the real project (WebApplication1). 
WebApplication1 has the following section, which was not in WebApplication2: <site name="WebApplication1" id="2"> <application path="/" applicationPool="WebApplication1 AppPool"> <virtualDirectory path="/" physicalPath="C:\Users\sam\source\repos\WebApplication1" /> </application> <bindings> <binding protocol="http" bindingInformation="*:8084:localhost" /> </bindings> </site> Since I don't want Visual Studio 2022 to be taken into account, should physicalPath="C:\Users\sam\source\repos\WebApplication1\bin\Debug\net6.0"? A: In my particular case, there is a desktop application that manages the execution of the ASP.Net Core 6 Web API application in question. It turns out that it is possible to publish the ASP.Net Core 6 Web API application into a single EXE, which is called a self-contained deployment. This is what I needed.
Starting IIS Express cmd rather than VS2022
The ASP.Net Core 6 Web Api runs under IIS Express via VS2022 just fine and allows me to load the swagger page (http://localhost:8084/swagger/index.html): #Software: Microsoft Internet Information Services 10.0 #Version: 1.0 #Date: YYYY-DD-MM 17:49:22 #Fields: date time s-ip cs-method cs-uri-stem cs-uri-query s-port cs-username c-ip cs(User-Agent) cs(Referer) sc-status sc-substatus sc-win32-status time-taken YYYY-DD-MM 17:49:22 ::1 GET /_framework/aspnetcore-browser-refresh.js - 8084 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36 http://localhost:8084/swagger/index.html 200 0 0 6 YYYY-DD-MM 17:49:22 ::1 GET /swagger/index.html - 8084 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36 - 200 0 0 3664 YYYY-DD-MM 17:49:22 ::1 GET /_vs/browserLink - 8084 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36 http://localhost:8084/swagger/index.html 200 0 0 59 YYYY-DD-MM 17:49:22 ::1 GET /swagger/v1/swagger.json - 8084 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36 http://localhost:8084/swagger/index.html 200 0 0 149 When running it from the cmdline: "C:\Program Files (x86)\IIS Express\iisexpress" /config:applicationhost.config /site:TheWebApi Swagger does not load returning a HTTP 500: #Software: Microsoft Internet Information Services 10.0 #Version: 1.0 #Date: 2022-11-19 17:54:41 #Fields: date time s-ip cs-method cs-uri-stem cs-uri-query s-port cs-username c-ip cs(User-Agent) cs(Referer) sc-status sc-substatus sc-win32-status time-taken 2022-11-19 17:54:41 ::1 GET /swagger/index.html - 8084 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36 - 500 0 574 8 2022-11-19 17:54:41 ::1 GET /favicon.ico - 8084 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36 http://localhost:8084/swagger/index.html 500 0 574 0 Lex Li's blog How Visual Studio Launches IIS Express to Debug ASP.NET Core Apps was very helpful in me gaining more information on what is going on. Clearly, VS2022 is doing some magic to make things run in debug mode. In my case, I am trying to get a desktop app to launch the ASP.Net Core 6 application, so debugging is not a concern. When I looked into the event log, I found what might be the problem, but don't know the solution: Application 'C:\Users\sam\source\repos\WebApplication1\' failed to start. Exception message: Executable was not found at 'C:\Users\sam\source\repos\WebApplication1\bin\Debug\net5.0\WebApplication1.exe' The problem is the application is .Net 6, not .Net 5. It was originally .Net 5, but it has been upgraded to .Net 6. This has me thinking... I did just create a new .Net WebApi project (WebApplication2) to compare the applicationhost.conf generated vs the real project (WebApplication1). 
WebApplication1 has the following section, which was not in WebApplication2: <site name="WebApplication1" id="2"> <application path="/" applicationPool="WebApplication1 AppPool"> <virtualDirectory path="/" physicalPath="C:\Users\sam\source\repos\WebApplication1" /> </application> <bindings> <binding protocol="http" bindingInformation="*:8084:localhost" /> </bindings> </site> Since I don't want Visual Studio 2022 to be taken into account, should physicalPath="C:\Users\sam\source\repos\WebApplication1\bin\Debug\net6.0"?
[ "In my particular case, there is a desktop application that is managing the execution of the ASP.Net Core 6 Web API application in question. It turns out that it is possible to publish the ASP.Net Core 6 Web API application into a single EXE, which is called a Self-contained deployment. This is what I needed.\n" ]
[ 0 ]
[]
[]
[ "applicationhost", "asp.net_core_6.0", "iis_express", "iis_express_10", "swagger" ]
stackoverflow_0074502423_applicationhost_asp.net_core_6.0_iis_express_iis_express_10_swagger.txt
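For reference, a self-contained single-file publish can be produced from the CLI roughly like this (the runtime identifier is an assumption; adjust it to the target machine):

dotnet publish -c Release -r win-x64 --self-contained true /p:PublishSingleFile=true

The resulting executable under bin/Release/net6.0/win-x64/publish/ carries its own .NET runtime, so the desktop application can launch it directly without IIS Express.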
Q: Overloading standard C++ library functions inside a templated class file I'm trying to overload the std::to_string() function to where it can take a string as its argument and just return the string, in the same file as a templated class. This is so it can be used by the member functions. But it's giving me the error: out-of-line definition of 'to_string' does not match any declaration in namespace 'std' Here's a generalized version of what I'm going for: #include <string> using namespace std; string std::to_string(string str){return str;} template <class Type> class myClass { public: int getPrintLength(Type var); }; template <class Type> int myClass<Type>::getPrintLength(Type var) { return to_string(var).size(); } For context, I'm doing this so that I can get the number of characters a variable (of any standard type) would have if printed, including string, via to_string(var).size(), which requires the function to take strings as an argument (so I don't have to check what type the variable is). But of course, there may be a better way of doing this, to which I am open. I have tried using different scopes, and templating my to_string() overload (using template<> instead of my usual template<class Type>). These resulted in the class simply using the overload and never the standard C++ function, and a no function template matches function template specialization 'to_string' error respectively. A: You can write a separate to_string() function for string input. The compiler will take care of calling your to_string() or std::to_string() based on the input type. using namespace std; string to_string(std::string str){ return str; } template <class Type> class myClass { public: int getPrintLength(Type var); }; template <class Type> int myClass<Type>::getPrintLength(Type var){ return to_string(var).size(); } int main(){ myClass<int> myInt; myClass<std::string> var; cout<<myInt.getPrintLength(1235)<<endl; cout<<var.getPrintLength("StarRocket")<<endl; }
Overloading standard C++ library functions inside a templated class file
I'm trying to overload the std::to_string() function to where it can take a string as its argument and just return the string, in the same file as a templated class. This is so it can be used by the member functions. But it's giving me the error: out-of-line definition of 'to_string' does not match any declaration in namespace 'std' Here's a generalized version of what I'm going for: #include <string> using namespace std; string std::to_string(string str){return str;} template <class Type> class myClass { public: int getPrintLength(Type var); }; template <class Type> int myClass<Type>::getPrintLength(Type var) { return to_string(var).size(); } For context, I'm doing this so that I can get the number of characters a variable (of any standard type) would have if printed, including string, via to_string(var).size(), which requires the function to take strings as an argument (so I don't have to check what type the variable is). But of course, there may be a better way of doing this, to which I am open. I have tried using different scopes, and templating my to_string() overload (using template<> instead of my usual template<class Type>). These resulted in the class simply using the overload and never the standard C++ function, and a no function template matches function template specialization 'to_string' error respectively.
[ "You can write a separate to_string() function for string input. Compiler will take care of calling your to_string() or std::to_string() based on the input type.\nusing namespace std;\n\nstring to_string(std::string str){\n return str;\n}\n\ntemplate <class Type>\nclass myClass\n{\n public:\n int getPrintLength(Type var);\n};\n\ntemplate <class Type>\nint myClass<Type>::getPrintLength(Type var){\n return to_string(var).size();\n}\nint main(){\n myClass<int> myInt;\n myClass<std::string> var;\n cout<<myInt.getPrintLength(1235)<<endl;\n cout<<var.getPrintLength(\"StarRocket\")<<endl;\n}\n\n" ]
[ 0 ]
[]
[]
[ "c++", "c++11", "tostring" ]
stackoverflow_0074637782_c++_c++11_tostring.txt
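Note that adding new declarations to namespace std is undefined behavior (with narrow exceptions), which is another reason to prefer a free function. Besides the overload in the answer, a C++17 sketch with if constexpr keeps the dispatch in one place; as_string is a hypothetical helper name:

#include <string>
#include <type_traits>

template <class T>
std::string as_string(const T& value) {
    if constexpr (std::is_convertible_v<T, std::string>)
        return value;                  // strings pass through unchanged
    else
        return std::to_string(value);  // arithmetic types use the std overloads
}

// inside the class template: return as_string(var).size();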
Q: Spark i/o with S3 Reading this below from https://blog.duyet.net/2021/04/spark-kubernetes-performance-tuning.html I/O with S3 It’s longer time to append data to an existing dataset and in particular, all of Spark jobs have finished, but your command has not finished, it is because driver node is moving the output files of tasks from the job temporary directory to the final destination one-by-one, which is slow with cloud storage (e.g. S3). Enable this optimization: spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version=2 I want to check whether the bold statement is true. I have never heard that the Spark driver writes files / controls writing with S3. Sure, it is not an HDFS cluster, and the Spark driver necessarily does some work when reading from S3. My knowledge is that the executors write the data to storage at rest, or to Kafka, even if running Spark on AWS. But, presumably I am wrong, or not? If true, same for ADLS2? The comment "I have faced the same issue, and I found It was quicker to write the content on a temporary HDFS directory and the move the content with a command such as s3-dist-cp to S3" is not what I am asking about. A: Spark's driver node is responsible for coordinating the execution of tasks across the worker nodes in a cluster, but it does not directly write data to S3 or ADLS2. Instead, the driver node instructs the executor nodes to write the data to the final destination, which in the case of S3 or ADLS2 would be handled by the Hadoop API. The setting you mentioned, spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version=2, is used to improve the performance of writing data to S3 by using a different algorithm for committing the output files. However, this setting only affects the performance of writing data and does not change the overall process of how data is written to S3 or ADLS2.
Spark i/o with S3
Reading this below from https://blog.duyet.net/2021/04/spark-kubernetes-performance-tuning.html I/O with S3 It’s longer time to append data to an existing dataset and in particular, all of Spark jobs have finished, but your command has not finished, it is because driver node is moving the output files of tasks from the job temporary directory to the final destination one-by-one, which is slow with cloud storage (e.g. S3). Enable this optimization: spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version=2 I want to check whether the bold statement is true. I have never heard that the Spark driver writes files / controls writing with S3. Sure, it is not an HDFS cluster, and the Spark driver necessarily does some work when reading from S3. My knowledge is that the executors write the data to storage at rest, or to Kafka, even if running Spark on AWS. But, presumably I am wrong, or not? If true, same for ADLS2? The comment "I have faced the same issue, and I found It was quicker to write the content on a temporary HDFS directory and the move the content with a command such as s3-dist-cp to S3" is not what I am asking about.
[ "Spark's driver node is responsible for coordinating the execution of tasks across the worker nodes in a cluster, but it does not directly write data to S3 or ADLS2. Instead, the driver node instructs the executor nodes to write the data to the final destination, which in the case of S3 or ADLS2 would be handled by the Hadoop API. The setting you mentioned, spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version=2, is used to improve the performance of writing data to S3 by using a different algorithm for committing the output files. However, this setting only affects the performance of writing data and does not change the overall process of how data is written to S3 or ADLS2.\n" ]
[ 0 ]
[]
[]
[ "amazon_s3", "apache_spark", "databricks" ]
stackoverflow_0074679685_amazon_s3_apache_spark_databricks.txt
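For completeness, the setting from the quoted blog is a Hadoop configuration, so in PySpark it would typically be applied when building the session, roughly like this (the app name is illustrative):

from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .appName("s3-write-example")
         .config("spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version", "2")
         .getOrCreate())

This only changes how task outputs are committed; the executors still do the actual writing.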
Q: How to get item.docId so that I can get the URL for a PDF in Firestore I'm trying to implement a PDF viewer from a URL stored in Firestore in React. How can I get item.docId in setPdfUrls? Please help me out; I'm new to React and web development. Where I'm stuck is that I don't understand how to do it. How do I get item.docId so that I can get the URL for the PDF in Firestore? ` import React, { useState, useEffect, useContext } from "react"; import { Card, Header, Player } from "../components"; import * as ROUTES from "../constants/routes"; import { FirebaseContext } from "../context/firebase"; import { ref, getDownloadURL } from "firebase/storage"; import { storage } from "../lib/firebase.prod"; import { SelectProfileContainer } from "./profiles"; import { FooterContainer } from "./footer"; export function BrowseContainer({ slides }) { var [pdfUrls, setPdfUrls] = useState([]); const [resume, setResume]=useState(null); useEffect(()=>{ getDownloadURL(ref(storage, 'Resume.pdf')).then((url)=>{ setResume(url); }) },[]); const [category, setCategory] = useState("articles"); const [profile, setProfile] = useState({}); const [loading, setLoading] = useState(true); const [slideRows, setSlideRows] = useState([]); const { firebase } = useContext(FirebaseContext); const user = firebase.auth().currentUser || {}; useEffect(() => { setTimeout(() => { setLoading(false); }, 3000); }, [profile.displayName]); useEffect(() => { setSlideRows(slides[category]); }, [slides, category]); return profile.displayName ? ( <> <Card.Group> {slideRows.map((slideItem) => ( <Card key={`${category}-${slideItem.title.toLowerCase()}`}> <Card.Title>{slideItem.title}</Card.Title> <Card.Entities> {slideItem.data.map((item) => ( <Card.Item key={item.docId} item={item}> <Card.Meta> <Card.SubTitle>{item.title}</Card.SubTitle> <br/> <br/> </Card.Meta> <Card.Image src={item.image} alt={item.title}/> </Card.Item> ))} </Card.Entities> <Card.Feature category={category}> <Player> <Player.Button /> <Player.Video src={resume} /> </Player> </Card.Feature> </Card> ))} </Card.Group> <FooterContainer /> </> ) : ( <SelectProfileContainer user={user} setProfile={setProfile} /> ); } ` A: To get the docId of an item in the slides array, you can use the map method to create a new array containing only the docId values. Here is an example: const docIds = slides[category].map((slideItem) => slideItem.data.map((item) => item.docId)); This will create a two-dimensional array containing all the docId values for each slide in the specified category. To flatten this array into a single dimension, you can use the flat method, which is available in newer versions of JavaScript: const docIds = slides[category].map((slideItem) => slideItem.data.map((item) => item.docId)).flat(); Now you can use the docIds array to get the PDF URLs from Firestore. You can use the docId values to construct a query that retrieves the PDF URLs from the database. For example: const query = firebase.firestore().collection('pdfs').where('docId', 'in', docIds); query.get().then((snapshot) => { snapshot.forEach((doc) => { setPdfUrls((urls) => [...urls, doc.data().url]); }); }); This will retrieve all the PDF URLs for the specified docId values and add them to the pdfUrls array. You can then use the pdfUrls array to render the PDFs in your React component. I hope this helps!!
How to get item.docId so that I can get the URL for a PDF in Firestore
I'm trying to implement a PDF viewer from a URL stored in Firestore in React. How can I get item.docId in setPdfUrls? Please help me out; I'm new to React and web development. Where I'm stuck is that I don't understand how to do it. How do I get item.docId so that I can get the URL for the PDF in Firestore? ` import React, { useState, useEffect, useContext } from "react"; import { Card, Header, Player } from "../components"; import * as ROUTES from "../constants/routes"; import { FirebaseContext } from "../context/firebase"; import { ref, getDownloadURL } from "firebase/storage"; import { storage } from "../lib/firebase.prod"; import { SelectProfileContainer } from "./profiles"; import { FooterContainer } from "./footer"; export function BrowseContainer({ slides }) { var [pdfUrls, setPdfUrls] = useState([]); const [resume, setResume]=useState(null); useEffect(()=>{ getDownloadURL(ref(storage, 'Resume.pdf')).then((url)=>{ setResume(url); }) },[]); const [category, setCategory] = useState("articles"); const [profile, setProfile] = useState({}); const [loading, setLoading] = useState(true); const [slideRows, setSlideRows] = useState([]); const { firebase } = useContext(FirebaseContext); const user = firebase.auth().currentUser || {}; useEffect(() => { setTimeout(() => { setLoading(false); }, 3000); }, [profile.displayName]); useEffect(() => { setSlideRows(slides[category]); }, [slides, category]); return profile.displayName ? ( <> <Card.Group> {slideRows.map((slideItem) => ( <Card key={`${category}-${slideItem.title.toLowerCase()}`}> <Card.Title>{slideItem.title}</Card.Title> <Card.Entities> {slideItem.data.map((item) => ( <Card.Item key={item.docId} item={item}> <Card.Meta> <Card.SubTitle>{item.title}</Card.SubTitle> <br/> <br/> </Card.Meta> <Card.Image src={item.image} alt={item.title}/> </Card.Item> ))} </Card.Entities> <Card.Feature category={category}> <Player> <Player.Button /> <Player.Video src={resume} /> </Player> </Card.Feature> </Card> ))} </Card.Group> <FooterContainer /> </> ) : ( <SelectProfileContainer user={user} setProfile={setProfile} /> ); } `
[ "To get the docId of an item in the slides array, you can use the map method to create a new array containing only the docId values.\nHere is an example:\nconst docIds = slides[category].map((slideItem) => slideItem.data.map((item) => item.docId));\n\nThis will create a two-dimensional array containing all the docId values for each slide in the specified category. To flatten this array into a single dimension,\nyou can use the flat method, which is available in newer versions of JavaScript:\nconst docIds = slides[category].map((slideItem) => slideItem.data.map((item) => item.docId)).flat();\n\nNow you can use the docIds array to get the PDF URLs from Firestore. You can use the docId values to construct a query that retrieves the PDF URLs from the database. For example:\nconst query = firebase.firestore().collection('pdfs').where('docId', 'in', docIds);\nquery.get().then((snapshot) => {\n snapshot.forEach((doc) => {\n setPdfUrls((urls) => [...urls, doc.data().url]);\n });\n});\n\nThis will retrieve all the PDF URLs for the specified docId values and add them to the pdfUrls array. You can then use the pdfUrls array to render the PDFs in your React component.\nI hope this helps!!\n" ]
[ 0 ]
[]
[]
[ "javascript", "react_hooks", "reactjs" ]
stackoverflow_0074680063_javascript_react_hooks_reactjs.txt
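One caveat on the suggested query: Firestore 'in' filters accept only a limited number of values per query (historically 10), so a hedged sketch would chunk the ids first ('pdfs' remains the hypothetical collection name from the answer):

const chunkSize = 10;
const urls = [];
for (let i = 0; i < docIds.length; i += chunkSize) {
  const chunk = docIds.slice(i, i + chunkSize);
  const snapshot = await firebase.firestore().collection('pdfs')
    .where('docId', 'in', chunk).get();
  snapshot.forEach((doc) => urls.push(doc.data().url));
}
setPdfUrls(urls);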
Q: What is the difference between NS() and ns() (R Shiny) NS() vs ns(): when should I use the uppercase variant and when the lowercase? A: In the R Shiny framework, NS() is used to create namespaces for modules; a namespace is a way of grouping related object IDs together in order to prevent naming conflicts. NS(id) returns a namespacing function, and by convention that function is stored in a variable called ns (ns <- NS(id)) inside a module's UI function, so ns("txt") yields the namespaced ID "id-txt". The lowercase ns is therefore not a separate Shiny function: it is just the conventional name for the function returned by NS(). Inside a module server the same function is also available as session$ns. As a general rule, call NS() once at the top of a module UI function to build the namespacing function, and use ns() everywhere you create input and output IDs within that module. This convention helps to make your code more readable and easier to understand.
What is the difference between NS() and ns() (R Shiny)
NS() vs ns(): when should I use the uppercase variant and when the lowercase?
[ "in the R Shiny framework, NS() and ns() are functions that are used to create namespaces. A namespace is a way of grouping together related objects, such as functions, in order to prevent naming conflicts.\nThe main difference between NS() and ns() is that NS() is used to create a namespace at the global level, while ns() is used to create a namespace within a specific Shiny module. This means that objects created within a namespace created using ns() will only be accessible within that module, while objects created within a namespace created using NS() will be accessible globally.\nAs a general rule, you should use the uppercase version of a function (in this case, NS()) when creating a namespace at the global level, and the lowercase version (ns()) when creating a namespace within a specific Shiny module. This convention helps to make your code more readable and easier to understand.\nTry again\nFree Research Preview: ChatGPT is optimized for dialogue. Our goal is to make AI systems more natural to interact with, and your feedback will help us imp\n" ]
[ 0 ]
[]
[]
[ "namespaces", "shiny" ]
stackoverflow_0074680038_namespaces_shiny.txt
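A minimal module sketch of the convention described above; the ids and labels are illustrative:

library(shiny)

myModuleUI <- function(id) {
  ns <- NS(id)                       # NS(id) returns the namespacing function
  textInput(ns("txt"), "Enter text") # rendered input id becomes "id-txt"
}

myModuleServer <- function(id) {
  moduleServer(id, function(input, output, session) {
    observe(print(input$txt))        # session$ns is also available here
  })
}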
Q: PyInstaller problem making exe files that using transformers and PyQt5 library So I'm working on an AI project using huggingface library, and I need to convert it into an exe file. I'm using PyQt5 for the interface, and transformers and datasets library from huggingface. I tried using PyInstaller to convert it into an exe file, it does finish building the exe files of the project, but it gives me this error when I run the exe file: Traceback (most recent call last): File "transformers\utils\versions.py", line 105, in require_version File "importlib\metadata.py", line 530, in version File "importlib\metadata.py", line 503, in distribution File "importlib\metadata.py", line 177, in from_name importlib.metadata.PackageNotFoundError: tqdm During handling of the above exception, another exception occurred: Traceback (most recent call last): File "App.py", line 5, in <module> File "PyInstaller\loader\pyimod03_importers.py", line 476, in exec_module File "transformers\__init__.py", line 43, in <module> File "PyInstaller\loader\pyimod03_importers.py", line 476, in exec_module File "transformers\dependency_versions_check.py", line 41, in <module> File "transformers\utils\versions.py", line 120, in require_version_core File "transformers\utils\versions.py", line 107, in require_version importlib.metadata.PackageNotFoundError: The 'tqdm>=4.27' distribution was not found and is required by this application. Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git master [736] Failed to execute script 'App' due to unhandled exception! [process exited with code 1] Line 5 on my code was a line of code for importing the transformers library. ... 4| from PyQt5.QtCore import QThread, QObject, pyqtSignal 5| from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline ... ... And this is my .spec file: # -*- mode: python ; coding: utf-8 -*- block_cipher = None a = Analysis(['App.py'], pathex=[], binaries=[], datas=[ ('./resources/images/logo.png', '.'), ('./resources/model/config.json', '.'), ('./resources/model/pytorch_model.bin', '.'), ('./resources/model/special_tokens_map.json', '.'), ('./resources/model/tokenizer.json', '.'), ('./resources/model/tokenizer_config.json', '.'), ('./resources/model/vocab.txt', '.') ], hiddenimports=[], hookspath=[], hooksconfig={}, runtime_hooks=[], excludes=[], win_no_prefer_redirects=False, win_private_assemblies=False, cipher=block_cipher, noarchive=False) pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) exe = EXE(pyz, a.scripts, [], exclude_binaries=True, name='App', debug=False, bootloader_ignore_signals=False, strip=False, upx=True, console=True, disable_windowed_traceback=False, target_arch=None, codesign_identity=None, entitlements_file=None , icon='logo.ico') coll = COLLECT(exe, a.binaries, a.zipfiles, a.datas, strip=False, upx=True, upx_exclude=[], name='App') I would really appreciate any help given, thanks :D A: First, pip install tqdm if you haven't already. Second, specify the path to your Lib/site-packages. 
You can do this by either: Adding an argument to pathex in your .spec file (.venv for a virtual environment at some folder .venv in your local directory, or the absolute path to your global Python install Lib/site-packages if you're not using a virtual environment): pathex=['.venv/Lib/site-packages'] Specifying the path to Lib/site-packages from the command-line: pyinstaller --paths '.venv/Lib/site-packages' my_program.py From the pyinstaller docs pathex: a list of paths to search for imports (like using PYTHONPATH), including paths given by the --paths option. Some Python scripts import modules in ways that PyInstaller cannot detect: for example, by using the __import__() function with variable data, using importlib.import_module(), or manipulating the sys.path value at run time.
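Separately, since the traceback shows importlib.metadata failing to locate tqdm's distribution info, bundling the package metadata itself is another approach often suggested for this error. A minimal sketch of that in the .spec file (an addition for illustration, not part of the answer above; it uses PyInstaller's copy_metadata hook utility):
# In the .spec file, before Analysis(...):
from PyInstaller.utils.hooks import copy_metadata

datas = [
    ('./resources/images/logo.png', '.'),
    # ... the existing data files from the question's spec ...
]
# Bundle the dist-info folders so importlib.metadata can find these
# packages inside the frozen app at run time.
datas += copy_metadata('tqdm')
datas += copy_metadata('transformers')

The datas list is then passed to Analysis(...) as before.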
PyInstaller problem making exe files that using transformers and PyQt5 library
So I'm working on an AI project using huggingface library, and I need to convert it into an exe file. I'm using PyQt5 for the interface, and transformers and datasets library from huggingface. I tried using PyInstaller to convert it into an exe file, it does finish building the exe files of the project, but it gives me this error when I run the exe file: Traceback (most recent call last): File "transformers\utils\versions.py", line 105, in require_version File "importlib\metadata.py", line 530, in version File "importlib\metadata.py", line 503, in distribution File "importlib\metadata.py", line 177, in from_name importlib.metadata.PackageNotFoundError: tqdm During handling of the above exception, another exception occurred: Traceback (most recent call last): File "App.py", line 5, in <module> File "PyInstaller\loader\pyimod03_importers.py", line 476, in exec_module File "transformers\__init__.py", line 43, in <module> File "PyInstaller\loader\pyimod03_importers.py", line 476, in exec_module File "transformers\dependency_versions_check.py", line 41, in <module> File "transformers\utils\versions.py", line 120, in require_version_core File "transformers\utils\versions.py", line 107, in require_version importlib.metadata.PackageNotFoundError: The 'tqdm>=4.27' distribution was not found and is required by this application. Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git master [736] Failed to execute script 'App' due to unhandled exception! [process exited with code 1] Line 5 on my code was a line of code for importing the transformers library. ... 4| from PyQt5.QtCore import QThread, QObject, pyqtSignal 5| from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline ... ... And this is my .spec file: # -*- mode: python ; coding: utf-8 -*- block_cipher = None a = Analysis(['App.py'], pathex=[], binaries=[], datas=[ ('./resources/images/logo.png', '.'), ('./resources/model/config.json', '.'), ('./resources/model/pytorch_model.bin', '.'), ('./resources/model/special_tokens_map.json', '.'), ('./resources/model/tokenizer.json', '.'), ('./resources/model/tokenizer_config.json', '.'), ('./resources/model/vocab.txt', '.') ], hiddenimports=[], hookspath=[], hooksconfig={}, runtime_hooks=[], excludes=[], win_no_prefer_redirects=False, win_private_assemblies=False, cipher=block_cipher, noarchive=False) pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) exe = EXE(pyz, a.scripts, [], exclude_binaries=True, name='App', debug=False, bootloader_ignore_signals=False, strip=False, upx=True, console=True, disable_windowed_traceback=False, target_arch=None, codesign_identity=None, entitlements_file=None , icon='logo.ico') coll = COLLECT(exe, a.binaries, a.zipfiles, a.datas, strip=False, upx=True, upx_exclude=[], name='App') I would really appreciate any help given, thanks :D
[ "First, pip install tqdm if you haven't already. Second, specify the path to your Lib/site-packages. You can do this by either:\n\nAdding an argument to pathex in your .spec file\n(.venv for a virtual environment at some folder .venv in your local directory, or the absolute path to your global Python install Lib/site-packages if you're not using a virtual environment):\n\npathex=['.venv/Lib/site-packages']\n\n\nSpecifying the path to Lib/site-packages from the command-line:\n\npyinstaller --paths '.venv/Lib/site-packages' my_program.py\n\nFrom the pyinstaller docs\n\npathex: a list of paths to search for imports (like using PYTHONPATH), including paths given by the --paths option.\n\nSome Python scripts import modules in ways that PyInstaller cannot detect: for example, by using the __import__() function with variable data, using importlib.import_module(), or manipulating the sys.path value at run time.\n" ]
[ 1 ]
[ "Hi is this the accepted answer still valid? I have the exact same problem but the solution doesn't work for me.\n" ]
[ -2 ]
[ "huggingface_transformers", "pyqt", "pyside", "python" ]
stackoverflow_0069874436_huggingface_transformers_pyqt_pyside_python.txt
Q: Toastr messages to show without refresh in flask-dash I am using Plotly-Dash in my Flask application to display some graphs. And have Toastr setup in the app to handle notifications. What I want to do, is that upon a dash button click, an event handler runs a function, and upon any error in the function, I flash the error, and expect that error to be thrown at me using Toastr messages in real time. But that does not happen, I get the toastr message displayed after I refresh my page. In the docs, there is the flash being called first and then the rendering process happens, anyone knows how to show flashes without new render? I followed the docs, and tried multiple things, but none of them seem to work. UPDATE: Just for more context, I am redirecting to a Dash app which has the following function: @dashapp.callback( [ ServersideOutput( "data-store", "data", backend=cache_backend, arg_check=False ), ServersideOutput( "availability-data-store", "data", backend=cache_backend, arg_check=False, ), ], Input("refresh-placeholder", "n_clicks"), prevent_initial_call=True, ) def _fetch_data(_n): temp_output = get_data(get_session_vinsight_token()) if not temp_output: print("flash?") flash( "No data loaded. Please reconnect to vinsight from your account page if the error continues.", "error", ) return [None, None] (df, availability_df) = temp_output return [df, availability_df] Here, I am just checking if I have an error, and if yes, I am trying to flash the error to the user. But as the docs say, and as how the code normally works, something like: @server_bp.route("/login", methods=["GET", "POST"]) def login(): form = LoginForm() if form.validate_on_submit(): email = form.email.data password = form.password.data remember_me = form.remember_me.data try: auth.login(email, password, remember_me) except Exception as e: flash("Couldn't signin, reason: {}".format(e)) return redirect("/login") Where the flash works due to return redirect("/login"), I want to flash my message from another file. Basically, the _fetch_data is in another file which is why I cannot use the redirect method. In my head, answer to any of the two will be a good workaround: Is there any way to force reload the page in Flask? Is there any way to show flash messages without reloading in Flask? A: It sounds like you are trying to use the Flask flash function to show a message to the user in real time, but the message is only being displayed after the page is refreshed. The reason this is happening is that the Flask flash function stores messages in the user's session, and those messages are only displayed to the user when the page is rendered. This means that if you call the flash function and then immediately return a response, the user will not see the flashed message until they refresh the page or navigate to a different page in your app. To show the flashed message in real time, you will need to trigger a new page render after calling the flash function. One way to do this is to use the Flask redirect function to redirect the user to the same page (or a different page) after calling the flash function. This will cause the page to be rendered again, and the flashed message will be displayed to the user. 
Here is an example of how you could use the redirect function to show a flashed message in real time: @dashapp.callback( [ ServersideOutput( "data-store", "data", backend=cache_backend, arg_check=False ), ServersideOutput( "availability-data-store", "data", backend=cache_backend, arg_check=False, ), ], Input("refresh-placeholder", "n_clicks"), prevent_initial_call=True, ) def _fetch_data(_n): temp_output = get_data(get_session_vinsight_token()) if not temp_output: flash( "No data loaded. Please reconnect to vinsight from your account page if the error continues.", "error", ) # Use the redirect function to redirect the user to the same page return redirect(request.url) # Alternatively, you can redirect the user to a different page # return redirect("/some-other-page") (df, availability_df) = temp_output return [df, availability_df] In this code, the _fetch_data function calls the flash function to store a message in the user's session if there is an error. Then, it uses the redirect function to redirect the user to the same page (using request.url). This will cause the page to be rendered again, and the flashed message will be displayed to the user in real time.
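One caveat about the example above: Dash callbacks serialize their return values for the browser, so returning a Flask redirect object from a Dash callback may not behave the way it does in a plain Flask view. A Dash-native alternative is to surface the message through a component output instead of flash. A minimal sketch, using dash-bootstrap-components' Toast as a stand-in for toastr (the package and the component ids here are illustrative assumptions, not from the original answer):
import dash_bootstrap_components as dbc
from dash import Input, Output, no_update

# In the layout, next to the existing components:
toast = dbc.Toast(
    id="error-toast",      # hypothetical id
    header="Error",
    is_open=False,
    dismissable=True,
    duration=4000,         # auto-hide after 4 seconds
)

@dashapp.callback(
    Output("error-toast", "children"),
    Output("error-toast", "is_open"),
    Input("refresh-placeholder", "n_clicks"),
    prevent_initial_call=True,
)
def _show_error(_n):
    temp_output = get_data(get_session_vinsight_token())
    if not temp_output:
        return "No data loaded. Please reconnect to vinsight.", True
    return no_update, False

Because the toast is driven by a callback output, it appears as soon as the callback returns, with no page reload.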
Toastr messages to show without refresh in flask-dash
I am using Plotly-Dash in my Flask application to display some graphs. And have Toastr setup in the app to handle notifications. What I want to do, is that upon a dash button click, an event handler runs a function, and upon any error in the function, I flash the error, and expect that error to be thrown at me using Toastr messages in real time. But that does not happen, I get the toastr message displayed after I refresh my page. In the docs, there is the flash being called first and then the rendering process happens, anyone knows how to show flashes without new render? I followed the docs, and tried multiple things, but none of them seem to work. UPDATE: Just for more context, I am redirecting to a Dash app which has the following function: @dashapp.callback( [ ServersideOutput( "data-store", "data", backend=cache_backend, arg_check=False ), ServersideOutput( "availability-data-store", "data", backend=cache_backend, arg_check=False, ), ], Input("refresh-placeholder", "n_clicks"), prevent_initial_call=True, ) def _fetch_data(_n): temp_output = get_data(get_session_vinsight_token()) if not temp_output: print("flash?") flash( "No data loaded. Please reconnect to vinsight from your account page if the error continues.", "error", ) return [None, None] (df, availability_df) = temp_output return [df, availability_df] Here, I am just checking if I have an error, and if yes, I am trying to flash the error to the user. But as the docs say, and as how the code normally works, something like: @server_bp.route("/login", methods=["GET", "POST"]) def login(): form = LoginForm() if form.validate_on_submit(): email = form.email.data password = form.password.data remember_me = form.remember_me.data try: auth.login(email, password, remember_me) except Exception as e: flash("Couldn't signin, reason: {}".format(e)) return redirect("/login") Where the flash works due to return redirect("/login"), I want to flash my message from another file. Basically, the _fetch_data is in another file which is why I cannot use the redirect method. In my head, answer to any of the two will be a good workaround: Is there any way to force reload the page in Flask? Is there any way to show flash messages without reloading in Flask?
[ "It sounds like you are trying to use the Flask flash function to show a message to the user in real time, but the message is only being displayed after the page is refreshed.\nThe reason this is happening is that the Flask flash function stores messages in the user's session, and those messages are only displayed to the user when the page is rendered. This means that if you call the flash function and then immediately return a response, the user will not see the flashed message until they refresh the page or navigate to a different page in your app.\nTo show the flashed message in real time, you will need to trigger a new page render after calling the flash function. One way to do this is to use the Flask redirect function to redirect the user to the same page (or a different page) after calling the flash function. This will cause the page to be rendered again, and the flashed message will be displayed to the user.\nHere is an example of how you could use the redirect function to show a flashed message in real time:\n@dashapp.callback(\n[\n ServersideOutput(\n \"data-store\", \"data\", backend=cache_backend, arg_check=False\n ),\n\n\n ServersideOutput(\n \"availability-data-store\",\n \"data\",\n backend=cache_backend,\n arg_check=False,\n ),\n ],\n Input(\"refresh-placeholder\", \"n_clicks\"),\n prevent_initial_call=True,\n)\n\ndef _fetch_data(_n):\n temp_output = get_data(get_session_vinsight_token())\n if not temp_output:\n flash(\n \"No data loaded. Please reconnect to vinsight from your account page if the error continues.\",\n \"error\",\n )\n # Use the redirect function to redirect the user to the same page\n return redirect(request.url)\n # Alternatively, you can redirect the user to a different page\n # return redirect(\"/some-other-page\")\n\n (df, availability_df) = temp_output\n return [df, availability_df]\n\nIn this code, the _fetch_data function calls the flash function to store a message in the user's session if there is an error. Then, it uses the redirect function to redirect the user to the same page (using request.url). This will cause the page to be rendered again, and the flashed message will be displayed to the user in real time.\n" ]
[ 0 ]
[]
[]
[ "flask", "notifications", "plotly_dash", "python", "toastr" ]
stackoverflow_0074666045_flask_notifications_plotly_dash_python_toastr.txt
Q: Function that extracts each unique character in a string Let's say that I have a string "rrkn". Is there a function in R that'll return a vector "r", "k", "n" (i.e. each unique character in the string)? A: If you want to make it slightly less cumbersome to type: uniqchars <- function(x) unique(strsplit(x, "")[[1]]) A: Another solution to use rawToChar(unique(charToRaw(x))). A: A stringr option is: library(stringr) str_unique(str_split_1("rrkn", ""))
Function that extracts each unique character in a string
Let's say that I have a string "rrkn". Is there a function in R that'll return a vector "r", "k", "n" (i.e. each unique character in the string)?
[ "If you want to make it slightly less cumbersome to type:\nuniqchars <- function(x) unique(strsplit(x, \"\")[[1]]) \n\n", "Another solution to use rawToChar(unique(charToRaw(x))).\n", "A stringr option is:\nlibrary(stringr)\nstr_unique(str_split_1(\"rrkn\", \"\"))\n\n" ]
[ 9, 2, 0 ]
[]
[]
[ "r" ]
stackoverflow_0031814548_r.txt
Q: Maximum calls exceeded somehow Today I (tried) created some code to create mcq questions. The code is supposed to generate a random codon, find its amino acid from the codon table/chart and display it on the document along with other (3) random wrong options. I want to make a mcq with 4 options (1 is correct rest are wrong). What I am trying to do below is: The computer will form a random sequence of 3 nucleotides (i.e.digits) using form() function. Variable formed will store a codon (eg. UCA, ACC etc.) which will be the question. Now I declared array arr which will store the correct answer at 0th position. Then I created a function generateWrongOptions() which will (is supposed to) add the other 3 dissimilar wrong answers to the array. What I tried to do here is that the function will declare a new amino acid (eg. Phe, Ile, Met etc.) which is stored as wrong and a new empty array arr2. The next loop is supposed to check if wrong is already present in arr or not; if it is not then it will push an element 'a' ('a' here doesn't has any meaning) in arr2, if it is then it won't. Now if will check if the arr length is equal to arr2 which simply means if the variable wrong is unique or not (or is duplicate). I wanted to create 4 options (1 was already present) hence I looped the code for i<3 times. I found better ways to do this same task online, but those were more advanced and I couldn't understand them. Hence I'd come with my own solution (best I could've guessed). const obj = { UUU:"Phe", UUC:"Phe", UUA:"Leu", UUG:"Leu", CUU:"Leu", CUC:"Leu", CUA:"Leu", CUG:"Leu", AUU:"Ile", AUC:"Ile", AUA:"Ile", AUG:"Met", GUU:"Val", GUC:"Val", GUA:"Val", GUG:"Val", /* - */ UCU:"Ser", UCC:"Ser", UCA:"Ser", UCG:"Ser", CCU:"Pro", CCC:"Pro", CCA:"Pro", CCG:"Pro", ACU:"Thr", ACC:"Thr", ACA:"Thr", ACG:"Thr", GCU:"Ala", GCC:"Ala", GCA:"Ala", GCG:"Ala", /* - */ UAU:"Tyr", UAC:"Tyr", UAA:"Stop", UAG:"Stop", CAU:"His", CAC:"His", CAA:"Gln", CAG:"Gln", AAU:"Asn", AAC:"Asn", AAA:"Lys", AAG:"Lys", GAU:"Asp", GAC:"Asp", GAA:"Glu", GAG:"Glu", /* - */ UGU:"Cys", UGC:"Cys", UGA:"Stop", UGG:"trp", CGU:"Arg", CGC:"Arg", CGA:"Arg", CGG:"Arg", AGU:"Ser", AGC:"Ser", AGA:"Arg", AGG:"Arg", GGU:"Gly", GGC:"Gly", GGA:"Gly", GGG:"Gly", }; const digit = ['U', 'C', 'A', 'G']; function x() { return Math.floor(Math.random()*4); }; function form() { return digit[x()]+digit[x()]+digit[x()] } let formed = form(); let arr = [obj[formed]]; function generateWrongOptions() { for (i = 0; i < 4; i++) { let wrong = obj[form()]; let arr2 = []; for (i = 0; i < arr.length; i++) { if (wrong!==arr[i]){ arr2.push('a'); }; if(arr2.length==arr.length){ arr.push(wrong) } else { generateWrongOptions() }; }; }; }; generateWrongOptions(); for (let n of arr) { console.log(n) } Console returns Maximum calls exceeded; On the other hand a similar code I wrote before creating this - as a guideline - to form an array of 4 different numbers works: function x() { return Math.floor(Math.random()*10) } let y = x(); let arr = [y]; function aa() { for (i = 0; i < 4; i++) { let z = x() let arr2 = [] for (i = 0; i < arr.length ; i++) { if (z!==arr[i]) {arr2.push('a')}; } if (arr2.length==arr.length) {arr.push(z)} else {aa()} }; }; aa(); console.log(arr); I think I can fix this code by declaring a new array of all the amino acids in the codon table (obj), but still want to know why the first code doesn't work while the latter does. A: I'm not so sure if I understand your code correctly. 
However, I can see that both of your for loops forget to declare the loop variable: you wrote "for (i = 0; ...)" where it should be "for (let i = 0; ...)". Without let, i becomes an implicit global shared by the outer loop, the inner loop, and every recursive call to generateWrongOptions(), so each level keeps resetting the others' counter and the function keeps recursing, which is what produces the "Maximum call stack size exceeded" error. There is also a logic difference from your second snippet: in the first one, the if (arr2.length == arr.length) check sits inside the inner loop, so it can push a value or recurse before the whole array has been compared. Declaring each counter with let (or var) and moving the check out of the inner loop fixes both problems.
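A minimal sketch of an alternative that avoids both the shared counters and the recursion (the overall structure is kept from the question; the while loop is an assumption about the intent of collecting 4 unique options):
function generateWrongOptions() {
  while (arr.length < 4) {                  // keep drawing until there are 4 unique options
    let wrong = obj[form()];
    let isDuplicate = false;
    for (let i = 0; i < arr.length; i++) {  // 'let' keeps this counter local
      if (wrong === arr[i]) {
        isDuplicate = true;
      }
    }
    if (!isDuplicate) {
      arr.push(wrong);                      // only unique answers are kept
    }
  }
}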
Maximum calls exceeded somehow
Today I (tried) created some code to create mcq questions. The code is supposed to generate a random codon, find its amino acid from the codon table/chart and display it on the document along with other (3) random wrong options. I want to make a mcq with 4 options (1 is correct rest are wrong). What I am trying to do below is: The computer will form a random sequence of 3 nucleotides (i.e.digits) using form() function. Variable formed will store a codon (eg. UCA, ACC etc.) which will be the question. Now I declared array arr which will store the correct answer at 0th position. Then I created a function generateWrongOptions() which will (is supposed to) add the other 3 dissimilar wrong answers to the array. What I tried to do here is that the function will declare a new amino acid (eg. Phe, Ile, Met etc.) which is stored as wrong and a new empty array arr2. The next loop is supposed to check if wrong is already present in arr or not; if it is not then it will push an element 'a' ('a' here doesn't has any meaning) in arr2, if it is then it won't. Now if will check if the arr length is equal to arr2 which simply means if the variable wrong is unique or not (or is duplicate). I wanted to create 4 options (1 was already present) hence I looped the code for i<3 times. I found better ways to do this same task online, but those were more advanced and I couldn't understand them. Hence I'd come with my own solution (best I could've guessed). const obj = { UUU:"Phe", UUC:"Phe", UUA:"Leu", UUG:"Leu", CUU:"Leu", CUC:"Leu", CUA:"Leu", CUG:"Leu", AUU:"Ile", AUC:"Ile", AUA:"Ile", AUG:"Met", GUU:"Val", GUC:"Val", GUA:"Val", GUG:"Val", /* - */ UCU:"Ser", UCC:"Ser", UCA:"Ser", UCG:"Ser", CCU:"Pro", CCC:"Pro", CCA:"Pro", CCG:"Pro", ACU:"Thr", ACC:"Thr", ACA:"Thr", ACG:"Thr", GCU:"Ala", GCC:"Ala", GCA:"Ala", GCG:"Ala", /* - */ UAU:"Tyr", UAC:"Tyr", UAA:"Stop", UAG:"Stop", CAU:"His", CAC:"His", CAA:"Gln", CAG:"Gln", AAU:"Asn", AAC:"Asn", AAA:"Lys", AAG:"Lys", GAU:"Asp", GAC:"Asp", GAA:"Glu", GAG:"Glu", /* - */ UGU:"Cys", UGC:"Cys", UGA:"Stop", UGG:"trp", CGU:"Arg", CGC:"Arg", CGA:"Arg", CGG:"Arg", AGU:"Ser", AGC:"Ser", AGA:"Arg", AGG:"Arg", GGU:"Gly", GGC:"Gly", GGA:"Gly", GGG:"Gly", }; const digit = ['U', 'C', 'A', 'G']; function x() { return Math.floor(Math.random()*4); }; function form() { return digit[x()]+digit[x()]+digit[x()] } let formed = form(); let arr = [obj[formed]]; function generateWrongOptions() { for (i = 0; i < 4; i++) { let wrong = obj[form()]; let arr2 = []; for (i = 0; i < arr.length; i++) { if (wrong!==arr[i]){ arr2.push('a'); }; if(arr2.length==arr.length){ arr.push(wrong) } else { generateWrongOptions() }; }; }; }; generateWrongOptions(); for (let n of arr) { console.log(n) } Console returns Maximum calls exceeded; On the other hand a similar code I wrote before creating this - as a guideline - to form an array of 4 different numbers works: function x() { return Math.floor(Math.random()*10) } let y = x(); let arr = [y]; function aa() { for (i = 0; i < 4; i++) { let z = x() let arr2 = [] for (i = 0; i < arr.length ; i++) { if (z!==arr[i]) {arr2.push('a')}; } if (arr2.length==arr.length) {arr.push(z)} else {aa()} }; }; aa(); console.log(arr); I think I can fix this code by declaring a new array of all the amino acids in the codon table (obj), but still want to know why the first code doesn't work while the latter does.
[ "I'm not so sure if I understand your code correctly. However, I can see that you have two for loops in which you forgot to create a new variable:\nyou used \"for (i .... )\" , and you forgot to say \"for (let i ..... )\". Another issue i noticed is the redeclaration of \"arr\" in the last function, which I found weird since you already declared it outside of the function scope. In addition, there is an \"arr2\" that was also not declared with \"let\" or \"var\" words.\n" ]
[ 1 ]
[]
[]
[ "function", "javascript" ]
stackoverflow_0074679526_function_javascript.txt
Q: Matrix with the diagonal set to 0, and the rest of the elements set from 1 to n (in C) Firstly, a number N has to be input, and the matrix is of NxN dimensions then. The diagonal of the matrix has to be all 0, the elements of the matrix above the diagonal have to be from 1 to N diagonally, and the elements under the diagonal need to be filled from -1 to -N also diagonally. It should be something like this (if N=5): But the problem that I have is that I print it out like this: and I don't know how to fix it. This is the code that I have:
`#include <stdio.h>

int main() {
    int matrix[50][50], i, j, N;
    printf("N: ");
    scanf("%d",&N);
    int k=0;
    for(i=0; i<N; i++){
        for(j=0; j<N; j++){
            if(i==j){
                matrix[i][j]=0;
            }
            else if(j>i && i!=j){
                for(k=0; k<N; k++){
                    matrix[k-1][j]=k;
                }
            }
            else if(j<i && i!=j){
                for(k=0; k<N; k++){
                    matrix[i][k-1]=-k;
                }
            }
        }
    }
    printf("Matrix:\n");
    for (i=0; i<N; i++) {
        for (j=0; j<N; j++)
            printf("%4d", matrix[i][j]);
        printf("\n");
    }
    return 0;
}`
I would really appreciate the help.
A: Here is your code modified; notice that the three inner loops are replaced by the single assignment matrix[i][j] = j - i, which is 0 on the diagonal, positive above it and negative below it. Second, since the matrix is statically sized at 50x50, you should verify that the N the user enters does not exceed 50; otherwise writing past the array bounds is undefined behaviour and will likely cause a segmentation fault. If you do want to allow N > 50, allocate the matrix dynamically instead.
#include <stdio.h>

int main() {
    int matrix[50][50], i, j, N;
    printf("N: ");
    scanf("%d", &N);
    if (N > 50){
        printf("N must not exceed 50, clamping to 50.\n");
        N = 50;
    }

    for(i=0; i<N; i++){
        for(j=0; j<N; j++){
            matrix[i][j]= j - i;
        }
    }

    printf("Matrix:\n");
    for (i=0; i<N; i++) {
        for (j=0; j<N; j++)
            printf("%4d", matrix[i][j]);
        printf("\n");
    }
    return 0;
}
Matrix with the diagonal set to 0, and the rest of the elements set from 1 to n (in C)
Firstly, a number N has to be input, and the matrix is of NxN dimensions then. The diagonal of the matrix has to be all 0, the elements of the matrix above the diagonal have to be from 1 to N diagonally, and the elements under the diagonal need to be filled from -1 to -N also diagonally. It should be something like this (if N=5): But the problem that I have is that I print it out like this: and I don't know how to fix it. This is the code that I have: `#include <stdio.h> int main() { int matrix[50][50], i, j, N; printf("N: "); scanf("%d",&N); int k=0; for(i=0; i<N; i++){ for(j=0; j<N; j++){ if(i==j){ matrix[i][j]=0; } else if(j>i && i!=j){ for(k=0; k<N; k++){ matrix[k-1][j]=k; } } else if(j<i && i!=j){ for(k=0; k<N; k++){ matrix[i][k-1]=-k; } } } } printf("Matrix:\n"); for (i=0; i<N; i++) { for (j=0; j<N; j++) printf("%4d", matrix[i][j]); printf("\n"); } return 0; }` I would really appreciate the help.
[ "Here is you code modified, notice that 3 inner loops are removed with only one line.\nsecond, you ask for the number N, however due to statically initialisation to maximum 50, you should as well verify that it is not. otherwise segmentation fault will happen.\nor if you want to allow N >50 then better to do dynamic allocation on matrix.\n#include <stdio.h>\n\nint main() {\n int matrix[50][50], i, j, N;\n printf(\"N: \");\n scanf(\"%d\", &N);\n if (N > 50){\n printf(\"N should be smaller than 50, \\n\");\n N = 50;\n }\n\n for(i=0; i<N; i++){\n for(j=0; j<N; j++){\n matrix[i][j]= j - i;\n }\n }\n\n printf(\"Matrix:\\n\");\n for (i=0; i<N; i++) {\n for (j=0; j<N; j++)\n printf(\"%4d\", matrix[i][j]);\n printf(\"\\n\");\n }\n return 0;\n}\n\n" ]
[ 1 ]
[]
[]
[ "c", "diagonal", "matrix" ]
stackoverflow_0074680012_c_diagonal_matrix.txt
Q: Algorithm to create a singly linked list I have an algorithm that should create a singly linked list from a textbook. It barely touched on any examples, so I would need some help figuring it out (still new to C.) Essentially, the algorithm runs as follows:
Algorithm: CREATE (HEAD, ITEM)
1. [Create NEW node]
 a) Allocate memory for NEW node.
 b) IF NEW = NULL then Print: “Memory not Available” and Return
 c) Set NEW→DATA = ITEM
 d) Set NEW→LINK = NULL
2. [Whether List is empty, head is the content of HEADER] If HEAD = NULL then Set HEAD = NEW
3. Else
 a) Set Temp = HEAD
 b) While Temp→LINK ≠ NULL do Set Temp = Temp→LINK [End of while]
 c) Set Temp→LINK = NEW [End of IF]
4. Return
Here is what I have tried so far, but I could not understand the arrow mappings in the algorithm. Are these existing C features?
#include <stdlib.h>
#include <stdio.h>

typedef struct node {
    int DATA;
    struct node * LINK;
} node_t;

node_t *create(int head, int item){
    node_t* new = NULL;
    new = (node_t *)malloc(5*sizeof(node_t));
    if(new == NULL){
        prtinf("Memory not available");
        return -1;
    }
}
A: The arrow operator (->) in the algorithm is used to access a member of a struct using a pointer to that struct. It is equivalent to using the dot operator (.) to access a member of a struct, but with the added step of dereferencing the pointer to the struct. For example, if p is a pointer to a struct node, then p->DATA is equivalent to (*p).DATA.
In your code, you are using the node_t typedef to name the struct node type. This means you can use node_t * to declare a pointer to a struct node, and, given a variable declared as node_t *p, you can use p->DATA to access the DATA member of the node that p points to.
It looks like you are trying to implement the first step of the algorithm, which is to allocate memory for the new node and store the ITEM value in the DATA member of the node. You are using malloc to allocate memory for the new node, but you need to initialize the DATA member and the LINK member of the node. You can do this by using the arrow operator to access the members of the node:
node_t *create(int head, int item){
    node_t* new = NULL;
    new = (node_t *)malloc(sizeof(node_t));
    if(new == NULL){
        printf("Memory not available");
        return NULL; // not -1: this function returns a pointer
    }

    new->DATA = item; // Set the DATA member of the new node
    new->LINK = NULL; // Set the LINK member of the new node to NULL
}

You also need to return the new node from the create function so that you can use it later in the algorithm.
node_t *create(int head, int item){
    node_t* new = NULL;
    new = (node_t *)malloc(sizeof(node_t));
    if(new == NULL){
        printf("Memory not available");
        return NULL; // not -1: this function returns a pointer
    }

    new->DATA = item; // Set the DATA member of the new node
    new->LINK = NULL; // Set the LINK member of the new node to NULL

    return new; // Return the new node
}

You can then use the create function to create a new node and store it in a local variable in your main program. This will allow you to continue implementing the rest of the algorithm.
int main() {
    int head = 0; // The head of the linked list
    int item = 5; // The data to store in the new node

    node_t *new_node = create(head, item); // Create a new node

    // Continue implementing the rest of the algorithm...

    return 0;
}
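For the remaining steps of the algorithm (checking for an empty list and appending at the tail), a minimal sketch of one possible completion is shown below; passing the head by pointer so that step 2 can update it is an implementation choice, not something fixed by the algorithm:
void create(node_t **head, int item) {
    node_t *new = malloc(sizeof(node_t));   /* Step 1: allocate the new node */
    if (new == NULL) {
        printf("Memory not available");
        return;
    }
    new->DATA = item;
    new->LINK = NULL;

    if (*head == NULL) {                    /* Step 2: empty list, new node becomes head */
        *head = new;
    } else {                                /* Step 3: walk to the tail and append */
        node_t *temp = *head;
        while (temp->LINK != NULL) {
            temp = temp->LINK;
        }
        temp->LINK = new;
    }
}                                           /* Step 4: return */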
Algorithm to create a singly linked list
I have an algorithm that should create a singly linked list from a textbook. It barely touched on any examples, so I would need some help figuring it out (still new to C.) Essentially, the algorithm runs as follows: Algorithm: CREATE (HEAD, ITEM) 1. [Create NEW node] a) Allocate memory for NEW node. b) IF NEW = NULL then Print: “Memory not Available” and Return c) Set NEW→DATA = ITEM d) Set NEW→LINK = NULL 2. [Whether List is empty, head is the content of HEADER] If HEAD = NULL then Set HEAD = NEW 3. Else a) Set Temp = HEAD b) While Temp→LINK ≠ NULL do Set Temp = Temp→LINK [End of while] c) Set Temp→LINK = NEW [End of IF] 4. Return Here is what I have tried so far, but I could not understand the arrow mappings in the algorithm. Are these existing C features? #include <stdlib.h> #include <stdio.h> typedef struct node { int DATA; struct node * LINK; } node_t; node_t *create(int head, int item){ node_t* new = NULL; new = (node_t *)malloc(5*sizeof(node_t)); if(new == NULL){ prtinf("Memory not available"); return -1; } }
[ "The arrow operator (->) in the algorithm is used to access a member of a struct using a pointer to that struct. It is equivalent to using the dot operator (.) to access a member of a struct, but with the added step of dereferencing the pointer to the struct. For example, if p is a pointer to a struct node, then p->DATA is equivalent to (*p).DATA.\nIn your code, you are using the node_t typedef to define the struct node type. This means that you can use node_t * to refer to a pointer to a struct node, and you can use node_t->DATA to access the DATA member of a struct node pointed to by node_t.\nIt looks like you are trying to implement the first step of the algorithm, which is to allocate memory for the new node and store the ITEM value in the DATA member of the node. You are using malloc to allocate memory for the new node, but you need to initialize the DATA member and the LINK member of the node. You can do this by using the arrow operator to access the members of the node:\nnode_t *create(int head, int item){\n node_t* new = NULL;\n new = (node_t *)malloc(sizeof(node_t));\n if(new == NULL){\n printf(\"Memory not available\");\n return -1;\n }\n\n new->DATA = item; // Set the DATA member of the new node\n new->LINK = NULL; // Set the LINK member of the new node to NULL\n}\n\n\nYou also need to return the new node from the create function so that you can use it later in the algorithm.\nnode_t *create(int head, int item){\n node_t* new = NULL;\n new = (node_t *)malloc(sizeof(node_t));\n if(new == NULL){\n printf(\"Memory not available\");\n return -1;\n }\n\n new->DATA = item; // Set the DATA member of the new node\n new->LINK = NULL; // Set the LINK member of the new node to NULL\n\n return new; // Return the new node\n}\n\n\nYou can then use the create function to create a new node and store it in a local variable in your main program. This will allow you to continue implementing the rest of the algorithm.\nint main() {\n int head = 0; // The head of the linked list\n int item = 5; // The data to store in the new node\n\n node_t *new_node = create(head, item); // Create a new node\n\n // Continue implementing the rest of the algorithm...\n\n return 0;\n}\n\n\n" ]
[ 2 ]
[]
[]
[ "algorithm", "c" ]
stackoverflow_0074680048_algorithm_c.txt
Q: How to improve Julia's performance using just in time compilation (JIT) I have been playing with JAX (automatic differentiation library in Python) and Zygote (the automatic differentiation library in Julia) to implement Gauss-Newton minimisation method. I came upon the @jit macro in Jax that runs my Python code in around 0.6 seconds compared to ~60 seconds for the version that does not use @jit. Julia ran the code in around 40 seconds. Is there an equivalent of @jit in Julia or Zygote that results is a better performance? Here are the codes I used: Python from jax import grad, jit, jacfwd import jax.numpy as jnp import numpy as np import time def gaussian(x, params): amp = params[0] mu = params[1] sigma = params[2] amplitude = amp/(jnp.abs(sigma)*jnp.sqrt(2*np.pi)) arg = ((x-mu)/sigma) return amplitude*jnp.exp(-0.5*(arg**2)) def myjacobian(x, params): return jacfwd(gaussian, argnums = 1)(x, params) def op(jac): return jnp.matmul( jnp.linalg.inv(jnp.matmul(jnp.transpose(jac),jac)), jnp.transpose(jac)) def res(x, data, params): return data - gaussian(x, params) @jit def step(x, data, params): residuals = res(x, data, params) jacobian_operation = op(myjacobian(x, params)) temp = jnp.matmul(jacobian_operation, residuals) return params + temp N = 2000 x = np.linspace(start = -100, stop = 100, num= N) data = gaussian(x, [5.65, 25.5, 37.23]) ini = jnp.array([0.9, 5., 5.0]) t1 = time.time() for i in range(5000): ini = step(x, data, ini) t2 = time.time() print('t2-t1: ', t2-t1) ini Julia using Zygote function gaussian(x::Union{Vector{Float64}, Float64}, params::Vector{Float64}) amp = params[1] mu = params[2] sigma = params[3] amplitude = amp/(abs(sigma)*sqrt(2*pi)) arg = ((x.-mu)./sigma) return amplitude.*exp.(-0.5.*(arg.^2)) end function myjacobian(x::Vector{Float64}, params::Vector{Float64}) output = zeros(length(x), length(params)) for (index, ele) in enumerate(x) output[index,:] = collect(gradient((params)->gaussian(ele, params), params))[1] end return output end function op(jac::Matrix{Float64}) return inv(jac'*jac)*jac' end function res(x::Vector{Float64}, data::Vector{Float64}, params::Vector{Float64}) return data - gaussian(x, params) end function step(x::Vector{Float64}, data::Vector{Float64}, params::Vector{Float64}) residuals = res(x, data, params) jacobian_operation = op(myjacobian(x, params)) temp = jacobian_operation*residuals return params + temp end N = 2000 x = collect(range(start = -100, stop = 100, length= N)) params = vec([5.65, 25.5, 37.23]) data = gaussian(x, params) ini = vec([0.9, 5., 5.0]) @time for i in range(start = 1, step = 1, length = 5000) ini = step(x, data, ini) end ini A: Your Julia code doing a number of things that aren't idiomatic and are worsening your performance. This won't be a full overview, but it should give you a good idea to start. The first thing is passing params as a Vector is a bad idea. This means it will have to be heap allocated, and the compiler doesn't know how long it is. Instead, use a Tuple which will allow for a lot more optimization. Secondly, don't make gaussian act on a Vector of xs. Instead, write the scalar version and broadcast it. Specifically, with these changes, you will have function gaussian(x::Number, params::NTuple{3, Float64}) amp, mu, sigma = params # The next 2 lines should probably be done outside this function, but I'll leave them here for now. 
amplitude = amp/(abs(sigma)*sqrt(2*pi)) arg = ((x-mu)/sigma) return amplitude*exp(-0.5*(arg^2)) end A: One straightforward way to speed this up is to use ForwardDiff not Zygote, since you are taking a gradient of a vector of length 3, many times. Here this gets me from 16 to 3.5 seconds, with the last factor of 2 involving Chunk(3) to improve type-stability. Perhaps this can be improved further. function myjacobian(x::Vector, params) # return rand(eltype(x), length(x), length(params)) # with no gradient, takes 0.5s output = zeros(eltype(x), length(x), length(params)) config = ForwardDiff.GradientConfig(nothing, params, ForwardDiff.Chunk(3)) for (i, xi) in enumerate(x) # grad = gradient(p->gaussian(xi, p), params)[1] # original, takes 16s # grad = ForwardDiff.gradient(p-> gaussian(xi, p)) # ForwardDiff, takes 7s grad = ForwardDiff.gradient(p-> gaussian(xi, p), params, config) # takes 3.5s copyto!(view(output,i,:), grad) # this allows params::Tuple, OK for Zygote, no help end return output end # This needs gaussian.(x, Ref(params)) elsewhere to use on many x, same params function gaussian(x::Real, params) # amp, mu, sigma = params # with params::Vector this is slower, 19 sec amp = params[1] mu = params[2] sigma = params[3] # like this, 16 sec T = typeof(x) # avoids having (2*pi)::Float64 promote everything amplitude = amp/(abs(sigma)*sqrt(2*T(pi))) arg = (x-mu)/sigma return amplitude * exp(-(arg^2)/2) end However, this is still computing many small gradient arrays in a loop. It could easily compute one big gradient array instead. While in general Julia is happy to compile loops to something fast, loops that make individual arrays tend to be a bad idea. And this is especially true for Zygote, which is fastest on matlab-ish whole-array code. Here's how this looks, it gets me under 1s for the whole program: function gaussian(x::Real, amp::Real, mu::Real, sigma::Real) T = typeof(x) amplitude = amp/(abs(sigma)*sqrt(2*T(pi))) arg = (x-mu)/sigma return amplitude * exp(-(arg^2)/2) end function myjacobian2(x::Vector, params) # with this, 0.9s amp = fill(params[1], length(x)) mu = fill(params[2], length(x)) sigma = fill(params[3], length(x)) # use same sigma & different x value at each row: grads = gradient((amp, mu, sigma) -> sum(gaussian.(x, amp, mu, sigma)), amp, mu, sigma) hcat(grads...) end # Check that it agrees: myjacobian2(x, params) ≈ myjacobian(x, params) While this has little effect on the speed, I think you probably also want op(jac::Matrix) = Hermitian(jac'*jac) \ jac' rather than inv.
How to improve Julia's performance using just in time compilation (JIT)
I have been playing with JAX (automatic differentiation library in Python) and Zygote (the automatic differentiation library in Julia) to implement Gauss-Newton minimisation method. I came upon the @jit macro in Jax that runs my Python code in around 0.6 seconds compared to ~60 seconds for the version that does not use @jit. Julia ran the code in around 40 seconds. Is there an equivalent of @jit in Julia or Zygote that results is a better performance? Here are the codes I used: Python from jax import grad, jit, jacfwd import jax.numpy as jnp import numpy as np import time def gaussian(x, params): amp = params[0] mu = params[1] sigma = params[2] amplitude = amp/(jnp.abs(sigma)*jnp.sqrt(2*np.pi)) arg = ((x-mu)/sigma) return amplitude*jnp.exp(-0.5*(arg**2)) def myjacobian(x, params): return jacfwd(gaussian, argnums = 1)(x, params) def op(jac): return jnp.matmul( jnp.linalg.inv(jnp.matmul(jnp.transpose(jac),jac)), jnp.transpose(jac)) def res(x, data, params): return data - gaussian(x, params) @jit def step(x, data, params): residuals = res(x, data, params) jacobian_operation = op(myjacobian(x, params)) temp = jnp.matmul(jacobian_operation, residuals) return params + temp N = 2000 x = np.linspace(start = -100, stop = 100, num= N) data = gaussian(x, [5.65, 25.5, 37.23]) ini = jnp.array([0.9, 5., 5.0]) t1 = time.time() for i in range(5000): ini = step(x, data, ini) t2 = time.time() print('t2-t1: ', t2-t1) ini Julia using Zygote function gaussian(x::Union{Vector{Float64}, Float64}, params::Vector{Float64}) amp = params[1] mu = params[2] sigma = params[3] amplitude = amp/(abs(sigma)*sqrt(2*pi)) arg = ((x.-mu)./sigma) return amplitude.*exp.(-0.5.*(arg.^2)) end function myjacobian(x::Vector{Float64}, params::Vector{Float64}) output = zeros(length(x), length(params)) for (index, ele) in enumerate(x) output[index,:] = collect(gradient((params)->gaussian(ele, params), params))[1] end return output end function op(jac::Matrix{Float64}) return inv(jac'*jac)*jac' end function res(x::Vector{Float64}, data::Vector{Float64}, params::Vector{Float64}) return data - gaussian(x, params) end function step(x::Vector{Float64}, data::Vector{Float64}, params::Vector{Float64}) residuals = res(x, data, params) jacobian_operation = op(myjacobian(x, params)) temp = jacobian_operation*residuals return params + temp end N = 2000 x = collect(range(start = -100, stop = 100, length= N)) params = vec([5.65, 25.5, 37.23]) data = gaussian(x, params) ini = vec([0.9, 5., 5.0]) @time for i in range(start = 1, step = 1, length = 5000) ini = step(x, data, ini) end ini
[ "Your Julia code doing a number of things that aren't idiomatic and are worsening your performance. This won't be a full overview, but it should give you a good idea to start.\nThe first thing is passing params as a Vector is a bad idea. This means it will have to be heap allocated, and the compiler doesn't know how long it is. Instead, use a Tuple which will allow for a lot more optimization. Secondly, don't make gaussian act on a Vector of xs. Instead, write the scalar version and broadcast it. Specifically, with these changes, you will have\nfunction gaussian(x::Number, params::NTuple{3, Float64})\n amp, mu, sigma = params\n \n # The next 2 lines should probably be done outside this function, but I'll leave them here for now.\n amplitude = amp/(abs(sigma)*sqrt(2*pi))\n arg = ((x-mu)/sigma)\n return amplitude*exp(-0.5*(arg^2))\nend\n\n", "One straightforward way to speed this up is to use ForwardDiff not Zygote, since you are taking a gradient of a vector of length 3, many times. Here this gets me from 16 to 3.5 seconds, with the last factor of 2 involving Chunk(3) to improve type-stability. Perhaps this can be improved further.\nfunction myjacobian(x::Vector, params)\n # return rand(eltype(x), length(x), length(params)) # with no gradient, takes 0.5s\n output = zeros(eltype(x), length(x), length(params))\n config = ForwardDiff.GradientConfig(nothing, params, ForwardDiff.Chunk(3))\n for (i, xi) in enumerate(x)\n # grad = gradient(p->gaussian(xi, p), params)[1] # original, takes 16s\n # grad = ForwardDiff.gradient(p-> gaussian(xi, p)) # ForwardDiff, takes 7s\n grad = ForwardDiff.gradient(p-> gaussian(xi, p), params, config) # takes 3.5s\n copyto!(view(output,i,:), grad) # this allows params::Tuple, OK for Zygote, no help\n end\n return output\nend\n# This needs gaussian.(x, Ref(params)) elsewhere to use on many x, same params\nfunction gaussian(x::Real, params)\n # amp, mu, sigma = params # with params::Vector this is slower, 19 sec\n amp = params[1]\n mu = params[2]\n sigma = params[3] # like this, 16 sec\n T = typeof(x) # avoids having (2*pi)::Float64 promote everything\n amplitude = amp/(abs(sigma)*sqrt(2*T(pi)))\n arg = (x-mu)/sigma\n return amplitude * exp(-(arg^2)/2)\nend\n\nHowever, this is still computing many small gradient arrays in a loop. It could easily compute one big gradient array instead.\nWhile in general Julia is happy to compile loops to something fast, loops that make individual arrays tend to be a bad idea. And this is especially true for Zygote, which is fastest on matlab-ish whole-array code.\nHere's how this looks, it gets me under 1s for the whole program:\nfunction gaussian(x::Real, amp::Real, mu::Real, sigma::Real)\n T = typeof(x)\n amplitude = amp/(abs(sigma)*sqrt(2*T(pi)))\n arg = (x-mu)/sigma\n return amplitude * exp(-(arg^2)/2)\nend\nfunction myjacobian2(x::Vector, params) # with this, 0.9s\n amp = fill(params[1], length(x))\n mu = fill(params[2], length(x))\n sigma = fill(params[3], length(x)) # use same sigma & different x value at each row:\n grads = gradient((amp, mu, sigma) -> sum(gaussian.(x, amp, mu, sigma)), amp, mu, sigma)\n hcat(grads...)\nend\n# Check that it agrees:\nmyjacobian2(x, params) ≈ myjacobian(x, params)\n\nWhile this has little effect on the speed, I think you probably also want op(jac::Matrix) = Hermitian(jac'*jac) \\ jac' rather than inv.\n" ]
[ 4, 2 ]
[]
[]
[ "jax", "julia", "optimization", "python" ]
stackoverflow_0074678931_jax_julia_optimization_python.txt
Q: Forgot to migrate free tier Postgres and now my app is empty My project is live again with no problems whatsoever, but the database is empty. I am trying either to restore an old backup to the new Postgres instance or to upload a backup from my local disk. I tried to do pg:restore but I don’t know where to grab the backup (from the free Heroku tier).
A: I got the answer I was looking for by digging into one of the more hidden help articles on Heroku.
https://help.heroku.com/QG1W7LIJ/how-do-i-restore-a-partial-backup-or-single-table-to-heroku-postgres
pg_restore --verbose --clean --no-acl --no-owner -h localhost -U YOUR_USERNAME -d CONNECTION_URL mydb.dump

I had to recreate the backup from the locally imported database, but in the end it worked flawlessly.
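For reference, a dump in the custom format that pg_restore expects can be created from the local database with pg_dump (a sketch; mydb and YOUR_USERNAME are placeholders):
pg_dump -Fc --no-acl --no-owner -h localhost -U YOUR_USERNAME mydb > mydb.dump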
Forgot to migrate free tier Postgres and now my app is empty
My project is live again with no problems whatsoever, but the database is empty. I am trying either to restore an old backup to the new Postgres instance or to upload a backup from my local disk. I tried to do pg:restore but I don’t know where to grab the backup (from the free Heroku tier).
[ "I got the answer I was looking for by going into some of the most hidden help articles in heroku.\nhttps://help.heroku.com/QG1W7LIJ/how-do-i-restore-a-partial-backup-or-single-table-to-heroku-postgres\npg_restore --verbose --clean --no-acl --no-owner -h localhost -U YOUR_USERNAME-d CONNECTION_URL mydb.dump\n\nHad to recreate the backup from the local imported database but at the end it worked flawlessly.\n" ]
[ 0 ]
[]
[]
[ "heroku", "heroku_postgres" ]
stackoverflow_0074679580_heroku_heroku_postgres.txt
Q: Firebase events as conversions in Google Ads not working After importing our Firebase app events in Google Ads as conversions, their status stays at "no recent conversions". The events are recordeding fine in Firebase. Both our native iOS and Android app are implemented, but none show conversions in Google Ads The package name of one of our apps was updated We unlinked Google Ads from Firebase and linked it again, but that didn't work either We are talking about custom events (an in-app action) and native events (like first open) It seems like we can't really delete conversions. We can delete and re-enable, but can't "start over". How can we make recording our events as conversions work? A: The answer was hiding in the comment made by @Akif Demirezen. On Firebase, go to project settings Under "Your apps" section, make sure to fill the "App Store ID" field, you can find this information on your App Store connect page, under Apple ID
Firebase events as conversions in Google Ads not working
After importing our Firebase app events in Google Ads as conversions, their status stays at "no recent conversions". The events are recordeding fine in Firebase. Both our native iOS and Android app are implemented, but none show conversions in Google Ads The package name of one of our apps was updated We unlinked Google Ads from Firebase and linked it again, but that didn't work either We are talking about custom events (an in-app action) and native events (like first open) It seems like we can't really delete conversions. We can delete and re-enable, but can't "start over". How can we make recording our events as conversions work?
[ "The answer was hiding in the comment made by @Akif Demirezen.\nOn Firebase, go to project settings \n\nUnder \"Your apps\" section, make sure to fill the \"App Store ID\" field, you can find this information on your App Store connect page, under Apple ID\n\n\n" ]
[ 5 ]
[ "I have the same issue :(\nTracking working on Firebase, but not sending it to Google Ads.\nI have checked the APP ID on Firebase, double checked if both are connected and contacted GA support who just told me they can't see anything wrong.\n" ]
[ -1 ]
[ "firebase", "google_ads_api" ]
stackoverflow_0064461654_firebase_google_ads_api.txt
Q: This version of µWS is not compatible with your Node.js build: Error: node-loader. while running "truffle console" I'm working on a Blockchain project where I use Truffle. when I run the Truffle Console command I face this issue. Complete Log: This version of µWS is not compatible with your Node.js build: Error: node-loader: Error: The specified module could not be found. C:\Users\sudda\AppData\Roaming\npm\node_modules\truffle\node_modules\ganache\dist\node/3jj9vE3p.node Falling back to a NodeJS implementation; performance may be degraded. Package.json { "name": "nft-marketplace", "version": "0.1.0", "description": "An NFT Marketplace", "author": "clarionnorth@gmail.com", "dependencies": { "@metamask/detect-provider": "^1.2.0", "@openzeppelin/contracts": "^4.3.1", "babel-polyfill": "6.26.0", "babel-preset-env": "1.7.0", "babel-preset-es2015": "6.24.1", "babel-preset-stage-2": "6.24.1", "babel-preset-stage-3": "6.24.1", "babel-register": "6.26.0", "bootstrap": "4.3.1", "chai": "4.2.0", "chai-as-promised": "7.1.1", "chai-bignumber": "3.0.0", "mdb-react-ui-kit": "^1.3.0", "mdb-ui-kit": "^3.9.0", "react": "16.8.4", "react-bootstrap": "1.0.0-beta.5", "react-dom": "16.8.4", "react-scripts": "2.1.3", "truffle": "5.0.5", "web3": "1.0.0-beta.55" }, "scripts": { "start": "react-scripts start", "build": "react-scripts build", "test": "react-scripts test", "eject": "react-scripts eject" }, "eslintConfig": { "extends": "react-app" }, "browserslist": [ ">0.2%", "not dead", "not ie <= 11", "not op_mini all" ] } My node version is 16.14.0. How I can remove this error/warning. A: This happened to me with v19.1.0 while I run truffle deploy --network goerli I switched to nvm use --lts Now using node v18.12.1 (npm v8.19.2) problem is resolved
This version of µWS is not compatible with your Node.js build: Error: node-loader. while running "truffle console"
I'm working on a Blockchain project where I use Truffle. when I run the Truffle Console command I face this issue. Complete Log: This version of µWS is not compatible with your Node.js build: Error: node-loader: Error: The specified module could not be found. C:\Users\sudda\AppData\Roaming\npm\node_modules\truffle\node_modules\ganache\dist\node/3jj9vE3p.node Falling back to a NodeJS implementation; performance may be degraded. Package.json { "name": "nft-marketplace", "version": "0.1.0", "description": "An NFT Marketplace", "author": "clarionnorth@gmail.com", "dependencies": { "@metamask/detect-provider": "^1.2.0", "@openzeppelin/contracts": "^4.3.1", "babel-polyfill": "6.26.0", "babel-preset-env": "1.7.0", "babel-preset-es2015": "6.24.1", "babel-preset-stage-2": "6.24.1", "babel-preset-stage-3": "6.24.1", "babel-register": "6.26.0", "bootstrap": "4.3.1", "chai": "4.2.0", "chai-as-promised": "7.1.1", "chai-bignumber": "3.0.0", "mdb-react-ui-kit": "^1.3.0", "mdb-ui-kit": "^3.9.0", "react": "16.8.4", "react-bootstrap": "1.0.0-beta.5", "react-dom": "16.8.4", "react-scripts": "2.1.3", "truffle": "5.0.5", "web3": "1.0.0-beta.55" }, "scripts": { "start": "react-scripts start", "build": "react-scripts build", "test": "react-scripts test", "eject": "react-scripts eject" }, "eslintConfig": { "extends": "react-app" }, "browserslist": [ ">0.2%", "not dead", "not ie <= 11", "not op_mini all" ] } My node version is 16.14.0. How I can remove this error/warning.
[ "This happened to me with v19.1.0 while I run\ntruffle deploy --network goerli\n\nI switched to\nnvm use --lts\n\nNow using node v18.12.1 (npm v8.19.2)\n\nproblem is resolved\n" ]
[ 0 ]
[]
[]
[ "ganache", "nft", "node.js", "truffle" ]
stackoverflow_0071081725_ganache_nft_node.js_truffle.txt
Q: Why protected member are not accessible in a different package , when we create an object of Superclass package p1; class A{ protected void fun(){ } //////////////////////////////////////////////////// package p2; import p1.*; class B extends A{ A a = new A(); a.fun() ; // This is not accessible B b = new B(); b.fun(); // This is accessible } So , when we create an object of A, it doesn't work . We have to create an Object of B to access fun(). Can anyone please explain? A: In object-oriented programming, the protected access modifier is used to indicate that a member (such as a method or a property) can only be accessed from within the same class or from a subclass of that class. This means that if a method is marked as protected, it will not be accessible to code outside of the class or its subclasses. In the code example you provided, the fun() method is marked as protected, which means that it is only accessible from within the A class or from any of its subclasses. When you try to access the fun() method from the p2 package using an object of the A class, it does not work because the fun() method is not accessible from outside the A class or its subclasses. To access the fun() method, you need to create an object of the B class, which is a subclass of A and therefore has access to the fun() method. When you create an object of the B class and call the fun() method on it, the method is executed successfully because it is being called from within a subclass of the A class, where the fun() method is accessible.
Why protected member are not accessible in a different package , when we create an object of Superclass
package p1; class A{ protected void fun(){ } //////////////////////////////////////////////////// package p2; import p1.*; class B extends A{ A a = new A(); a.fun() ; // This is not accessible B b = new B(); b.fun(); // This is accessible } So , when we create an object of A, it doesn't work . We have to create an Object of B to access fun(). Can anyone please explain?
[ "In object-oriented programming, the protected access modifier is used to indicate that a member (such as a method or a property) can only be accessed from within the same class or from a subclass of that class. This means that if a method is marked as protected, it will not be accessible to code outside of the class or its subclasses.\nIn the code example you provided, the fun() method is marked as protected, which means that it is only accessible from within the A class or from any of its subclasses. When you try to access the fun() method from the p2 package using an object of the A class, it does not work because the fun() method is not accessible from outside the A class or its subclasses.\nTo access the fun() method, you need to create an object of the B class, which is a subclass of A and therefore has access to the fun() method. When you create an object of the B class and call the fun() method on it, the method is executed successfully because it is being called from within a subclass of the A class, where the fun() method is accessible.\n" ]
[ 0 ]
[]
[]
[ "access_modifiers", "java", "protected" ]
stackoverflow_0074680121_access_modifiers_java_protected.txt
Q: Can I connect to Memgraph Cloud using Rust? I'm wondering if I can connect from Rust to Memgraph Cloud. If it is important, I'm using a free version of the Memgraph Cloud at the moment. A: It is possible to connect to the Memgraph Cloud from Rust using the neo4j-graph-rs crate, which provides a Rust driver for Neo4j. Memgraph Cloud uses the Neo4j Bolt protocol for client-server communication, so the neo4j-graph-rs crate should be able to connect to it. Here is an example of how you might use the neo4j-graph-rs crate to connect to the Memgraph Cloud: use neo4j_graph_rs::prelude::*; fn main() -> Result<(), Box<dyn std::error::Error>> { // Set up a connection to the Memgraph Cloud instance let mut driver = Driver::new("bolt://<host>:<port>")?; let session = driver.session()?; // Execute a Cypher query let result = session.run("MATCH (n) RETURN n.name LIMIT 10")?; // Print the results for record in result { let name: String = record.get("n.name")?; println!("Name: {}", name); } Ok(()) } Note that you will need to replace <host> and <port> in the connection URL with the host and port of your Memgraph Cloud instance. You can find this information in the Memgraph Cloud console.
Can I connect to Memgraph Cloud using Rust?
I'm wondering if I can connect from Rust to Memgraph Cloud. If it is important, I'm using a free version of the Memgraph Cloud at the moment.
[ "It is possible to connect to the Memgraph Cloud from Rust using the neo4j-graph-rs crate, which provides a Rust driver for Neo4j. Memgraph Cloud uses the Neo4j Bolt protocol for client-server communication, so the neo4j-graph-rs crate should be able to connect to it.\nHere is an example of how you might use the neo4j-graph-rs crate to connect to the Memgraph Cloud:\nuse neo4j_graph_rs::prelude::*;\n\nfn main() -> Result<(), Box<dyn std::error::Error>> {\n // Set up a connection to the Memgraph Cloud instance\n let mut driver = Driver::new(\"bolt://<host>:<port>\")?;\n let session = driver.session()?;\n\n // Execute a Cypher query\n let result = session.run(\"MATCH (n) RETURN n.name LIMIT 10\")?;\n\n // Print the results\n for record in result {\n let name: String = record.get(\"n.name\")?;\n println!(\"Name: {}\", name);\n }\n\n Ok(())\n}\n\nNote that you will need to replace <host> and <port> in the connection URL with the host and port of your Memgraph Cloud instance. You can find this information in the Memgraph Cloud console.\n" ]
[ 1 ]
[]
[]
[ "memgraphdb" ]
stackoverflow_0074680141_memgraphdb.txt
Q: SDK Resolver Failure - Net 7 - Net 6 Just downloaded and installed SDK Net 7.0.100 and it broke existing applications and they won't load any more in VS 2022 or Rider. Copied the following error: error : SDK Resolver Failure: "The SDK resolver "Microsoft.DotNet.MSBuildSdkResolver" failed while attempting to resolve the SDK "Microsoft.NET.Sdk". Exception: "Microsoft.NET.Sdk.WorkloadManifestReader.WorkloadManifestCompositionException: Workload definition 'wasm-tools' in manifest 'microsoft.net.workload.mono.toolchain.net7' [C:\Program Files\dotnet\sdk-manifests\7.0.100\microsoft.net.workload.mono.toolchain.net7\WorkloadManifest.json] conflicts with manifest 'microsoft.net.workload.mono.toolchain' [C:\Program Files\dotnet\sdk-manifests\7.0.100\microsoft.net.workload.mono.toolchain\WorkloadManifest.json] at Microsoft.NET.Sdk.WorkloadManifestReader.WorkloadResolver.ComposeWorkloadManifests() at Microsoft.NET.Sdk.WorkloadManifestReader.WorkloadResolver.Create(IWorkloadManifestProvider manifestProvider, String dotnetRootPath, String sdkVersion, String userProfileDir) at Microsoft.NET.Sdk.WorkloadMSBuildSdkResolver.CachingWorkloadResolver.Resolve(String sdkReferenceName, String dotnetRootPath, String sdkVersion, String userProfileDir) at Microsoft.DotNet.MSBuildSdkResolver.DotNetMSBuildSdkResolver.Resolve(SdkReference sdkReference, SdkResolverContext context, SdkResultFactory factory) at Microsoft.Build.BackEnd.SdkResolution.SdkResolverService.TryResolveSdkUsingSpecifiedResolvers(IList`1 resolvers, Int32 submissionId, SdkReference A: I experienced a similar problem. I uninstalled the 7.0.100-preview.5.22307.18 sdk using add remove programs and then changed the TargetFramework in the project file to use 7.0 and then I was able to load the projects. A: EDIT: This is officially documented in the release notes known issues. I'm from the .NET SDK team. Sorry you're going through this. I would love to comment on the other answers, but I don't have the reputation to do so. What Tim Farley suggested is an officially endorsed workaround; uninstalling any preview 7 SDKs with add/remove programs should resolve the problem. As for why this happened and why uninstalling preview SDKs will fix the issue, there's a bit of an explanation I put here: https://github.com/dotnet/sdk/issues/28947#issuecomment-1307987337. TLDR: Some workloads were renamed in the middle of .NET 7 preview development to support things like multitargeting, and when you download the new RTM, old preview files can interfere with it. Updating the TargetFramework is recommended but it's unrelated to this issue. Usually breaking changes for each .NET version and related new features are gated behind your TargetFramework (TFM), so things don't break until you update the TFM, not when you update the SDK. (Unfortunately, not true in this case.) In response to whether this will happen again or not when upgrading to .NET 8, per Scott: with how workloads are currently structured this issue would happen again. We're discussing how to make changes to prevent this from happening again though as it's not ideal. We're also considering adding dotnet workload clean or something to repair this for you. Communicating with us on the SDK GH thread, or with me here, is a good way to send us feedback about this. A: Repairing Visual Studio installation did not help me. Neither did uninstalling the faulty .NET 7 workload ('wasm-tools'), since any attempt to uninstall or repair it ended up in the same error message.
Having run out of conventional options to address the issue, I just went ahead and physically deleted the conflicting folder, microsoft.net.workload.mono.toolchain, leaving the second one, microsoft.net.workload.mono.toolchain.net7, intact. It luckily solved the problems on my machine. From now on I'll be more cautious about installing MS RC packages A: I had a similar problem, even on new .Net projects. I uninstalled the 7.0.100-preview.2.22153.17 SDK using add remove programs and I was able to create a new project again. I am now left with only one .Net 7.0 SDK (from Visual Studio) as shown below. A: Uninstall Microsoft .Net SDK 7.0 preview A: I had the same issue as above except on my M1 Mac. I followed the following guide and manually removed all the directories for all the .NET 7 runtimes and SDKs. Then reinstalled .NET 7 and all seems to be well now. Rider can open my projects and the dotnet CLI no longer complains. Not sure if a similar scorched-earth approach will work for the Windows folks. https://devkimchi.com/2021/11/24/removing-dotnet-sdks-from-macos-manually/ A: I had the same issue, except that no preview version of .NET 7 was currently installed on my system, but the preview artifacts were still in the C:\Program Files\dotnet\sdk-manifests\7.0.100 directory. My solution was to uninstall the stable version of .NET 7 (not the version from VS 2022), then install and uninstall 7.0.100-preview.7. Doing so resulted in the preview artifacts being removed and this error being resolved. A: I had the same problem, but I think the problem is caused by the M1 Mac. A: Had the same problem. Deleted the manually installed .NET 7 SDKs in the Windows uninstaller. A: It looks like there is a conflict between the "wasm-tools" workload definition in the "microsoft.net.workload.mono.toolchain.net7" manifest, and the "microsoft.net.workload.mono.toolchain" manifest. This can happen if you have multiple versions of the .NET SDK installed, and the conflicting definitions are being read by the SDK resolver. To fix this issue, you can try uninstalling any conflicting versions of the .NET SDK, and then reinstalling the version you are currently using (SDK Net 7.0.100). This should resolve the conflict and allow your existing applications to load again in Visual Studio and Rider. If this does not fix the issue, you can also try cleaning the SDK resolver cache by running the following command in a command prompt: dotnet clean --interactive This will remove any cached SDK information, and the resolver will re-read the manifests when you try to build your application again. This may help resolve the conflict and allow your applications to load correctly. A: I found out that I needed to rm -rf **/obj (delete all folders named obj) in the solution root folder after upgrading from net 6 to net 7, in addition to dotnet clean
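A note of caution on the last suggestions: dotnet clean only deletes build outputs (the bin and obj folders of a project); it does not clear workload manifests or any SDK resolver state. Before uninstalling anything, it helps to see exactly which SDKs and workloads are installed — a quick sketch using standard CLI commands (output varies per machine):
dotnet --list-sdks      # every installed SDK with its install path; look for stray 7.0.100-preview.* entries
dotnet workload list    # installed workloads (e.g. wasm-tools) and the manifest versions they came from
dir "C:\Program Files\dotnet\sdk-manifests\7.0.100"   # on Windows, the folder holding the two conflicting manifests from the error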
SDK Resolver Failure - Net 7 - Net 6
Just downloaded and installed SDK Net 7.0.100 and it broke existing applications and they won't load any more in VS 2022 or Rider. Copied the following error: error : SDK Resolver Failure: "The SDK resolver "Microsoft.DotNet.MSBuildSdkResolver" failed while attempting to resolve the SDK "Microsoft.NET.Sdk". Exception: "Microsoft.NET.Sdk.WorkloadManifestReader.WorkloadManifestCompositionException: Workload definition 'wasm-tools' in manifest 'microsoft.net.workload.mono.toolchain.net7' [C:\Program Files\dotnet\sdk-manifests\7.0.100\microsoft.net.workload.mono.toolchain.net7\WorkloadManifest.json] conflicts with manifest 'microsoft.net.workload.mono.toolchain' [C:\Program Files\dotnet\sdk-manifests\7.0.100\microsoft.net.workload.mono.toolchain\WorkloadManifest.json] at Microsoft.NET.Sdk.WorkloadManifestReader.WorkloadResolver.ComposeWorkloadManifests() at Microsoft.NET.Sdk.WorkloadManifestReader.WorkloadResolver.Create(IWorkloadManifestProvider manifestProvider, String dotnetRootPath, String sdkVersion, String userProfileDir) at Microsoft.NET.Sdk.WorkloadMSBuildSdkResolver.CachingWorkloadResolver.Resolve(String sdkReferenceName, String dotnetRootPath, String sdkVersion, String userProfileDir) at Microsoft.DotNet.MSBuildSdkResolver.DotNetMSBuildSdkResolver.Resolve(SdkReference sdkReference, SdkResolverContext context, SdkResultFactory factory) at Microsoft.Build.BackEnd.SdkResolution.SdkResolverService.TryResolveSdkUsingSpecifiedResolvers(IList`1 resolvers, Int32 submissionId, SdkReference
[ "I experienced a similar problem.\nI uninstalled the 7.0.100-preview.5.22307.18 sdk using add remove programs and then changed the TargetFramework in the project file to use 7.0 and then I was able to load the projects.\n", "EDIT: This is officially documented in the release notes known issues.\nI'm from the .NET SDK team. Sorry you're going through this. I would love to comment on the other answers, but I don't have the reputation to do so.\nWhat Tim Farley suggested is an officially endorsed workaround; uninstalling any preview 7 SDKs with add/remove programs should resolve the problem.\nAs for why this happened and why uninstalling preview SDKs will fix the issue, there's a bit of an explanation I put here: https://github.com/dotnet/sdk/issues/28947#issuecomment-1307987337.\nTLDR: Some workloads were renamed in the middle of .NET 7 preview development to support things like multitargeting, and when you download the new RTM old preview files interfere can with it.\nUpdating the TargetFramework is recommended but it's unrelated to this issue. Usually breaking changes for each .NET version and related new features are gated behind your TargetFramework (TFM), so things don't break until you update the TFM, not when you update the SDK. (Unfortunately, not true in this case.)\nIn response to whether this will happen again or not when upgrading to .NET 8, per Scott: with how workloads are currently structured this issue would happen again. We're discussing how to make changes to prevent this from happening again though as it's not ideal. We're also considering adding dotnet workload clean or something to repair this for you. Communicating with us on the SDK GH thread, or with me here, is a good way to send us feedback about this.\n", "Repairing Visual Studio installation did not help me. Neither did uninstalling .net 7 faulty workload ('wasm-tools'), since any attempt to uninstall or repair it ended up in the same error message.\n\nBeing ran out of conventional options to address the issue, I just went ahead and physically deleted the conflicting folder: microsoft.net.workload.mono.toolchain, leaving second one microsoft.net.workload.mono.toolchain.net7 intact. It luckily solved problems on my machine. From now on I'll be more cautious on installing Ms RC packages\n", "I had a similar problem, even on new .Net projects. I uninstalled the 7.0.100-preview.2.22153.17 SDK using add remove programs and I was able to create a new project again. I am now left with only one .Net 7.0 SDK (from Visual Studio) as shown below.\n\n", "Uninstall Microsoft .Net SDK 7.0 preview\n", "I had the same issue as above except on my M1 mac. I followed the following guide and manually removed all the directories for all the .net 7 runtimes and sdks. Then reinstalled .net7 and all seems to be well now. rider can open my projects and the dotnet cli no longer complains\nNot sure if a similar scorched earth approach will work for the windows folks.\nhttps://devkimchi.com/2021/11/24/removing-dotnet-sdks-from-macos-manually/\n", "I had the same issue, except that no preview version of .NET 7 was currently installed on my system, but the preview artifacts were still in the C:\\Program Files\\dotnet\\sdk-manifests\\7.0.100 directory. My solution was to uninstall the stable version of .NET 7 (not the version from VS 2022), then install and uninstall 7.0.100-preview.7. 
Doing so resulted in the preview artifacts being removed and this error being resolved.\n", "I had same problem, but i think the problem because M1 mac\n", "Had the same problem. Deleted manually installed net 7 sdks in windows uninstaller.\n", "It looks like there is a conflict between the \"wasm-tools\" workload definition in the \"microsoft.net.workload.mono.toolchain.net7\" manifest, and the \"microsoft.net.workload.mono.toolchain\" manifest. This can happen if you have multiple versions of the .NET SDK installed, and the conflicting definitions are being read by the SDK resolver.\nTo fix this issue, you can try uninstalling any conflicting versions of the .NET SDK, and then reinstalling the version you are currently using (SDK Net 7.0.100). This should resolve the conflict and allow your existing applications to load again in Visual Studio and Rider.\nIf this does not fix the issue, you can also try cleaning the SDK resolver cache by running the following command in a command prompt:\ndotnet clean --interactive\n\n\nThis will remove any cached SDK information, and the resolver will re-read the manifests when you try to build your application again. This may help resolve the conflict and allow your applications to load correctly.\n", "I found out that I needed to rm -rf **/obj\n(delete all folders named obj) in the solution root folder after upgrading from net 6 to net 7, in addition to dotnet clean\n" ]
[ 36, 29, 10, 7, 4, 0, 0, 0, 0, 0, 0 ]
[]
[]
[ ".net_7.0", "c#", "jetbrains_ide", "rider", "visual_studio" ]
stackoverflow_0074365441_.net_7.0_c#_jetbrains_ide_rider_visual_studio.txt
Q: How to remove a comma on the 2nd-to-last line of the file using sed? I'm trying to search for and remove a comma (,) on the 2nd-to-last line using sed. This is what I have now: } "user-account-id": "John", "user-account-number": "v1001", "user-account-app": "v10.0.0", "user-account-dbase": "v10.1.0", } I want the end result to be like this: } "user-account-id": "John", "user-account-number": "v1001", "user-account-app": "v10.0.0", "user-account-dbase": "v10.1.0" } I thought I found the answer an hour after I posted this but I was wrong. It didn't work. A dry run with any of these combinations doesn't work: sed '2,$ s/,$//' filename sed '2,$ s/,//' filename sed '2,$ s/,//g' filename sed '2,$s/,$//' filename sed '2,$s/,//' filename sed '2,$s/,//g' filename Actual removal with any of these combinations doesn't work: sed -i '2,$ s/,$//' filename sed -i '2,$ s/,//' filename sed -i '2,$ s/,//g' filename sed -i '2,$s/,$//' filename sed -i '2,$s/,//' filename sed -i '2,$s/,//g' filename I thought running sed with '2,$' would only modify the "2nd to the last line" in the file. The output just deletes the commas on every line, which doesn't make sense: } "user-account-id": "John" "user-account-number": "v1001" "user-account-app": "v10.0.0" "user-account-dbase": "v10.1.0" } A: 2,$ is a range starting at the 2nd line from the beginning and ending at the last line (so all lines except for the first one). Modifying the 2nd last line is hard in sed, see for example Replace the "pattern" on second-to-last line of a file. But in your case, there is an easier solution with GNU sed: Treat the entire file as one string and delete the last comma followed by an } at the end of the file (ignoring any whitespace, even linebreaks). sed -Ez 's/,([ \t\r\n]*)\}([ \t\r\n]*)$/\1}\2/' file In case you know the last A: Another tactic: reverse the file, remove the trailing comma on the first time it's seen, then re-reverse the file: tac file | awk -v p=1 'p && /,$/ {sub(/,$/, ""); p=0} 1' | tac
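One more variant in the same spirit as the tac answer above, swapping awk for GNU sed (untested sketch; 0,/regexp/ is a GNU sed address form that ends the range at the first match, so after reversing the file only the original file's last trailing comma is touched):
tac file | sed '0,/,$/ s/,$//' | tac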
How to remove a comma on the 2nd-to-last line of the file using sed?
I'm trying to search for and remove a comma (,) on the 2nd-to-last line using sed. This is what I have now: } "user-account-id": "John", "user-account-number": "v1001", "user-account-app": "v10.0.0", "user-account-dbase": "v10.1.0", } I want the end result to be like this: } "user-account-id": "John", "user-account-number": "v1001", "user-account-app": "v10.0.0", "user-account-dbase": "v10.1.0" } I thought I found the answer an hour after I posted this but I was wrong. It didn't work. A dry run with any of these combinations doesn't work: sed '2,$ s/,$//' filename sed '2,$ s/,//' filename sed '2,$ s/,//g' filename sed '2,$s/,$//' filename sed '2,$s/,//' filename sed '2,$s/,//g' filename Actual removal with any of these combinations doesn't work: sed -i '2,$ s/,$//' filename sed -i '2,$ s/,//' filename sed -i '2,$ s/,//g' filename sed -i '2,$s/,$//' filename sed -i '2,$s/,//' filename sed -i '2,$s/,//g' filename I thought running sed with '2,$' would only modify the "2nd to the last line" in the file. The output just deletes the commas on every line, which doesn't make sense: } "user-account-id": "John" "user-account-number": "v1001" "user-account-app": "v10.0.0" "user-account-dbase": "v10.1.0" }
[ "2,$ is a range starting at the 2nd line from the beginning and ending at the last line (so all lines except for the first one). Modifying the 2nd last line is hard in sed, see for example Replace the \"pattern\" on second-to-last line of a file.\nBut in your case, there is an easier solution with GNU sed:\nTreat the entire file as one string and delete the last comma followed by an } at the end of the file (ignoring any whitespace, even linebreaks).\nsed -Ez 's/,([ \\t\\r\\n]*)\\}([ \\t\\r\\n]*)$/\\1}\\2/' file\n\nIn case you know the last\n", "Another tactic: reverse the file, remove the trailing comma on the first time it's seen, then re-reverse the file:\ntac file | awk -v p=1 'p && /,$/ {sub(/,$/, \"\"); p=0} 1' | tac\n\n" ]
[ 1, 0 ]
[]
[]
[ "bash", "sed", "shell" ]
stackoverflow_0074678314_bash_sed_shell.txt
Q: Locating tags in a string in PHP (with respect to the string with tags removed) I want to create a function that labels the location of certain HTML tags (e.g., italics tags) in a string with respect to the locations of characters in a tagless version of the string. (I intend to use this label data to train a neural network for tag recovery from data that has had the tags stripped out.) The magic function I want to create is label_italics() in the below code. $string = 'Disney movies: <i>Aladdin</i>, <i>Beauty and the Beast</i>.'; $string_all_tags_stripped_but_italics = strip_tags($string, '<i>'); // same as $string in this example $string_all_tags_stripped = strip_tags($string); // 'Disney movies: Aladdin, Beauty and the Beast.' $featr_string = $string_all_tags_stripped.' '; // Add a single space at the end $label_string = label_italics($string_all_tags_stripped_but_italics); echo $featr_string; // 'Disney movies: Aladdin, Beauty and the Beast. ' echo $label_string; // '0000000000000001000000101000000000000000000010' If a character is supposed to have an <i> or </i> tag immediately preceding it, it is labeled with a 1 in $label_string; otherwise, it is labeled with a 0 in $label_string. (I'm thinking I don't need to worry about the difference between <i> and </i> because the recoverer will simply alternate between <i> and </i> so as to maintain well-formed markup, but I'm open to reasons as to why I'm wrong about this.) I'm just not sure what the best way to create label_italics() is. I wrote this function that seems to work in most cases, but it also seems a little clunky and I'm posting here in hopes that there is a better way. (If this turns out to be the best way, the below function would be easily generalizable to any HTML tag passed in as a second argument to the function, which could be renamed label_tag().) function label_italics($stripped) { while ((stripos($stripped, '<i>') || stripos($stripped, '</i>')) !== FALSE) { $position = stripos($stripped, '<i>'); if (is_numeric($position)) { for ($c = 0; $c < $position; $c++) { $output .= '0'; } $output .= '1'; } $stripped = substr($stripped, $position + 4, NULL); $position = stripos($stripped, '</i>'); if (is_numeric($position)) { for ($c = 0; $c < $position; $c++) { $output .= '0'; } $output .= '1'; } $stripped = substr($stripped, $position + 5, NULL); } for ($c = 0; $c <= strlen($stripped); $c++) { $output .= '0'; } return $output; } The function produces bad output if the tags are surplus or the markup is badly formed in the input. For example, for the following input: $string = 'Disney movies: <i><i>Aladdin</i>, <i>Beauty and the Beast</i>.'; The following misaligned output is given. Disney movies: Aladdin, Beauty and the Beast. 0000000000000001000000000101000000000000000000010 (I'm also open to reasons why I'm going about the creation of the label data all wrong.) A: I think I've got something. How about this: function label_italics($string) { return preg_replace(['/<i>/', '/<\/i>/', '/[^#]/', '/##0/', '/#0/'], ['#', '#', '0', '2', '1'], $string); } see: https://3v4l.org/cKG46 Note that you need to supply the string with the tags in it. How does it work? I use preg_replace() because it can use regular expressions, which I need once. This function goes through the two arrays and execute each replacement in order. First it replace all occurrences of <i> and </i> by # and anything else by 0. Then replaces ##0 by 2 and #0 by 1. The 2 is extra to be able to replace <i></i>. 
You can remove it, and simplify the function, if you don't need it. The use of the # is arbitrary. You should use anything that doesn't clash with the content of your string. Here's an updated version. It copes with tags at the end of the line and it ignores any # characters in the line. function label_italics($string) { return preg_replace(['/[^<\/i\>]/', '/<i>/', '/<\/i>/', '/i/', '/##0/', '/#0/'], ['0', '#', '#', '0', '2', '1'], $string . ' '); } See: https://3v4l.org/BTnLc A: Here is an alternative approach to writing the label_italics function: function label_italics($stripped) { $output = ''; $tag_open = '<i>'; $tag_close = '</i>'; // Find the positions of the <i> and </i> tags in the input string $open_positions = array_keys(str_word_count($stripped, 1, $tag_open)); $close_positions = array_keys(str_word_count($stripped, 1, $tag_close)); // Create a list of all the tag positions $tag_positions = array_merge($open_positions, $close_positions); sort($tag_positions); // Loop through each character in the input string for ($i = 0; $i < strlen($stripped); $i++) { // If the current character has a tag immediately preceding it, add a 1 to the output string if (in_array($i, $tag_positions)) { $output .= '1'; } else { $output .= '0'; } } return $output; } This function uses the str_word_count function to find the positions of the and tags in the input string, and then loops through each character in the input string to determine if it has a tag immediately preceding it. This approach should be more robust than stripos approach, as it doesn't rely on using the stripos function to search for the tags. A: After some additional experimentation, this is what I arrived at: $label_string = mb_ereg_replace('#0', '1', mb_ereg_replace('(#)\1+0', '1', mb_ereg_replace('\/', '0', mb_ereg_replace('i', '0', mb_ereg_replace('<\/i>', '#', mb_ereg_replace('<i>', '#', mb_ereg_replace('[^<\/i\>]', '0', mb_strtolower($featr_string)))))))); I couldn't get @KIKO Software's preg_replace()-based solution to work with multibyte strings. So I changed to this slightly ungainly, but better-operative, mb_ereg_replace()-based solution instead.
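For completeness, a single-pass sketch of label_italics() built only on substr()/strlen() (note that the str_word_count() approach above cannot work as written: with format 1 it returns a plain list of words, so array_keys() yields 0, 1, 2, ... rather than byte offsets). The function below takes the string WITH the <i>/</i> tags, is byte-oriented (for multibyte input it labels bytes, not characters), and its final label position corresponds to the single appended space:
<?php
function label_italics(string $tagged): string
{
    $labels = '';
    $pending = '0'; // becomes '1' when a tag immediately precedes the next character
    $len = strlen($tagged);
    for ($i = 0; $i < $len; ) {
        if (strtolower(substr($tagged, $i, 3)) === '<i>') {
            $pending = '1';
            $i += 3;
        } elseif (strtolower(substr($tagged, $i, 4)) === '</i>') {
            $pending = '1';
            $i += 4;
        } else {
            $labels .= $pending; // label the visible character, then reset
            $pending = '0';
            $i += 1;
        }
    }
    return $labels . $pending; // label for the appended trailing space
}
// echo label_italics('Disney movies: <i>Aladdin</i>, <i>Beauty and the Beast</i>.');
// prints 0000000000000001000000101000000000000000000010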
Locating tags in a string in PHP (with respect to the string with tags removed)
I want to create a function that labels the location of certain HTML tags (e.g., italics tags) in a string with respect to the locations of characters in a tagless version of the string. (I intend to use this label data to train a neural network for tag recovery from data that has had the tags stripped out.) The magic function I want to create is label_italics() in the below code. $string = 'Disney movies: <i>Aladdin</i>, <i>Beauty and the Beast</i>.'; $string_all_tags_stripped_but_italics = strip_tags($string, '<i>'); // same as $string in this example $string_all_tags_stripped = strip_tags($string); // 'Disney movies: Aladdin, Beauty and the Beast.' $featr_string = $string_all_tags_stripped.' '; // Add a single space at the end $label_string = label_italics($string_all_tags_stripped_but_italics); echo $featr_string; // 'Disney movies: Aladdin, Beauty and the Beast. ' echo $label_string; // '0000000000000001000000101000000000000000000010' If a character is supposed to have an <i> or </i> tag immediately preceding it, it is labeled with a 1 in $label_string; otherwise, it is labeled with a 0 in $label_string. (I'm thinking I don't need to worry about the difference between <i> and </i> because the recoverer will simply alternate between <i> and </i> so as to maintain well-formed markup, but I'm open to reasons as to why I'm wrong about this.) I'm just not sure what the best way to create label_italics() is. I wrote this function that seems to work in most cases, but it also seems a little clunky and I'm posting here in hopes that there is a better way. (If this turns out to be the best way, the below function would be easily generalizable to any HTML tag passed in as a second argument to the function, which could be renamed label_tag().) function label_italics($stripped) { while ((stripos($stripped, '<i>') || stripos($stripped, '</i>')) !== FALSE) { $position = stripos($stripped, '<i>'); if (is_numeric($position)) { for ($c = 0; $c < $position; $c++) { $output .= '0'; } $output .= '1'; } $stripped = substr($stripped, $position + 4, NULL); $position = stripos($stripped, '</i>'); if (is_numeric($position)) { for ($c = 0; $c < $position; $c++) { $output .= '0'; } $output .= '1'; } $stripped = substr($stripped, $position + 5, NULL); } for ($c = 0; $c <= strlen($stripped); $c++) { $output .= '0'; } return $output; } The function produces bad output if the tags are surplus or the markup is badly formed in the input. For example, for the following input: $string = 'Disney movies: <i><i>Aladdin</i>, <i>Beauty and the Beast</i>.'; The following misaligned output is given. Disney movies: Aladdin, Beauty and the Beast. 0000000000000001000000000101000000000000000000010 (I'm also open to reasons why I'm going about the creation of the label data all wrong.)
[ "I think I've got something. How about this:\nfunction label_italics($string) {\n return preg_replace(['/<i>/', '/<\\/i>/', '/[^#]/', '/##0/', '/#0/'], \n ['#', '#', '0', '2', '1'], $string);\n}\n\nsee: https://3v4l.org/cKG46\nNote that you need to supply the string with the tags in it.\nHow does it work?\nI use preg_replace() because it can use regular expressions, which I need once. This function goes through the two arrays and execute each replacement in order. First it replace all occurrences of <i> and </i> by # and anything else by 0. Then replaces ##0 by 2 and #0 by 1. The 2 is extra to be able to replace <i></i>. You can remove it, and simplify the function, if you don't need it.\nThe use of the # is arbitrary. You should use anything that doesn't clash with the content of your string.\n\nHere's an updated version. It copes with tags at the end of the line and it ignores any # characters in the line.\nfunction label_italics($string) {\n return preg_replace(['/[^<\\/i\\>]/', '/<i>/', '/<\\/i>/', '/i/', '/##0/', '/#0/'], \n ['0', '#', '#', '0', '2', '1'], $string . ' ');\n}\n\nSee: https://3v4l.org/BTnLc\n", "Here is an alternative approach to writing the label_italics function:\nfunction label_italics($stripped) {\n $output = '';\n $tag_open = '<i>';\n $tag_close = '</i>';\n\n // Find the positions of the <i> and </i> tags in the input string\n $open_positions = array_keys(str_word_count($stripped, 1, $tag_open));\n $close_positions = array_keys(str_word_count($stripped, 1, $tag_close));\n\n // Create a list of all the tag positions\n $tag_positions = array_merge($open_positions, $close_positions);\n sort($tag_positions);\n\n // Loop through each character in the input string\n for ($i = 0; $i < strlen($stripped); $i++) {\n // If the current character has a tag immediately preceding it, add a 1 to the output string\n if (in_array($i, $tag_positions)) {\n $output .= '1';\n } else {\n $output .= '0';\n }\n }\n return $output;\n}\n\nThis function uses the str_word_count function to find the positions of the and tags in the input string, and then loops through each character in the input string to determine if it has a tag immediately preceding it. This approach should be more robust than stripos approach, as it doesn't rely on using the stripos function to search for the tags.\n", "After some additional experimentation, this is what I arrived at:\n$label_string = mb_ereg_replace('#0', '1', mb_ereg_replace('(#)\\1+0', '1', mb_ereg_replace('\\/', '0', mb_ereg_replace('i', '0', mb_ereg_replace('<\\/i>', '#', mb_ereg_replace('<i>', '#', mb_ereg_replace('[^<\\/i\\>]', '0', mb_strtolower($featr_string))))))));\nI couldn't get @KIKO Software's preg_replace()-based solution to work with multibyte strings. So I changed to this slightly ungainly, but better-operative, mb_ereg_replace()-based solution instead.\n" ]
[ 1, 0, 0 ]
[]
[]
[ "italics", "label", "php", "pytorch", "string" ]
stackoverflow_0074671399_italics_label_php_pytorch_string.txt
Q: ColdFusion initial value of currentrow when no index specified in cfloop I am converting a ColdFusion application to C# (I'm a CF n00b). I have a script that performs a cfquery and then cfloop's through the results, and it appears to be trying to compare the current row to its following row. And it appears to be trying to make sure that it doesn't try to read past the end of the array. <cfquery name="qTripLegs" datasource="#sdb#"> SELECT ... </cfquery> <cfloop query="qTripLegs"> <cfif (customs_stop[currentrow] NEQ "" OR fuel_stop[currentrow] NEQ "") AND recordcount GT currentrow AND departure[currentrow] NEQ arrival[currentrow+1]> It feels like currentrow is 1-based (currentrow will have a value of 1 when it first enters the cfloop). Am I correct? I have looked in the ColdFusion documentation and I don't see anything about this. A: Yes, queries and arrays in CF are 1-based. The CurrentRow and RecordCount variables are properties of the query (inside a query loop they are automatically scoped). <cfloop query="QueryName">...</cfloop> will loop through the entire query*, from 1 to QueryName.RecordCount, and the QueryName.CurrentRow index is automatically populated/incremented appropriately. Its value prior to the query loop isn't used. *(unless cfbreak/etc is used) Also to point out, there is generally no need to prevent reading past the end (as above, the query loop handles it); it's only because CurrentRow+1 is being used that this is needed to avoid an error. A: query.currentRow() Returns the current row number queryCurrentRow(query) → returns numeric Member Function Syntax <cfscript> var myQuery = queryNew("id,title","integer,varchar",[[1,"Charlottes Web"],[3,"The Outsiders"],[4,"Mieko and the Fifth Treasure"]]); cfloop(query = "myQuery"){ if (title Eq "Mieko and the Fifth Treasure"){ writeOutput(myQuery.currentRow()); } } </cfscript>
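Since the question is about porting to C#, here is a rough equivalent sketch of that loop — this assumes the query results land in a System.Data.DataTable and borrows the column names from the CFML; it is illustrative, not the original application's code:
using System.Data;
class TripLegChecker
{
    static void CheckTripLegs(DataTable qTripLegs)
    {
        // C# is 0-based where CF's currentrow is 1-based.
        for (int row = 0; row < qTripLegs.Rows.Count; row++)
        {
            string customsStop = qTripLegs.Rows[row].Field<string>("customs_stop") ?? "";
            string fuelStop = qTripLegs.Rows[row].Field<string>("fuel_stop") ?? "";
            bool hasNextRow = row + 1 < qTripLegs.Rows.Count; // CF: recordcount GT currentrow
            if ((customsStop != "" || fuelStop != "") && hasNextRow &&
                qTripLegs.Rows[row].Field<string>("departure") != qTripLegs.Rows[row + 1].Field<string>("arrival"))
            {
                // ... body of the <cfif> ...
            }
        }
    }
}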
ColdFusion initial value of currentrow when no index specified in cfloop
I am converting a ColdFusion application to C# (I'm a CF n00b). I have a script that performs a cfquery and then cfloop's through the results, and it appears to be trying to compare the current row to its following row. And it appears to be trying to make sure that it doesn't try to read past the end of the array. <cfquery name="qTripLegs" datasource="#sdb#"> SELECT ... </cfquery> <cfloop query="qTripLegs"> <cfif (customs_stop[currentrow] NEQ "" OR fuel_stop[currentrow] NEQ "") AND recordcount GT currentrow AND departure[currentrow] NEQ arrival[currentrow+1]> It feels like currentrow is 1-based (currentrow will have a value of 1 when it first enters the cfloop). Am I correct? I have looked in the ColdFusion documentation and I don't see anything about this.
[ "Yes, queries and arrays in CF are 1-based.\nThe CurrentRow and RecordCount variables are properties of the query (inside a query loop they are automatically scoped).\n<cfloop query=\"QueryName\">...</cfloop> will loop through the entire query*, from 1 to QueryName.RecordCount, and the QueryName.CurrentRow index is automatically populated/incremented appropriately. Its value prior to query loop isn't used.\n*(unless cfbreak/etc used)\nAlso to point out there is generally no need to prevent reading past the end (as above, the query loop handles it), it's only because CurrentRow+1 is being used that it's needed to avoid an error.\n", "query.currentRow()\nReturns the current row number\nqueryCurrentRow(query) → returns numeric\nMember Function Syntax\n<cfscript>\nvar myQuery = queryNew(\"id,title\",\"integer,varchar\",[[1,\"Charlottes Web\"],[3,\"The Outsiders\"],[4,\"Mieko and the Fifth Treasure\"]]);\ncfloop(query = \"myQuery\"){\n if (title Eq \"Mieko and the Fifth Treasure\"){\n writeOutput(myQuery.currentRow());\n }\n}\n</cfscript>\n\n" ]
[ 22, 0 ]
[]
[]
[ "cfquery", "coldfusion", "coldfusion_9" ]
stackoverflow_0014225655_cfquery_coldfusion_coldfusion_9.txt
Q: Excel formula to check a text value from a cell against a table's first column and return a value from the table's second column Excel formula: the formula in column B (cells B1, B2, ...) must check whether a word from C1:C4 appears in A1 and return the corresponding value from D1:D4. As a result, column B should look like below. This is an example of input data and the results in column B.
A | B | C | D
Go 2 steps | M | Jump | J
Turn left | T | Go | M
Jump fence | J | Turn | T
Run forward | M | Run | M
Turn around | T | |
Go 5 steps | M | |
I did this some time ago using INDEX, MATCH and VLOOKUP or HLOOKUP but can't find it anymore. I remember that it took me a long time to make it work. If a cell in column A is empty, the corresponding cell in column B stays clear. A: You can use the LOOKUP() function to accomplish the desired output in column B • Formula used in cell B1 =LOOKUP(2,1/(SEARCH($C$1:$C$4,A1)),$D$1:$D$4) And Fill Down for the rest of the cells.
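Two small extensions to the pattern above, as untested sketches. The question asks for B to stay clear when A is empty; since SEARCH against an empty A1 produces errors all the way down, the LOOKUP ends in #N/A, and wrapping it in IFERROR turns that into a blank. The second line is an INDEX/MATCH equivalent of the same lookup (in older Excel versions it must be confirmed as an array formula with Ctrl+Shift+Enter):
=IFERROR(LOOKUP(2,1/(SEARCH($C$1:$C$4,A1)),$D$1:$D$4),"")
=IFERROR(INDEX($D$1:$D$4,MATCH(TRUE,ISNUMBER(SEARCH($C$1:$C$4,A1)),0)),"")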
Excel formula to check a text value from a cell against a table's first column and return a value from the table's second column
Excel formula: the formula in column B (cells B1, B2, ...) must check whether a word from C1:C4 appears in A1 and return the corresponding value from D1:D4. As a result, column B should look like below. This is an example of input data and the results in column B.
A | B | C | D
Go 2 steps | M | Jump | J
Turn left | T | Go | M
Jump fence | J | Turn | T
Run forward | M | Run | M
Turn around | T | |
Go 5 steps | M | |
I did this some time ago using INDEX, MATCH and VLOOKUP or HLOOKUP but can't find it anymore. I remember that it took me a long time to make it work. If a cell in column A is empty, the corresponding cell in column B stays clear.
[ "You can use the LOOKUP() function to accomplish the desired output in column B\n\n• Formula used in cell B1\n=LOOKUP(2,1/(SEARCH($C$1:$C$4,A1)),$D$1:$D$4)\n\nAnd Fill Down for the rest of the cells.\n" ]
[ 1 ]
[]
[]
[ "excel", "excel_formula" ]
stackoverflow_0074680104_excel_excel_formula.txt
Q: How to create an add method that gets a reference to another object as an argument and adds that object's value to the receiver's? class Money{ private int cents; private int dollars; public Money(){ this.cents=0; } public Money(Scanner sc){ String token=sc.next(); int dot=token.indexOf("."); this.cents=Integer.parseInt(token.substring(dot+1)); this.dollars=Integer.parseInt(token.substring(1,dot)); } public String toString(){ return "$"+dollars+"."+cents; } public boolean equals(Money other){ if(!(other instanceof Money)){ return false; } return this.dollars==other.dollars && this.cents==other.cents; } public Money add(Money other){ return } } Here is my class; I can't seem to figure out how to create the add method that adds an object's value to the receiver's. Any tips or help is greatly appreciated! A: Possible solution assuming that the cents value should always be kept within the [0, 100) range in any instance of Money: public Money add(Money other) { if (null != other) { this.cents += other.cents; if (this.cents >= 100) { this.cents %= 100; this.dollars++; } this.dollars += other.dollars; } return this; }
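As a complement to the mutating version in the answer, a hedged sketch of a non-mutating add() that returns a new Money instead of modifying the receiver, normalizing via total-cents arithmetic (it assumes both operands keep cents within [0, 100), as the rest of the class does):
public Money add(Money other) {
    Money result = new Money();
    int totalCents = (this.dollars + other.dollars) * 100 + this.cents + other.cents;
    result.dollars = totalCents / 100; // carry handled by integer division
    result.cents = totalCents % 100;
    return result;
}
Usage: Money total = price.add(tax); leaves both price and tax unchanged.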
How to create an add method that gets a reference to another object as an argument and adds that object's value to the receiver's?
class Money{ private int cents; private int dollars; public Money(){ this.cents=0; } public Money(Scanner sc){ String token=sc.next(); int dot=token.indexOf("."); this.cents=Integer.parseInt(token.substring(dot+1)); this.dollars=Integer.parseInt(token.substring(1,dot)); } public String toString(){ return "$"+dollars+"."+cents; } public boolean equals(Money other){ if(!(other instanceof Money)){ return false; } return this.dollars==other.dollars && this.cents==other.cents; } public Money add(Money other){ return } } Here is my class; I can't seem to figure out how to create the add method that adds an object's value to the receiver's. Any tips or help is greatly appreciated!
[ "Possible solution assuming that the cents value should always be kept within [0, 100] range in any instance of Money:\npublic Money add(Money other) {\n if (null != other) {\n this.cents += other.cents;\n if (this.cents >= 100) {\n this.cents %= 100;\n this.dollars++;\n }\n this.dollars += other.dollars;\n }\n return this;\n}\n\n" ]
[ 0 ]
[]
[]
[ "class", "java", "oop" ]
stackoverflow_0074679926_class_java_oop.txt
Q: Sum in magic square isnt working properly for size> 37 I'm first year csd student I have an assignment where I need to check whether a square is a magic square and print "Yes" or "No" for each row (including diagonals) and column that meets the sum of the requirement magic square , if each number on it is unique and in the end if it is magic. For example it should like this:Input of the size of the square + the numbers we put in it -the required output My problem is that my algorithm works for the test that are from numbers (size of square) 1 to 9.Then the test inputs go to 37+. For the tests that the size of box is 37+ the sum of each row and column does not add correctly. #include <stdio.h> #include <string.h> #include <stdlib.h> #define MAXN 100 int main(int argc, char *argv[]){ if (argc==2 || argc==3){ if ( strcmp("-check",argv[1])==0){ int p=0; int i; int j; int sum; int sum2; int N; char ch[]="YES"; char unique[]="YES"; int pin1[MAXN]; scanf("%d", &N); int pin[MAXN][MAXN]; for (i=0; i<N;i++){ for (j=0; j<N;j++){ scanf ("%d",&pin[i][j]); pin1[i*N+j]=pin[i][j]; } } i=0; if (pin1[i]>N*N){ strcpy(unique, "NO"); } for (i=0;i<N*N;i++){ for (j=i+1;j<N*N;j++){ if (pin1[i]==pin1[j] || pin1[i]>N*N){ strcpy(unique, "NO"); } } } for (i=0;i<N;i++){ sum=0; strcpy(ch, "YES"); for (j=0;j<N;j++){ sum=sum+pin[i][j]; } if (sum!=(N*(N*N+1)/2)){ strcpy(ch, "NO"); p=1; } printf("ROW %d %s\n",i+1,ch); } for (j=0;j<N;j++){ sum=0; strcpy(ch, "YES"); for (i=0;i<N;i++){ sum=sum+pin[i][j]; } if (sum!=(N*(N*N+1)/2)){ strcpy(ch, "NO"); p=1; } printf("COLUMN %d %s\n",j+1,ch); } sum=0; sum2=0; strcpy(ch, "YES"); for (i=0;i<N;i++){ sum=sum+pin[i][i]; sum2=sum2+pin[N-1-i][i]; } if (sum!=(N*(N*N+1)/2)){ strcpy(ch, "NO"); p=1; } printf("DIAG1 %s\n",ch); strcpy(ch, "YES"); if (sum2!=(N*(N*N+1)/2)){ strcpy(ch, "NO"); p=1; } printf("DIAG2 %s\n",ch); printf("UNIQUE %s\n",unique); strcpy(ch, "YES"); if (p==1){ strcpy(ch, "NO"); } printf("MAGIC %s\n",ch); } else if ( strcmp("-create",argv[1])==0){ int N=atoi(argv[2]); int pin[MAXN][MAXN]; int row=N-1; int col=N/2; for (int i=1;i<=N*N;i++){ int tempr=row; int tempc=col; pin[row][col]=i; row++; col++; if (row==N){ row=0; } if (col==N){ col=0; } if ((pin[row][col]>=1) & (pin[row][col]<=N*N)){ row=tempr-1; col=tempc; } } printf("%d\n",N); for (int i=0;i<N;i++){ for (int j=0;j<N;j++){ printf("%d ",pin[i][j]); } printf("\n"); } } else { printf("Such action cannot be done"); } } else if (argc>3){ printf("Too many arguments supplied.\n"); } else{ printf("At least one argument expected.\n"); } return 0; } one of the inputs that fail: 37 667 706 745 784 823 862 901 940 979 1018 1057 1096 1135 1174 1213 1252 1291 1330 1369 2 41 80 119 158 197 236 275 314 353 392 431 470 509 548 587 626 665 666 668 707 746 785 824 863 902 941 980 1019 1058 1097 1136 1175 1214 1253 1292 1331 1333 3 42 81 120 159 198 237 276 315 354 393 432 471 510 549 588 627 628 630 669 708 747 786 825 864 903 942 981 1020 1059 1098 1137 1176 1215 1254 1293 1332 1334 4 43 82 121 160 199 238 277 316 355 394 433 472 511 550 589 590 629 631 670 709 748 787 826 865 904 943 982 1021 1060 1099 1138 1177 1216 1255 1294 1296 1335 5 44 83 122 161 200 239 278 317 356 395 434 473 512 551 552 591 593 632 671 710 749 788 827 866 905 944 983 1022 1061 1100 1139 1178 1217 1256 1295 1297 1336 6 45 84 123 162 201 240 279 318 357 396 435 474 513 514 553 592 594 633 672 711 750 789 828 867 906 945 984 1023 1062 1101 1140 1179 1218 1257 1259 1298 1337 7 46 85 124 163 202 241 280 319 358 397 436 475 476 515 554 556 595 634 673 712 751 
790 829 868 907 946 985 1024 1063 1102 1141 1180 1219 1258 1260 1299 1338 8 47 86 125 164 203 242 281 320 359 398 437 438 477 516 555 557 596 635 674 713 752 791 830 869 908 947 986 1025 1064 1103 1142 1181 1220 1222 1261 1300 1339 9 48 87 126 165 204 243 282 321 360 399 400 439 478 517 519 558 597 636 675 714 753 792 831 870 909 948 987 1026 1065 1104 1143 1182 1221 1223 1262 1301 1340 10 49 88 127 166 205 244 283 322 361 362 401 440 479 518 520 559 598 637 676 715 754 793 832 871 910 949 988 1027 1066 1105 1144 1183 1185 1224 1263 1302 1341 11 50 89 128 167 206 245 284 323 324 363 402 441 480 482 521 560 599 638 677 716 755 794 833 872 911 950 989 1028 1067 1106 1145 1184 1186 1225 1264 1303 1342 12 51 90 129 168 207 246 285 286 325 364 403 442 481 483 522 561 600 639 678 717 756 795 834 873 912 951 990 1029 1068 1107 1146 1148 1187 1226 1265 1304 1343 13 52 91 130 169 208 247 248 287 326 365 404 443 445 484 523 562 601 640 679 718 757 796 835 874 913 952 991 1030 1069 1108 1147 1149 1188 1227 1266 1305 1344 14 53 92 131 170 209 210 249 288 327 366 405 444 446 485 524 563 602 641 680 719 758 797 836 875 914 953 992 1031 1070 1109 1111 1150 1189 1228 1267 1306 1345 15 54 93 132 171 172 211 250 289 328 367 406 408 447 486 525 564 603 642 681 720 759 798 837 876 915 954 993 1032 1071 1110 1112 1151 1190 1229 1268 1307 1346 16 55 94 133 134 173 212 251 290 329 368 407 409 448 487 526 565 604 643 682 721 760 799 838 877 916 955 994 1033 1072 1074 1113 1152 1191 1230 1269 1308 1347 17 56 95 96 135 174 213 252 291 330 369 371 410 449 488 527 566 605 644 683 722 761 800 839 878 917 956 995 1034 1073 1075 1114 1153 1192 1231 1270 1309 1348 18 57 58 97 136 175 214 253 292 331 370 372 411 450 489 528 567 606 645 684 723 762 801 840 879 918 957 996 1035 1037 1076 1115 1154 1193 1232 1271 1310 1349 19 20 59 98 137 176 215 254 293 332 334 373 412 451 490 529 568 607 646 685 724 763 802 841 880 919 958 997 1036 1038 1077 1116 1155 1194 1233 1272 1311 1350 1351 21 60 99 138 177 216 255 294 333 335 374 413 452 491 530 569 608 647 686 725 764 803 842 881 920 959 998 1000 1039 1078 1117 1156 1195 1234 1273 1312 1313 1352 22 61 100 139 178 217 256 295 297 336 375 414 453 492 531 570 609 648 687 726 765 804 843 882 921 960 999 1001 1040 1079 1118 1157 1196 1235 1274 1275 1314 1353 23 62 101 140 179 218 257 296 298 337 376 415 454 493 532 571 610 649 688 727 766 805 844 883 922 961 963 1002 1041 1080 1119 1158 1197 1236 1237 1276 1315 1354 24 63 102 141 180 219 258 260 299 338 377 416 455 494 533 572 611 650 689 728 767 806 845 884 923 962 964 1003 1042 1081 1120 1159 1198 1199 1238 1277 1316 1355 25 64 103 142 181 220 259 261 300 339 378 417 456 495 534 573 612 651 690 729 768 807 846 885 924 926 965 1004 1043 1082 1121 1160 1161 1200 1239 1278 1317 1356 26 65 104 143 182 221 223 262 301 340 379 418 457 496 535 574 613 652 691 730 769 808 847 886 925 927 966 1005 1044 1083 1122 1123 1162 1201 1240 1279 1318 1357 27 66 105 144 183 222 224 263 302 341 380 419 458 497 536 575 614 653 692 731 770 809 848 887 889 928 967 1006 1045 1084 1085 1124 1163 1202 1241 1280 1319 1358 28 67 106 145 184 186 225 264 303 342 381 420 459 498 537 576 615 654 693 732 771 810 849 888 890 929 968 1007 1046 1047 1086 1125 1164 1203 1242 1281 1320 1359 29 68 107 146 185 187 226 265 304 343 382 421 460 499 538 577 616 655 694 733 772 811 850 852 891 930 969 1008 1009 1048 1087 1126 1165 1204 1243 1282 1321 1360 30 69 108 147 149 188 227 266 305 344 383 422 461 500 539 578 617 656 695 734 773 812 851 853 892 931 970 971 1010 1049 1088 
1127 1166 1205 1244 1283 1322 1361 31 70 109 148 150 189 228 267 306 345 384 423 462 501 540 579 618 657 696 735 774 813 815 854 893 932 933 972 1011 1050 1089 1128 1167 1206 1245 1284 1323 1362 32 71 110 112 151 190 229 268 307 346 385 424 463 502 541 580 619 658 697 736 775 814 816 855 894 895 934 973 1012 1051 1090 1129 1168 1207 1246 1285 1324 1363 33 72 111 113 152 191 230 269 308 347 386 425 464 503 542 581 620 659 698 737 776 778 817 856 857 896 935 974 1013 1052 1091 1130 1169 1208 1247 1286 1325 1364 34 73 75 114 153 192 231 270 309 348 387 426 465 504 543 582 621 660 699 738 777 779 818 819 858 897 936 975 1014 1053 1092 1131 1170 1209 1248 1287 1326 1365 35 74 76 115 154 193 232 271 310 349 388 427 466 505 544 583 622 661 700 739 741 780 781 820 859 898 937 976 1015 1054 1093 1132 1171 1210 1249 1288 1327 1366 36 38 77 116 155 194 233 272 311 350 389 428 467 506 545 584 623 662 701 740 742 743 782 821 860 899 938 977 1016 1055 1094 1133 1172 1211 1250 1289 1328 1367 37 39 78 117 156 195 234 273 312 351 390 429 468 507 546 585 624 663 702 704 705 744 783 822 861 900 939 978 1017 1056 1095 1134 1173 1212 1251 1290 1329 1368 1 40 79 118 157 196 235 274 313 352 391 430 469 508 547 586 625 664 703 inputs that work: 1) 1 1 2) 3 2 7 6 9 5 1 4 3 8 3) 4 16 3 2 13 5 10 11 8 9 6 7 12 4 15 14 1 4) 5 11 18 25 2 9 10 12 19 21 3 4 6 13 20 22 23 5 7 14 16 17 24 1 8 15 5) 7 22 31 40 49 2 11 20 21 23 32 41 43 3 12 13 15 24 33 42 44 4 5 14 16 25 34 36 45 46 6 8 17 26 35 37 38 47 7 9 18 27 29 30 39 48 1 10 19 28 6) 9 37 48 59 70 81 2 13 24 35 36 38 49 60 71 73 3 14 25 26 28 39 50 61 72 74 4 15 16 27 29 40 51 62 64 75 5 6 17 19 30 41 52 63 65 76 77 7 18 20 31 42 53 55 66 67 78 8 10 21 32 43 54 56 57 68 79 9 11 22 33 44 46 47 58 69 80 1 12 23 34 45 this is my code for the algorithm explained above and the problem is on sum. pin[][] is the array with the square inputs in it and pin1[] is the array i made from pin to check if it has unique numbers or not. Your help will be greatly appreciated.(I havent included main and etc because the assigment asks for 2 things to check if a square is magic and to create one using arguements, i have included only the stuff i have done for the check process. MAXN is defined 100 A: A few issues ... At first I thought that sum et. al. was overflowing an int [and needed to be long long] but that's not the case The pin array is [probably] too big to fit on the stack -- needs to be global scope. The pin1 array is int pin1[MAXN]; but is accessed via (e.g.) pin1[i * N + j] so it needs to be pin1[MAXN * MAXN];. And, should also be global scope. There were intermittant segfaults, so moving to global scope fixed those. Here is the corrected code. Because I needed to run the program under gdb, I hacked in an fopen call and changed the scanf to be fscanf. 
#include <stdio.h> #include <string.h> #include <stdlib.h> #define MAXN 100 #define SCAN(_sym) \ fscanf(fin,"%d",&_sym) #ifndef INPUT #define INPUT "inp37.txt" #endif #if 1 typedef int TYPE; #else typedef long long TYPE; #endif // NOTE/FIX: these are too large to be function scoped and won't fit on the // stack #if 1 int pin[MAXN][MAXN]; int pin1[MAXN * MAXN]; #endif int main(int argc, char *argv[]) { // NOTE/FIX: hack for gdb/debug #if 1 FILE *fin = fopen(INPUT,"r"); if (fin == NULL) { perror(INPUT); exit(1); } #endif if (argc == 2 || argc == 3) { if (strcmp("-check", argv[1]) == 0) { int p = 0; int i; int j; TYPE sum; TYPE sum2; int N; char ch[] = "YES"; char unique[] = "YES"; // NOTE/BUG: wrong/insufficient dimension #if 0 int pin1[MAXN]; #endif #if 0 scanf("%d", &N); #else SCAN(N); TYPE N2 = N * N; TYPE NGOOD = N; NGOOD = (NGOOD * (NGOOD * NGOOD + 1) / 2); #endif // NOTE/BUG: too large for stack variable #if 0 int pin[MAXN][MAXN]; #endif for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { #if 0 scanf("%d", &pin[i][j]); #else SCAN(pin[i][j]); #endif pin1[i * N + j] = pin[i][j]; } } i = 0; if (pin1[i] > N2) { strcpy(unique, "NO"); } for (i = 0; i < N2; i++) { for (j = i + 1; j < N2; j++) { if (pin1[i] == pin1[j] || pin1[i] > N2) { strcpy(unique, "NO"); } } } for (i = 0; i < N; i++) { sum = 0; strcpy(ch, "YES"); for (j = 0; j < N; j++) { sum = sum + pin[i][j]; } if (sum != NGOOD) { strcpy(ch, "NO"); p = 1; } printf("ROW %d %s\n", i + 1, ch); } for (j = 0; j < N; j++) { sum = 0; strcpy(ch, "YES"); for (i = 0; i < N; i++) { sum = sum + pin[i][j]; } if (sum != NGOOD) { strcpy(ch, "NO"); p = 1; } printf("COLUMN %d %s\n", j + 1, ch); } sum = 0; sum2 = 0; strcpy(ch, "YES"); for (i = 0; i < N; i++) { sum = sum + pin[i][i]; sum2 = sum2 + pin[N - 1 - i][i]; } if (sum != NGOOD) { strcpy(ch, "NO"); p = 1; } printf("DIAG1 %s\n", ch); strcpy(ch, "YES"); if (sum2 != NGOOD) { strcpy(ch, "NO"); p = 1; } printf("DIAG2 %s\n", ch); printf("UNIQUE %s\n", unique); strcpy(ch, "YES"); if (p == 1) { strcpy(ch, "NO"); } printf("MAGIC %s\n", ch); } else if (strcmp("-create", argv[1]) == 0) { int N = atoi(argv[2]); #if 0 int pin[MAXN][MAXN]; #endif int row = N - 1; int col = N / 2; for (int i = 1; i <= N * N; i++) { int tempr = row; int tempc = col; pin[row][col] = i; row++; col++; if (row == N) { row = 0; } if (col == N) { col = 0; } if ((pin[row][col] >= 1) & (pin[row][col] <= N * N)) { row = tempr - 1; col = tempc; } } printf("%d\n", N); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%d ", pin[i][j]); } printf("\n"); } } else { printf("Such action cannot be done"); } } else if (argc > 3) { printf("Too many arguments supplied.\n"); } else { printf("At least one argument expected.\n"); } #if 1 fclose(fin); #endif return 0; } In the code above, I've used cpp conditionals to denote old vs. 
new code: #if 0 // old code #else // new code #endif #if 1 // new code #endif Note: this can be cleaned up by running the file through unifdef -k Here is the program output for your posted "big" input: ROW 1 YES ROW 2 YES ROW 3 YES ROW 4 YES ROW 5 YES ROW 6 YES ROW 7 YES ROW 8 YES ROW 9 YES ROW 10 YES ROW 11 YES ROW 12 YES ROW 13 YES ROW 14 YES ROW 15 YES ROW 16 YES ROW 17 YES ROW 18 YES ROW 19 YES ROW 20 YES ROW 21 YES ROW 22 YES ROW 23 YES ROW 24 YES ROW 25 YES ROW 26 YES ROW 27 YES ROW 28 YES ROW 29 YES ROW 30 YES ROW 31 YES ROW 32 YES ROW 33 YES ROW 34 YES ROW 35 YES ROW 36 YES ROW 37 YES COLUMN 1 YES COLUMN 2 YES COLUMN 3 YES COLUMN 4 YES COLUMN 5 YES COLUMN 6 YES COLUMN 7 YES COLUMN 8 YES COLUMN 9 YES COLUMN 10 YES COLUMN 11 YES COLUMN 12 YES COLUMN 13 YES COLUMN 14 YES COLUMN 15 YES COLUMN 16 YES COLUMN 17 YES COLUMN 18 YES COLUMN 19 YES COLUMN 20 YES COLUMN 21 YES COLUMN 22 YES COLUMN 23 YES COLUMN 24 YES COLUMN 25 YES COLUMN 26 YES COLUMN 27 YES COLUMN 28 YES COLUMN 29 YES COLUMN 30 YES COLUMN 31 YES COLUMN 32 YES COLUMN 33 YES COLUMN 34 YES COLUMN 35 YES COLUMN 36 YES COLUMN 37 YES DIAG1 YES DIAG2 YES UNIQUE YES MAGIC YES
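For completeness, an alternative to moving pin/pin1 to file scope that the fix above did not use — a sketch that heap-allocates the big arrays so they can stay function-local without risking stack overflow:
#include <stdio.h>
#include <stdlib.h>
#define MAXN 100
int main(void)
{
    /* heap allocation keeps the large arrays off the stack */
    int (*pin)[MAXN] = malloc(sizeof(int[MAXN][MAXN]));
    int *pin1 = malloc(sizeof(int) * MAXN * MAXN);
    if (pin == NULL || pin1 == NULL) {
        perror("malloc");
        return 1;
    }
    pin[0][0] = 42;       /* indexes exactly like the original pin[i][j] */
    pin1[0] = pin[0][0];
    printf("%d\n", pin1[0]);
    free(pin1);
    free(pin);
    return 0;
}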
Sum in magic square isn't working properly for size > 37
I'm first year csd student I have an assignment where I need to check whether a square is a magic square and print "Yes" or "No" for each row (including diagonals) and column that meets the sum of the requirement magic square , if each number on it is unique and in the end if it is magic. For example it should like this:Input of the size of the square + the numbers we put in it -the required output My problem is that my algorithm works for the test that are from numbers (size of square) 1 to 9.Then the test inputs go to 37+. For the tests that the size of box is 37+ the sum of each row and column does not add correctly. #include <stdio.h> #include <string.h> #include <stdlib.h> #define MAXN 100 int main(int argc, char *argv[]){ if (argc==2 || argc==3){ if ( strcmp("-check",argv[1])==0){ int p=0; int i; int j; int sum; int sum2; int N; char ch[]="YES"; char unique[]="YES"; int pin1[MAXN]; scanf("%d", &N); int pin[MAXN][MAXN]; for (i=0; i<N;i++){ for (j=0; j<N;j++){ scanf ("%d",&pin[i][j]); pin1[i*N+j]=pin[i][j]; } } i=0; if (pin1[i]>N*N){ strcpy(unique, "NO"); } for (i=0;i<N*N;i++){ for (j=i+1;j<N*N;j++){ if (pin1[i]==pin1[j] || pin1[i]>N*N){ strcpy(unique, "NO"); } } } for (i=0;i<N;i++){ sum=0; strcpy(ch, "YES"); for (j=0;j<N;j++){ sum=sum+pin[i][j]; } if (sum!=(N*(N*N+1)/2)){ strcpy(ch, "NO"); p=1; } printf("ROW %d %s\n",i+1,ch); } for (j=0;j<N;j++){ sum=0; strcpy(ch, "YES"); for (i=0;i<N;i++){ sum=sum+pin[i][j]; } if (sum!=(N*(N*N+1)/2)){ strcpy(ch, "NO"); p=1; } printf("COLUMN %d %s\n",j+1,ch); } sum=0; sum2=0; strcpy(ch, "YES"); for (i=0;i<N;i++){ sum=sum+pin[i][i]; sum2=sum2+pin[N-1-i][i]; } if (sum!=(N*(N*N+1)/2)){ strcpy(ch, "NO"); p=1; } printf("DIAG1 %s\n",ch); strcpy(ch, "YES"); if (sum2!=(N*(N*N+1)/2)){ strcpy(ch, "NO"); p=1; } printf("DIAG2 %s\n",ch); printf("UNIQUE %s\n",unique); strcpy(ch, "YES"); if (p==1){ strcpy(ch, "NO"); } printf("MAGIC %s\n",ch); } else if ( strcmp("-create",argv[1])==0){ int N=atoi(argv[2]); int pin[MAXN][MAXN]; int row=N-1; int col=N/2; for (int i=1;i<=N*N;i++){ int tempr=row; int tempc=col; pin[row][col]=i; row++; col++; if (row==N){ row=0; } if (col==N){ col=0; } if ((pin[row][col]>=1) & (pin[row][col]<=N*N)){ row=tempr-1; col=tempc; } } printf("%d\n",N); for (int i=0;i<N;i++){ for (int j=0;j<N;j++){ printf("%d ",pin[i][j]); } printf("\n"); } } else { printf("Such action cannot be done"); } } else if (argc>3){ printf("Too many arguments supplied.\n"); } else{ printf("At least one argument expected.\n"); } return 0; } one of the inputs that fail: 37 667 706 745 784 823 862 901 940 979 1018 1057 1096 1135 1174 1213 1252 1291 1330 1369 2 41 80 119 158 197 236 275 314 353 392 431 470 509 548 587 626 665 666 668 707 746 785 824 863 902 941 980 1019 1058 1097 1136 1175 1214 1253 1292 1331 1333 3 42 81 120 159 198 237 276 315 354 393 432 471 510 549 588 627 628 630 669 708 747 786 825 864 903 942 981 1020 1059 1098 1137 1176 1215 1254 1293 1332 1334 4 43 82 121 160 199 238 277 316 355 394 433 472 511 550 589 590 629 631 670 709 748 787 826 865 904 943 982 1021 1060 1099 1138 1177 1216 1255 1294 1296 1335 5 44 83 122 161 200 239 278 317 356 395 434 473 512 551 552 591 593 632 671 710 749 788 827 866 905 944 983 1022 1061 1100 1139 1178 1217 1256 1295 1297 1336 6 45 84 123 162 201 240 279 318 357 396 435 474 513 514 553 592 594 633 672 711 750 789 828 867 906 945 984 1023 1062 1101 1140 1179 1218 1257 1259 1298 1337 7 46 85 124 163 202 241 280 319 358 397 436 475 476 515 554 556 595 634 673 712 751 790 829 868 907 946 985 1024 1063 1102 1141 1180 1219 1258 
1260 1299 1338 8 47 86 125 164 203 242 281 320 359 398 437 438 477 516 555 557 596 635 674 713 752 791 830 869 908 947 986 1025 1064 1103 1142 1181 1220 1222 1261 1300 1339 9 48 87 126 165 204 243 282 321 360 399 400 439 478 517 519 558 597 636 675 714 753 792 831 870 909 948 987 1026 1065 1104 1143 1182 1221 1223 1262 1301 1340 10 49 88 127 166 205 244 283 322 361 362 401 440 479 518 520 559 598 637 676 715 754 793 832 871 910 949 988 1027 1066 1105 1144 1183 1185 1224 1263 1302 1341 11 50 89 128 167 206 245 284 323 324 363 402 441 480 482 521 560 599 638 677 716 755 794 833 872 911 950 989 1028 1067 1106 1145 1184 1186 1225 1264 1303 1342 12 51 90 129 168 207 246 285 286 325 364 403 442 481 483 522 561 600 639 678 717 756 795 834 873 912 951 990 1029 1068 1107 1146 1148 1187 1226 1265 1304 1343 13 52 91 130 169 208 247 248 287 326 365 404 443 445 484 523 562 601 640 679 718 757 796 835 874 913 952 991 1030 1069 1108 1147 1149 1188 1227 1266 1305 1344 14 53 92 131 170 209 210 249 288 327 366 405 444 446 485 524 563 602 641 680 719 758 797 836 875 914 953 992 1031 1070 1109 1111 1150 1189 1228 1267 1306 1345 15 54 93 132 171 172 211 250 289 328 367 406 408 447 486 525 564 603 642 681 720 759 798 837 876 915 954 993 1032 1071 1110 1112 1151 1190 1229 1268 1307 1346 16 55 94 133 134 173 212 251 290 329 368 407 409 448 487 526 565 604 643 682 721 760 799 838 877 916 955 994 1033 1072 1074 1113 1152 1191 1230 1269 1308 1347 17 56 95 96 135 174 213 252 291 330 369 371 410 449 488 527 566 605 644 683 722 761 800 839 878 917 956 995 1034 1073 1075 1114 1153 1192 1231 1270 1309 1348 18 57 58 97 136 175 214 253 292 331 370 372 411 450 489 528 567 606 645 684 723 762 801 840 879 918 957 996 1035 1037 1076 1115 1154 1193 1232 1271 1310 1349 19 20 59 98 137 176 215 254 293 332 334 373 412 451 490 529 568 607 646 685 724 763 802 841 880 919 958 997 1036 1038 1077 1116 1155 1194 1233 1272 1311 1350 1351 21 60 99 138 177 216 255 294 333 335 374 413 452 491 530 569 608 647 686 725 764 803 842 881 920 959 998 1000 1039 1078 1117 1156 1195 1234 1273 1312 1313 1352 22 61 100 139 178 217 256 295 297 336 375 414 453 492 531 570 609 648 687 726 765 804 843 882 921 960 999 1001 1040 1079 1118 1157 1196 1235 1274 1275 1314 1353 23 62 101 140 179 218 257 296 298 337 376 415 454 493 532 571 610 649 688 727 766 805 844 883 922 961 963 1002 1041 1080 1119 1158 1197 1236 1237 1276 1315 1354 24 63 102 141 180 219 258 260 299 338 377 416 455 494 533 572 611 650 689 728 767 806 845 884 923 962 964 1003 1042 1081 1120 1159 1198 1199 1238 1277 1316 1355 25 64 103 142 181 220 259 261 300 339 378 417 456 495 534 573 612 651 690 729 768 807 846 885 924 926 965 1004 1043 1082 1121 1160 1161 1200 1239 1278 1317 1356 26 65 104 143 182 221 223 262 301 340 379 418 457 496 535 574 613 652 691 730 769 808 847 886 925 927 966 1005 1044 1083 1122 1123 1162 1201 1240 1279 1318 1357 27 66 105 144 183 222 224 263 302 341 380 419 458 497 536 575 614 653 692 731 770 809 848 887 889 928 967 1006 1045 1084 1085 1124 1163 1202 1241 1280 1319 1358 28 67 106 145 184 186 225 264 303 342 381 420 459 498 537 576 615 654 693 732 771 810 849 888 890 929 968 1007 1046 1047 1086 1125 1164 1203 1242 1281 1320 1359 29 68 107 146 185 187 226 265 304 343 382 421 460 499 538 577 616 655 694 733 772 811 850 852 891 930 969 1008 1009 1048 1087 1126 1165 1204 1243 1282 1321 1360 30 69 108 147 149 188 227 266 305 344 383 422 461 500 539 578 617 656 695 734 773 812 851 853 892 931 970 971 1010 1049 1088 1127 1166 1205 1244 1283 1322 1361 31 70 109 148 150 189 
228 267 306 345 384 423 462 501 540 579 618 657 696 735 774 813 815 854 893 932 933 972 1011 1050 1089 1128 1167 1206 1245 1284 1323 1362 32 71 110 112 151 190 229 268 307 346 385 424 463 502 541 580 619 658 697 736 775 814 816 855 894 895 934 973 1012 1051 1090 1129 1168 1207 1246 1285 1324 1363 33 72 111 113 152 191 230 269 308 347 386 425 464 503 542 581 620 659 698 737 776 778 817 856 857 896 935 974 1013 1052 1091 1130 1169 1208 1247 1286 1325 1364 34 73 75 114 153 192 231 270 309 348 387 426 465 504 543 582 621 660 699 738 777 779 818 819 858 897 936 975 1014 1053 1092 1131 1170 1209 1248 1287 1326 1365 35 74 76 115 154 193 232 271 310 349 388 427 466 505 544 583 622 661 700 739 741 780 781 820 859 898 937 976 1015 1054 1093 1132 1171 1210 1249 1288 1327 1366 36 38 77 116 155 194 233 272 311 350 389 428 467 506 545 584 623 662 701 740 742 743 782 821 860 899 938 977 1016 1055 1094 1133 1172 1211 1250 1289 1328 1367 37 39 78 117 156 195 234 273 312 351 390 429 468 507 546 585 624 663 702 704 705 744 783 822 861 900 939 978 1017 1056 1095 1134 1173 1212 1251 1290 1329 1368 1 40 79 118 157 196 235 274 313 352 391 430 469 508 547 586 625 664 703 inputs that work: 1) 1 1 2) 3 2 7 6 9 5 1 4 3 8 3) 4 16 3 2 13 5 10 11 8 9 6 7 12 4 15 14 1 4) 5 11 18 25 2 9 10 12 19 21 3 4 6 13 20 22 23 5 7 14 16 17 24 1 8 15 5) 7 22 31 40 49 2 11 20 21 23 32 41 43 3 12 13 15 24 33 42 44 4 5 14 16 25 34 36 45 46 6 8 17 26 35 37 38 47 7 9 18 27 29 30 39 48 1 10 19 28 6) 9 37 48 59 70 81 2 13 24 35 36 38 49 60 71 73 3 14 25 26 28 39 50 61 72 74 4 15 16 27 29 40 51 62 64 75 5 6 17 19 30 41 52 63 65 76 77 7 18 20 31 42 53 55 66 67 78 8 10 21 32 43 54 56 57 68 79 9 11 22 33 44 46 47 58 69 80 1 12 23 34 45 this is my code for the algorithm explained above and the problem is on sum. pin[][] is the array with the square inputs in it and pin1[] is the array i made from pin to check if it has unique numbers or not. Your help will be greatly appreciated.(I havent included main and etc because the assigment asks for 2 things to check if a square is magic and to create one using arguements, i have included only the stuff i have done for the check process. MAXN is defined 100
[ "A few issues ...\n\nAt first I thought that sum et. al. was overflowing an int [and needed to be long long] but that's not the case\nThe pin array is [probably] too big to fit on the stack -- needs to be global scope.\nThe pin1 array is int pin1[MAXN]; but is accessed via (e.g.) pin1[i * N + j] so it needs to be pin1[MAXN * MAXN];. And, should also be global scope.\n\nThere were intermittant segfaults, so moving to global scope fixed those.\n\nHere is the corrected code. Because I needed to run the program under gdb, I hacked in an fopen call and changed the scanf to be fscanf.\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n\n#define MAXN 100\n\n#define SCAN(_sym) \\\n fscanf(fin,\"%d\",&_sym)\n\n#ifndef INPUT\n#define INPUT \"inp37.txt\"\n#endif\n\n#if 1\ntypedef int TYPE;\n#else\ntypedef long long TYPE;\n#endif\n\n// NOTE/FIX: these are too large to be function scoped and won't fit on the\n// stack\n#if 1\nint pin[MAXN][MAXN];\nint pin1[MAXN * MAXN];\n#endif\n\nint\nmain(int argc, char *argv[])\n{\n// NOTE/FIX: hack for gdb/debug\n#if 1\n FILE *fin = fopen(INPUT,\"r\");\n if (fin == NULL) {\n perror(INPUT);\n exit(1);\n }\n#endif\n\n if (argc == 2 || argc == 3) {\n if (strcmp(\"-check\", argv[1]) == 0) {\n int p = 0;\n int i;\n int j;\n TYPE sum;\n TYPE sum2;\n int N;\n char ch[] = \"YES\";\n char unique[] = \"YES\";\n// NOTE/BUG: wrong/insufficient dimension\n#if 0\n int pin1[MAXN];\n#endif\n\n#if 0\n scanf(\"%d\", &N);\n#else\n SCAN(N);\n TYPE N2 = N * N;\n TYPE NGOOD = N;\n NGOOD = (NGOOD * (NGOOD * NGOOD + 1) / 2);\n#endif\n// NOTE/BUG: too large for stack variable\n#if 0\n int pin[MAXN][MAXN];\n#endif\n\n for (i = 0; i < N; i++) {\n for (j = 0; j < N; j++) {\n#if 0\n scanf(\"%d\", &pin[i][j]);\n#else\n SCAN(pin[i][j]);\n#endif\n pin1[i * N + j] = pin[i][j];\n }\n }\n i = 0;\n if (pin1[i] > N2) {\n strcpy(unique, \"NO\");\n }\n for (i = 0; i < N2; i++) {\n for (j = i + 1; j < N2; j++) {\n if (pin1[i] == pin1[j] || pin1[i] > N2) {\n strcpy(unique, \"NO\");\n }\n }\n }\n for (i = 0; i < N; i++) {\n sum = 0;\n strcpy(ch, \"YES\");\n for (j = 0; j < N; j++) {\n sum = sum + pin[i][j];\n }\n if (sum != NGOOD) {\n strcpy(ch, \"NO\");\n p = 1;\n }\n printf(\"ROW %d %s\\n\", i + 1, ch);\n }\n for (j = 0; j < N; j++) {\n sum = 0;\n strcpy(ch, \"YES\");\n for (i = 0; i < N; i++) {\n sum = sum + pin[i][j];\n }\n if (sum != NGOOD) {\n strcpy(ch, \"NO\");\n p = 1;\n }\n printf(\"COLUMN %d %s\\n\", j + 1, ch);\n }\n sum = 0;\n sum2 = 0;\n strcpy(ch, \"YES\");\n for (i = 0; i < N; i++) {\n sum = sum + pin[i][i];\n sum2 = sum2 + pin[N - 1 - i][i];\n }\n if (sum != NGOOD) {\n strcpy(ch, \"NO\");\n p = 1;\n }\n printf(\"DIAG1 %s\\n\", ch);\n strcpy(ch, \"YES\");\n if (sum2 != NGOOD) {\n strcpy(ch, \"NO\");\n p = 1;\n }\n printf(\"DIAG2 %s\\n\", ch);\n printf(\"UNIQUE %s\\n\", unique);\n strcpy(ch, \"YES\");\n if (p == 1) {\n strcpy(ch, \"NO\");\n }\n printf(\"MAGIC %s\\n\", ch);\n }\n\n else if (strcmp(\"-create\", argv[1]) == 0) {\n int N = atoi(argv[2]);\n#if 0\n int pin[MAXN][MAXN];\n#endif\n int row = N - 1;\n int col = N / 2;\n\n for (int i = 1; i <= N * N; i++) {\n int tempr = row;\n int tempc = col;\n\n pin[row][col] = i;\n row++;\n col++;\n if (row == N) {\n row = 0;\n }\n if (col == N) {\n col = 0;\n }\n if ((pin[row][col] >= 1) & (pin[row][col] <= N * N)) {\n row = tempr - 1;\n col = tempc;\n }\n }\n printf(\"%d\\n\", N);\n for (int i = 0; i < N; i++) {\n for (int j = 0; j < N; j++) {\n printf(\"%d \", pin[i][j]);\n }\n printf(\"\\n\");\n }\n }\n else {\n printf(\"Such 
action cannot be done\");\n }\n }\n else if (argc > 3) {\n printf(\"Too many arguments supplied.\\n\");\n }\n else {\n printf(\"At least one argument expected.\\n\");\n }\n\n#if 1\n fclose(fin);\n#endif\n\n return 0;\n}\n\n\nIn the code above, I've used cpp conditionals to denote old vs. new code:\n#if 0\n// old code\n#else\n// new code\n#endif\n\n#if 1\n// new code\n#endif\n\nNote: this can be cleaned up by running the file through unifdef -k\n\nHere is the program output for your posted \"big\" input:\nROW 1 YES\nROW 2 YES\nROW 3 YES\nROW 4 YES\nROW 5 YES\nROW 6 YES\nROW 7 YES\nROW 8 YES\nROW 9 YES\nROW 10 YES\nROW 11 YES\nROW 12 YES\nROW 13 YES\nROW 14 YES\nROW 15 YES\nROW 16 YES\nROW 17 YES\nROW 18 YES\nROW 19 YES\nROW 20 YES\nROW 21 YES\nROW 22 YES\nROW 23 YES\nROW 24 YES\nROW 25 YES\nROW 26 YES\nROW 27 YES\nROW 28 YES\nROW 29 YES\nROW 30 YES\nROW 31 YES\nROW 32 YES\nROW 33 YES\nROW 34 YES\nROW 35 YES\nROW 36 YES\nROW 37 YES\nCOLUMN 1 YES\nCOLUMN 2 YES\nCOLUMN 3 YES\nCOLUMN 4 YES\nCOLUMN 5 YES\nCOLUMN 6 YES\nCOLUMN 7 YES\nCOLUMN 8 YES\nCOLUMN 9 YES\nCOLUMN 10 YES\nCOLUMN 11 YES\nCOLUMN 12 YES\nCOLUMN 13 YES\nCOLUMN 14 YES\nCOLUMN 15 YES\nCOLUMN 16 YES\nCOLUMN 17 YES\nCOLUMN 18 YES\nCOLUMN 19 YES\nCOLUMN 20 YES\nCOLUMN 21 YES\nCOLUMN 22 YES\nCOLUMN 23 YES\nCOLUMN 24 YES\nCOLUMN 25 YES\nCOLUMN 26 YES\nCOLUMN 27 YES\nCOLUMN 28 YES\nCOLUMN 29 YES\nCOLUMN 30 YES\nCOLUMN 31 YES\nCOLUMN 32 YES\nCOLUMN 33 YES\nCOLUMN 34 YES\nCOLUMN 35 YES\nCOLUMN 36 YES\nCOLUMN 37 YES\nDIAG1 YES\nDIAG2 YES\nUNIQUE YES\nMAGIC YES\n\n" ]
[ 0 ]
[]
[]
[ "arrays", "c", "magic_square", "sum" ]
stackoverflow_0074679676_arrays_c_magic_square_sum.txt
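A short follow-up sketch on the fix above (not from the original thread): the decisive bug is that pin1 holds only MAXN entries while the code writes pin1[i*N + j], which runs up to index N*N - 1; for N = 37 that is 1368, far past the end of a 100-element array, so neighbouring stack data (including the square itself) gets corrupted before the sums are taken. Besides making the arrays global, heap allocation also works. The sketch below assumes the same stdin format and, for brevity, only prints the row checks; main, the scanf error handling and names like target are mine, not the assignment's.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    int N;
    if (scanf("%d", &N) != 1 || N <= 0)
        return 1;

    /* one heap block instead of int pin[MAXN][MAXN] on the stack;
       pin[i][j] lives at index i * N + j, so no separate pin1 copy
       (whose MAXN size overflowed for N >= 11) is needed */
    int *pin = malloc((size_t)N * N * sizeof *pin);
    if (pin == NULL)
        return 1;

    for (int i = 0; i < N * N; i++) {
        if (scanf("%d", &pin[i]) != 1) {
            free(pin);
            return 1;
        }
    }

    /* magic constant: N * (N*N + 1) / 2 */
    long long target = (long long)N * ((long long)N * N + 1) / 2;

    for (int i = 0; i < N; i++) {
        long long sum = 0;
        for (int j = 0; j < N; j++)
            sum += pin[i * N + j];
        printf("ROW %d %s\n", i + 1, sum == target ? "YES" : "NO");
    }

    free(pin);
    return 0;
}

The same i * N + j indexing can also drive the uniqueness check, which sidesteps the undersized flattened buffer entirely.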
Q: Problem reading from a pipe in POSIX Linux when the content size is bigger than 65536
Why can't I read contents larger than 65536 bytes from a file descriptor with my code?
void SetFdAsync(int fd) {
    int flags;
    if (-1 == (flags = fcntl(fd, F_GETFL, 0)))
        flags = 0;
    fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}

std::tuple<std::string, std::string> ReadPipe(int pipe_out, int pipe_err) {
    char buf[PIPE_BUF];
    char buf_err[PIPE_BUF];

    int rd = 0;
    int rd_err = 0;

    SetFdAsync(pipe_out);
    SetFdAsync(pipe_err);

    std::string str_out;
    std::string str_err;

    int i = 0;
    while ((rd = read(pipe_out, buf, PIPE_BUF)) > 0) {
        std::cout << "loop: " << i++ << std::endl;
        str_out.append(buf, rd);
    }

    while ((rd_err = read(pipe_err, buf_err, PIPE_BUF)) > 0) {
        str_err.append(buf_err, rd_err);
    }

    return std::tuple<std::string, std::string>(str_out, str_err);
}

Why can't I read the contents when the length is bigger than 65536? What must I change?
Problem reading from a pipe in POSIX Linux when the content size is bigger than 65536
Why can't I read contents larger than 65536 bytes from a file descriptor with my code?
void SetFdAsync(int fd) {
    int flags;
    if (-1 == (flags = fcntl(fd, F_GETFL, 0)))
        flags = 0;
    fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}

std::tuple<std::string, std::string> ReadPipe(int pipe_out, int pipe_err) {
    char buf[PIPE_BUF];
    char buf_err[PIPE_BUF];

    int rd = 0;
    int rd_err = 0;

    SetFdAsync(pipe_out);
    SetFdAsync(pipe_err);

    std::string str_out;
    std::string str_err;

    int i = 0;
    while ((rd = read(pipe_out, buf, PIPE_BUF)) > 0) {
        std::cout << "loop: " << i++ << std::endl;
        str_out.append(buf, rd);
    }

    while ((rd_err = read(pipe_err, buf_err, PIPE_BUF)) > 0) {
        str_err.append(buf_err, rd_err);
    }

    return std::tuple<std::string, std::string>(str_out, str_err);
}

Why can't I read the contents when the length is bigger than 65536? What must I change?
[]
[]
[ "It looks like the issue is with the size of the buf and buf_err buffers. The maximum value of PIPE_BUF is defined by the POSIX standard to be 65536, which means that the buf and buf_err arrays will never be able to hold more than 65536 bytes of data. If the file descriptor contains more data than that, the read function will only be able to read the first 65536 bytes, and the rest of the data will be lost.\nTo fix this issue, you can either increase the size of the buf and buf_err arrays, or you can use a loop to read the data in chunks, appending each chunk to the str_out and str_err strings as you go. Here's how you could do that:\nstd::tuple<std::string, std::string> ReadPipe(int pipe_out, int pipe_err) {\n // Use a larger buffer size\n const int BUF_SIZE = 65536 * 4;\n char buf[BUF_SIZE];\n char buf_err[BUF_SIZE];\n\n int rd = 0;\n int rd_err = 0;\n\n SetFdAsync(pipe_out);\n SetFdAsync(pipe_err);\n\n std::string str_out;\n std::string str_err;\n\n int i = 0;\n // Use a loop to read the data in chunks\n while ((rd = read(pipe_out, buf, BUF_SIZE)) > 0) {\n std::cout << \"loop: \" << i++ << std::endl;\n str_out.append(buf, rd);\n }\n\n while ((rd_err = read(pipe_err, buf_err, BUF_SIZE)) > 0) {\n str_err.append(buf_err, rd_err);\n }\n\n return std::tuple<std::string, std::string>(str_out, str_err);\n}\n\nI hope this fix ur issue!\n" ]
[ -1 ]
[ "c++", "linux", "operating_system", "pipe", "posix" ]
stackoverflow_0074679305_c++_linux_operating_system_pipe_posix.txt
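A note on the pipe question above, since the limit is not really PIPE_BUF: on Linux, PIPE_BUF is the atomic-write limit (4096 bytes), while 65536 bytes is the default pipe capacity. With O_NONBLOCK set, read() returns -1 with errno set to EAGAIN the moment the buffer is momentarily empty, even though the writer has more to send, so each while loop stops after roughly one pipe's worth of data. Below is a hedged sketch of one common alternative, blocking descriptors multiplexed with poll() until both pipes reach EOF; ReadBoth and drain are invented names and error handling is minimal.

#include <poll.h>
#include <unistd.h>
#include <cerrno>
#include <string>
#include <tuple>

// Append whatever is currently readable; returns false once fd reaches EOF
// or hits a real error.
static bool drain(int fd, std::string &out) {
    char buf[4096];
    ssize_t rd = read(fd, buf, sizeof buf);
    if (rd > 0) { out.append(buf, rd); return true; }
    if (rd < 0 && (errno == EAGAIN || errno == EINTR)) return true; // not EOF
    return false; // rd == 0 means EOF
}

std::tuple<std::string, std::string> ReadBoth(int pipe_out, int pipe_err) {
    std::string str_out, str_err;
    struct pollfd fds[2] = { {pipe_out, POLLIN, 0}, {pipe_err, POLLIN, 0} };
    int open_fds = 2;
    while (open_fds > 0) {
        if (poll(fds, 2, -1) < 0) {       // block until data, EOF or hangup
            if (errno == EINTR) continue; // retry on signal
            break;
        }
        for (int i = 0; i < 2; i++) {
            if (fds[i].fd >= 0 && (fds[i].revents & (POLLIN | POLLHUP))) {
                if (!drain(fds[i].fd, i == 0 ? str_out : str_err)) {
                    fds[i].fd = -1;       // poll() ignores negative fds
                    --open_fds;
                }
            }
        }
    }
    return { str_out, str_err };
}

Multiplexing also avoids a second failure mode of the original two sequential loops: if the child fills the stderr pipe while the parent is still looping on stdout, both processes can deadlock.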
Q: Why does accessing another model attribute in an appended attribute of a model cause a "possible infinite loop"?
My Laravel 9 application has two models: a brand model and a product model. Each product belongs to one brand and one brand has many products (1:n relation).
The product model should offer a "calculated" (appended) attribute called title_medium that concatenates the title of the brand and the title of the product on request.
As soon as I try to access $this->brand in the getTitleMediumAttribute() method of the product model, xdebug throws a possible infinite loop exception and cancels execution (after N iterations). I think it has something to do with relations and loading sequences (eager loading), but I couldn't find a solution so far.
The brand model
The brand model has an attribute title and has many products which belong to a brand.
namespace App\Models;

use Illuminate\Database\Eloquent\Factories\HasFactory;
use Illuminate\Database\Eloquent\Model;
use Illuminate\Database\Eloquent\Relations\HasMany;
use Illuminate\Support\Str;

class Brand extends Model
{
    use HasFactory;

    /**
     * Additional attributes for this model
     */
    protected $appends = [
        'prices'
    ];

    protected $fillable = [
        'title'
    ];

    /**
     * The "booted" method of the model.
     *
     * @return void
     */
    protected static function booted()
    {
        static::creating(function ($brand) {
            $brand->slug = Str::slug($brand->title, '-', 'de');
        });
    }

    /**
     * Returns all products for a brand
     *
     * @return HasMany
     */
    public function products(): HasMany
    {
        return $this->hasMany(Product::class);
    }
}

The product model
Each product belongs to a brand. An appended attribute title_medium should concatenate the brand title and the product title.
namespace App\Models;

class Product extends Model
{
    use HasFactory, Searchable, Filterable;

    protected $fillable = [
        'title',
        'brand_id',
        'image'
    ];

    /**
     * Additional attributes for this model
     */
    protected $appends = [
        'title_long',
        'lowest_price',
        'highest_discount_percent_price',
        'latest_price_date',
        'price_count'
    ];

    /**
     * The "booted" method of the model.
     *
     * @return void
     */
    protected static function booted()
    {
        static::creating(function ($product) {
            $product->slug = Str::slug($product->title_long, '-', 'de');
        });
    }

    /**
     * Product belongs to one brand
     */
    public function brand(): BelongsTo
    {
        return $this->belongsTo(Brand::class);
    }

    /**
     * Get the combined title from product and brand
     */
    public function getTitleMediumAttribute(): string
    {
        // THIS CAUSES A "POSSIBLE INFINITE LOOP EXCEPTION" in xdebug
        return $this->brand->title . ' ' . $this->title;
    }
}

A: Try using an Attribute cast instead of getTitleMediumAttribute, and check whether you still get the same error (use this method in place of getTitleMediumAttribute):
public function titleMedium(): Attribute
{
    return Attribute::get(
        fn () => "{$this->brand->title} $this->title",
    );
}

Attribute here is \Illuminate\Database\Eloquent\Casts\Attribute.
Why does accessing another model attribute in an appended attribute of a model cause a "possible infinite loop"?
My Laravel 9 application has two models: a brand model and a product model. Each product belongs to one brand and one brand has many products (1:n relation).
The product model should offer a "calculated" (appended) attribute called title_medium that concatenates the title of the brand and the title of the product on request.
As soon as I try to access $this->brand in the getTitleMediumAttribute() method of the product model, xdebug throws a possible infinite loop exception and cancels execution (after N iterations). I think it has something to do with relations and loading sequences (eager loading), but I couldn't find a solution so far.
The brand model
The brand model has an attribute title and has many products which belong to a brand.
namespace App\Models;

use Illuminate\Database\Eloquent\Factories\HasFactory;
use Illuminate\Database\Eloquent\Model;
use Illuminate\Database\Eloquent\Relations\HasMany;
use Illuminate\Support\Str;

class Brand extends Model
{
    use HasFactory;

    /**
     * Additional attributes for this model
     */
    protected $appends = [
        'prices'
    ];

    protected $fillable = [
        'title'
    ];

    /**
     * The "booted" method of the model.
     *
     * @return void
     */
    protected static function booted()
    {
        static::creating(function ($brand) {
            $brand->slug = Str::slug($brand->title, '-', 'de');
        });
    }

    /**
     * Returns all products for a brand
     *
     * @return HasMany
     */
    public function products(): HasMany
    {
        return $this->hasMany(Product::class);
    }
}

The product model
Each product belongs to a brand. An appended attribute title_medium should concatenate the brand title and the product title.
namespace App\Models;

class Product extends Model
{
    use HasFactory, Searchable, Filterable;

    protected $fillable = [
        'title',
        'brand_id',
        'image'
    ];

    /**
     * Additional attributes for this model
     */
    protected $appends = [
        'title_long',
        'lowest_price',
        'highest_discount_percent_price',
        'latest_price_date',
        'price_count'
    ];

    /**
     * The "booted" method of the model.
     *
     * @return void
     */
    protected static function booted()
    {
        static::creating(function ($product) {
            $product->slug = Str::slug($product->title_long, '-', 'de');
        });
    }

    /**
     * Product belongs to one brand
     */
    public function brand(): BelongsTo
    {
        return $this->belongsTo(Brand::class);
    }

    /**
     * Get the combined title from product and brand
     */
    public function getTitleMediumAttribute(): string
    {
        // THIS CAUSES A "POSSIBLE INFINITE LOOP EXCEPTION" in xdebug
        return $this->brand->title . ' ' . $this->title;
    }
}
[ "Try using an Attribute instead of getTitleMediumAttribute, like this and tell me if you still get the same error (use this method instead of `getTitleMediumAttribute):\npublic function titleMedium(): Attribute\n{\n return Attribute::get(\n fn () => \"{$this->brand->title} $this->title\",\n );\n}\n\nAttribute is \\Illuminate\\Database\\Eloquent\\Casts\\Attribute\n" ]
[ 0 ]
[]
[]
[ "eloquent", "laravel", "php" ]
stackoverflow_0074679872_eloquent_laravel_php.txt
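A closing note on the Laravel question, offered as an assumption rather than a confirmed diagnosis: with $appends on both models, serializing either side can ping-pong, since a product's appended accessors lazy-load brand, Brand's appended prices accessor may touch products again, and so on until xdebug's recursion guard trips. One sketch of a guard (Laravel 9 syntax; relationLoaded() is a real Eloquent method, but the fallback string is my choice, not from the thread):

/**
 * Only use the relation when it was eager-loaded, so serializing a Brand
 * with its products cannot bounce back through brand -> products -> brand.
 */
public function getTitleMediumAttribute(): string
{
    if ($this->relationLoaded('brand')) {
        return $this->brand->title . ' ' . $this->title;
    }

    return $this->title; // hypothetical fallback when brand was not loaded
}

Callers that need the combined title would then eager-load explicitly, for example Product::with('brand')->get().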