qid
int64
46k
74.7M
question
stringlengths
54
37.8k
date
stringlengths
10
10
metadata
sequencelengths
3
3
response_j
stringlengths
17
26k
response_k
stringlengths
26
26k
6,738,987
is it possible to add extension method to python built-in types? I know that I can add extension method to defined type by simply adding new method by . as following: ``` class myClass: pass myClass.myExtensionMethod = lambda self,x:x * 2 z = myClass() print z.myExtensionMethod(10) ``` But is any way to adding extension method to python built'in types like list, dict, ... ``` list.myExtension = lambda self,x:x * 2 list.myExtension(10) ```
2011/07/18
[ "https://Stackoverflow.com/questions/6738987", "https://Stackoverflow.com", "https://Stackoverflow.com/users/614953/" ]
It can be done in pure Python with this incredibly clever module: <https://pypi.python.org/pypi/forbiddenfruit> For example: ``` import functools import ctypes import __builtin__ import operator class PyObject(ctypes.Structure): pass Py_ssize_t = hasattr(ctypes.pythonapi, 'Py_InitModule4_64') and ctypes.c_int64 or ctypes.c_int PyObject._fields_ = [ ('ob_refcnt', Py_ssize_t), ('ob_type', ctypes.POINTER(PyObject)), ] class SlotsPointer(PyObject): _fields_ = [('dict', ctypes.POINTER(PyObject))] def proxy_builtin(klass): name = klass.__name__ slots = getattr(klass, '__dict__', name) pointer = SlotsPointer.from_address(id(slots)) namespace = {} ctypes.pythonapi.PyDict_SetItem( ctypes.py_object(namespace), ctypes.py_object(name), pointer.dict, ) return namespace[name] def die(message, cls=Exception): """ Raise an exception, allows you to use logical shortcut operators to test for object existence succinctly. User.by_name('username') or die('Failed to find user') """ raise cls(message) def unguido(self, key): """ Attempt to find methods which should really exist on the object instance. """ return functools.partial((getattr(__builtin__, key, None) if hasattr(__builtin__, key) else getattr(operator, key, None)) or die(key, KeyError), self) class mapper(object): def __init__(self, iterator, key): self.iterator = iterator self.key = key self.fn = lambda o: getattr(o, key) def __getattribute__(self, key): if key in ('iterator', 'fn', 'key'): return object.__getattribute__(self, key) return mapper(self, key) def __call__(self, *args, **kwargs): self.fn = lambda o: (getattr(o, self.key, None) or unguido(o, self.key))(*args, **kwargs) return self def __iter__(self): for value in self.iterator: yield self.fn(value) class foreach(object): """ Creates an output iterator which will apply any functions called on it to every element in the input iterator. A kind of chainable version of filter(). 
E.g: foreach([1, 2, 3]).__add__(2).__str__().replace('3', 'a').upper() is equivalent to: (str(o + 2).replace('3', 'a').upper() for o in iterator) Obviously this is not 'Pythonic'. """ def __init__(self, iterator): self.iterator = iterator def __getattribute__(self, key): if key in ('iterator',): return object.__getattribute__(self, key) return mapper(self.iterator, key) def __iter__(self): for value in self.iterator: yield value proxy_builtin(list)['foreach'] = property(foreach) import string print string.join([1, 2, 3].foreach.add(2).str().add(' cookies').upper(), ', ') >>> 3 COOKIES, 4 COOKIES, 5 COOKIES ``` There, doesn't that feel good?
The best you can do appears to be deriving a class from the built-in type. For example: ``` class mylist(list): def myfunc(self, x): self.append(x) test = mylist([1,2,3,4]) test.myfunc(99) ``` (You could even name it "list" so as to get the same constructor, if you wanted.) However, you cannot directly modify a built-in type like the example in your question.
6,738,987
is it possible to add extension method to python built-in types? I know that I can add extension method to defined type by simply adding new method by . as following: ``` class myClass: pass myClass.myExtensionMethod = lambda self,x:x * 2 z = myClass() print z.myExtensionMethod(10) ``` But is any way to adding extension method to python built'in types like list, dict, ... ``` list.myExtension = lambda self,x:x * 2 list.myExtension(10) ```
2011/07/18
[ "https://Stackoverflow.com/questions/6738987", "https://Stackoverflow.com", "https://Stackoverflow.com/users/614953/" ]
It can be done in pure Python with this incredibly clever module: <https://pypi.python.org/pypi/forbiddenfruit> For example: ``` import functools import ctypes import __builtin__ import operator class PyObject(ctypes.Structure): pass Py_ssize_t = hasattr(ctypes.pythonapi, 'Py_InitModule4_64') and ctypes.c_int64 or ctypes.c_int PyObject._fields_ = [ ('ob_refcnt', Py_ssize_t), ('ob_type', ctypes.POINTER(PyObject)), ] class SlotsPointer(PyObject): _fields_ = [('dict', ctypes.POINTER(PyObject))] def proxy_builtin(klass): name = klass.__name__ slots = getattr(klass, '__dict__', name) pointer = SlotsPointer.from_address(id(slots)) namespace = {} ctypes.pythonapi.PyDict_SetItem( ctypes.py_object(namespace), ctypes.py_object(name), pointer.dict, ) return namespace[name] def die(message, cls=Exception): """ Raise an exception, allows you to use logical shortcut operators to test for object existence succinctly. User.by_name('username') or die('Failed to find user') """ raise cls(message) def unguido(self, key): """ Attempt to find methods which should really exist on the object instance. """ return functools.partial((getattr(__builtin__, key, None) if hasattr(__builtin__, key) else getattr(operator, key, None)) or die(key, KeyError), self) class mapper(object): def __init__(self, iterator, key): self.iterator = iterator self.key = key self.fn = lambda o: getattr(o, key) def __getattribute__(self, key): if key in ('iterator', 'fn', 'key'): return object.__getattribute__(self, key) return mapper(self, key) def __call__(self, *args, **kwargs): self.fn = lambda o: (getattr(o, self.key, None) or unguido(o, self.key))(*args, **kwargs) return self def __iter__(self): for value in self.iterator: yield self.fn(value) class foreach(object): """ Creates an output iterator which will apply any functions called on it to every element in the input iterator. A kind of chainable version of filter(). 
E.g: foreach([1, 2, 3]).__add__(2).__str__().replace('3', 'a').upper() is equivalent to: (str(o + 2).replace('3', 'a').upper() for o in iterator) Obviously this is not 'Pythonic'. """ def __init__(self, iterator): self.iterator = iterator def __getattribute__(self, key): if key in ('iterator',): return object.__getattribute__(self, key) return mapper(self.iterator, key) def __iter__(self): for value in self.iterator: yield value proxy_builtin(list)['foreach'] = property(foreach) import string print string.join([1, 2, 3].foreach.add(2).str().add(' cookies').upper(), ', ') >>> 3 COOKIES, 4 COOKIES, 5 COOKIES ``` There, doesn't that feel good?
Nope, you gotta subclass! ``` >>> import string >>> class MyString(str): ... def disemvowel(self): ... return MyString(string.translate(self, None, "aeiou")) ... >>> s = MyString("this is only a test") >>> s.disemvowel() 'ths s nly tst' ``` --- Or more specific to your example ``` >>> class MyList(list): ... pass ... >>> MyList.myExtension = lambda self,x:x * 2 >>> l = MyList() >>> l.myExtension(10) 20 ```
18,038,492
I have a PHP script that needs to take one command-line argument. I need to call this script from inside my python script. ``` Popen('php simplepush.php "Here's the argument"', shell=True, cwd="/home/ubuntu/web/firestopapp.com/app") ``` ^That works. However, I want to pass a variable in the Python script instead of "Here's the argument". But when I try: ``` var1 = "yes" Popen(['php', 'simplepush.php', var1], shell=True, cwd="/home/ubuntu/web/firestopapp.com/app") ``` it no longer works. And this is run through crontab, which is what led me to have to include the cwd argument. I'd really appreciate any help, seems like a fairly straightforward syntactical issue. After Eric's suggestion: `Traceback (most recent call last): File "/home/ubuntu/web/mywebsite.com/app/email_parse.py", line 25, in <module> Popen('php simplepush.php "Here's the argument"', shell=False, cwd="/home/ubuntu/web/mywebsite.com/app") File "/usr/lib/python2.7/subprocess.py", line 711, in __init__ errread, errwrite) File "/usr/lib/python2.7/subprocess.py", line 1308, in _execute_child raise child_exception OSError: [Errno 2] No such file or directory` Sihrc's solution gets me the following, so it's not a full fix. `/bin/sh: 1: cannot open my2ndemail@gmail.com: No such file` and here's the rest of the code. ``` #!/usr/bin/python import email, getpass, imaplib, os, subprocess from subprocess import Popen detach_dir = '.' 
m = imaplib.IMAP4_SSL("imap.gmail.com") m.login("myemail@gmail.com","mypassword") m.select('mailbox') resp, items = m.search(None, "(UNSEEN)") message = "" items = items[0].split() for emailid in items: resp, data = m.fetch(emailid, "(RFC822)") email_body = data[0][1] mail = email.message_from_string(email_body) message += "["+mail["From"]+"] :" + mail["Subject"] + "\n" for part in mail.walk(): if part.get_content_type() == 'text/plain': message += part.get_payload() else: continue Popen('php simplepush.php ' + str(eval('message')), shell=True, cwd="/home/ubuntu/web/firestopapp.com/app") ```
2013/08/04
[ "https://Stackoverflow.com/questions/18038492", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2476581/" ]
You will not be able to get the JSON in controller. In ASP.NET Web API pipeline, binding happens before the action method executes. Media formatter would have read the request body JSON (which is a read-once stream) and emptied the contents by the time the execution comes to your action method. But if you read the JSON from a component running in the pipeline before the binding, say a message handler, you will be able to read it like this. If you must get the JSON in action method, you can store it in the properties dictionary. ``` public class MessageContentReadingHandler : DelegatingHandler { protected override async Task<HttpResponseMessage> SendAsync( HttpRequestMessage request, CancellationToken cancellationToken) { var content = await request.Content.ReadAsStringAsync(); // At this point 'content' variable has the raw message body request.Properties["json"] = content; return await base.SendAsync(request, cancellationToken); } } ``` From the action method, you can retrieve JSON string like this: ``` public HttpResponseMessage PostCustomer(Customer customer) { string json = (string)Request.Properties["json"]; } ```
You can't get the parsed JSON, but you can get the content and parse it yourself. Try this: ``` public async Task PostCustomer(Customer customer) { var json = Newtonsoft.Json.JsonConvert.DeserializeObject(await this.Request.Content.ReadAsStringAsync()); ///You can deserialize to any object you need or simply a Dictionary<string,object> so you can check the key value pairs. } ```
18,038,492
I have a PHP script that needs to take one command-line argument. I need to call this script from inside my python script. ``` Popen('php simplepush.php "Here's the argument"', shell=True, cwd="/home/ubuntu/web/firestopapp.com/app") ``` ^That works. However, I want to pass a variable in the Python script instead of "Here's the argument". But when I try: ``` var1 = "yes" Popen(['php', 'simplepush.php', var1], shell=True, cwd="/home/ubuntu/web/firestopapp.com/app") ``` it no longer works. And this is run through crontab, which is what led me to have to include the cwd argument. I'd really appreciate any help, seems like a fairly straightforward syntactical issue. After Eric's suggestion: `Traceback (most recent call last): File "/home/ubuntu/web/mywebsite.com/app/email_parse.py", line 25, in <module> Popen('php simplepush.php "Here's the argument"', shell=False, cwd="/home/ubuntu/web/mywebsite.com/app") File "/usr/lib/python2.7/subprocess.py", line 711, in __init__ errread, errwrite) File "/usr/lib/python2.7/subprocess.py", line 1308, in _execute_child raise child_exception OSError: [Errno 2] No such file or directory` Sihrc's solution gets me the following, so it's not a full fix. `/bin/sh: 1: cannot open my2ndemail@gmail.com: No such file` and here's the rest of the code. ``` #!/usr/bin/python import email, getpass, imaplib, os, subprocess from subprocess import Popen detach_dir = '.' 
m = imaplib.IMAP4_SSL("imap.gmail.com") m.login("myemail@gmail.com","mypassword") m.select('mailbox') resp, items = m.search(None, "(UNSEEN)") message = "" items = items[0].split() for emailid in items: resp, data = m.fetch(emailid, "(RFC822)") email_body = data[0][1] mail = email.message_from_string(email_body) message += "["+mail["From"]+"] :" + mail["Subject"] + "\n" for part in mail.walk(): if part.get_content_type() == 'text/plain': message += part.get_payload() else: continue Popen('php simplepush.php ' + str(eval('message')), shell=True, cwd="/home/ubuntu/web/firestopapp.com/app") ```
2013/08/04
[ "https://Stackoverflow.com/questions/18038492", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2476581/" ]
You will not be able to get the JSON in controller. In ASP.NET Web API pipeline, binding happens before the action method executes. Media formatter would have read the request body JSON (which is a read-once stream) and emptied the contents by the time the execution comes to your action method. But if you read the JSON from a component running in the pipeline before the binding, say a message handler, you will be able to read it like this. If you must get the JSON in action method, you can store it in the properties dictionary. ``` public class MessageContentReadingHandler : DelegatingHandler { protected override async Task<HttpResponseMessage> SendAsync( HttpRequestMessage request, CancellationToken cancellationToken) { var content = await request.Content.ReadAsStringAsync(); // At this point 'content' variable has the raw message body request.Properties["json"] = content; return await base.SendAsync(request, cancellationToken); } } ``` From the action method, you can retrieve JSON string like this: ``` public HttpResponseMessage PostCustomer(Customer customer) { string json = (string)Request.Properties["json"]; } ```
I was trying to do something very similar, but wasn't able to find a way to inject a handler directly into Web API in the proper place. It seems delegated message handlers fall between the deserialize/serialize step and the routing step (something they don't show you in all those Web API pipeline diagrams). However I found that the OWIN pipeline precedes the Web API pipeline. So by adding OWIN to your Web API project and creating a custom middleware class, you can handle requests before they hit the Web API pipeline and after they leave the Web API pipeline, which is very handy. And will definitely get you the results you're looking for. Hope this helps.
40,445,390
I have a list composed by tuples. Each tuple is in the following tuple format: (String, Integer). I want to merge the tuples that have the same head (String) as follows: ``` [("Foo", 2), ("Bar", 4), ("Foo", 2), ("Bar", 4), ("Foo", 2)] ``` should become: ``` [("Foo", 6), ("Bar",8)]. ``` What is a good python algorithm for this?
2016/11/06
[ "https://Stackoverflow.com/questions/40445390", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4383494/" ]
How about collecting the sums in a [`defaultdict`](https://docs.python.org/3.6/library/collections.html#collections.defaultdict)? ``` from collections import defaultdict d = defaultdict(int) for (key, value) in items: d[key] += value ``` And then turn them back to a list of tuples: ``` list(d.items()) ``` The `defaultdict` in this example uses the `int` function to fill in unknown values as `0`. So the first time a particular `d[key]` is added to, it assumes an initial value of `0` and gets summed from there.
``` d = {} map(lambda (x,y):d.setdefault(x,[]).append(y),a) print [(k,sum(v)) for k,v in d.items()] ```
40,445,390
I have a list composed by tuples. Each tuple is in the following tuple format: (String, Integer). I want to merge the tuples that have the same head (String) as follows: ``` [("Foo", 2), ("Bar", 4), ("Foo", 2), ("Bar", 4), ("Foo", 2)] ``` should become: ``` [("Foo", 6), ("Bar",8)]. ``` What is a good python algorithm for this?
2016/11/06
[ "https://Stackoverflow.com/questions/40445390", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4383494/" ]
How about collecting the sums in a [`defaultdict`](https://docs.python.org/3.6/library/collections.html#collections.defaultdict)? ``` from collections import defaultdict d = defaultdict(int) for (key, value) in items: d[key] += value ``` And then turn them back to a list of tuples: ``` list(d.items()) ``` The `defaultdict` in this example uses the `int` function to fill in unknown values as `0`. So the first time a particular `d[key]` is added to, it assumes an initial value of `0` and gets summed from there.
You can try this: ``` from collections import defaultdict my_list = [("Foo", 2), ("Bar", 4), ("Foo", 2), ("Bar", 4), ("Foo", 2)] d = defaultdict(list) for tup in my_list: key, value = tup[0], tup[1] d[key].append(value) lst = [(key, sum(value)) for key, value in d.items()] result = sorted(lst, key = lambda x: x[1]) print(result) ```
17,581,418
I'm trying to build OpenCV with MSYS / MinGW so I can use the cv2 module in python. I'm on Windows 7 64-bit and using 32 bit Python 2.7. Building OpenCV works, but I cannot seem to use it without getting an "ImportError: DLL load failed: The specified module could not be found." after importing cv2. I've been debugging it for awhile, and the common google solutions seem not to work. These are the steps I have taken: * Removed all other instance of opencv from my computer (there were a few) * Built opencv with CMake and I manually set these flags: ``` 'CMAKE_INSTALL_PREFIX' : 'C:/Program Files (x86)/OpenCV' 'CMAKE_BUILD_TYPE' : 'Release', 'CMAKE_C_FLAGS' : '-m32', 'CMAKE_CXX_FLAGS' : '-m32', 'BUILD_opencv_gpu' : False, 'BUILD_opencv_gpuarithm' : False, 'BUILD_opencv_gpubgsegm' : False, 'BUILD_opencv_gpucodec' : False, 'BUILD_opencv_gpufeatures2d' : False, 'BUILD_opencv_gpufilters' : False, 'BUILD_opencv_gpuimgproc' : False, 'BUILD_opencv_gpuoptflow' : False, 'BUILD_opencv_gpustereo' : False, 'BUILD_opencv_gpuwarping' : False ``` * I do a cmake -G "MSYS Makefiles" ..., make -j9, and finally a make install. The printouts show that the libraries go where I expect them to. 
* My PATH includes ``` C:\MinGW\bin C:\MinGW\msys\1.0\bin C:\MinGW\libexec\gcc\mingw32\4.6.2 C:\Python27\ C:\Python27\Scripts C:\Program Files (x86)\OpenCV C:\Program Files (x86)\OpenCV\lib ``` * My PYTHONPATH includes ``` C:\Python27\Lib\site-packages C:\Python27 ``` * I then copy + cv2.pyd and libcv2.dll.a from C:\Python\Lib\site-packages + all libopencv\_\*249.dll.a from C:\Program Files (x86)\OpenCV\lib + libgcc\_s\_dw2-1.dll libstdc++-6.dll from C:\MinGW\bin + I MOVE ALL OF THESE into a directory I named cv2 * In the cv2 directory I made an \_\_init\_\_.py file containing the following code: ``` import os, sys from os.path import realpath, dirname tpl_cv2 = realpath(dirname(__file__)) print(tpl_cv2) sys.path.insert(0,tpl_cv2) os.environ['PATH'] = tpl_cv2 + os.pathsep + os.environ['PATH'] try: from cv2 import * except Exception as ex: print(repr(ex)) print(os.environ['PATH']) print(sys.path) raise ``` * I then open a IPython terminal and enter the command "import cv2" And I get the error: ImportError('DLL load failed: The specified module could not be found.',) --- To debug this I've: * I load up the cv2.pyd file in depends.exe It shows that LIBOPENCV\_CALIB3D249.DLL and all the other opencv libs are not found. However, these are all in the same directory as cv2.pyd as well as in both the PATH and PYTHONPATH. But they are named .dll.a because I compiled with MSYS / MinGW. I don't understand why its looking for the .dll without the .a suffix. I think this probably has something to do with the error, but I don't know where to go from here. * I've also built a small C++ application which reads and image using the OpenCV that I've built. It builds just fine, but I get the error: The program can't start because libopencv\_core249.dll is missing from your computer. So, I'm more convinced this .dll.a thing is the issue. * I've tried simply renaming the libs from .dll.a to .dll, but that didn't work. I'm hoping someone can shed light on this issue.
2013/07/10
[ "https://Stackoverflow.com/questions/17581418", "https://Stackoverflow.com", "https://Stackoverflow.com/users/887074/" ]
I feel really stupid. The dlls were in "C:\Program Files (x86)\bin" not "C:\Program Files (x86)\lib" It seems to work now.
Just to make sure other users can be helped with this answer: Imagine you have compiled OpenCV and have several \*.dll and the cv2.pyd file. You need to copy those files to 'DLLs' folder within the python directory. Then import the module to check wether it is ok. I have also copied the \*.lib files into the appropriate folder. Best regards.
17,581,418
I'm trying to build OpenCV with MSYS / MinGW so I can use the cv2 module in python. I'm on Windows 7 64-bit and using 32 bit Python 2.7. Building OpenCV works, but I cannot seem to use it without getting an "ImportError: DLL load failed: The specified module could not be found." after importing cv2. I've been debugging it for awhile, and the common google solutions seem not to work. These are the steps I have taken: * Removed all other instance of opencv from my computer (there were a few) * Built opencv with CMake and I manually set these flags: ``` 'CMAKE_INSTALL_PREFIX' : 'C:/Program Files (x86)/OpenCV' 'CMAKE_BUILD_TYPE' : 'Release', 'CMAKE_C_FLAGS' : '-m32', 'CMAKE_CXX_FLAGS' : '-m32', 'BUILD_opencv_gpu' : False, 'BUILD_opencv_gpuarithm' : False, 'BUILD_opencv_gpubgsegm' : False, 'BUILD_opencv_gpucodec' : False, 'BUILD_opencv_gpufeatures2d' : False, 'BUILD_opencv_gpufilters' : False, 'BUILD_opencv_gpuimgproc' : False, 'BUILD_opencv_gpuoptflow' : False, 'BUILD_opencv_gpustereo' : False, 'BUILD_opencv_gpuwarping' : False ``` * I do a cmake -G "MSYS Makefiles" ..., make -j9, and finally a make install. The printouts show that the libraries go where I expect them to. 
* My PATH includes ``` C:\MinGW\bin C:\MinGW\msys\1.0\bin C:\MinGW\libexec\gcc\mingw32\4.6.2 C:\Python27\ C:\Python27\Scripts C:\Program Files (x86)\OpenCV C:\Program Files (x86)\OpenCV\lib ``` * My PYTHONPATH includes ``` C:\Python27\Lib\site-packages C:\Python27 ``` * I then copy + cv2.pyd and libcv2.dll.a from C:\Python\Lib\site-packages + all libopencv\_\*249.dll.a from C:\Program Files (x86)\OpenCV\lib + libgcc\_s\_dw2-1.dll libstdc++-6.dll from C:\MinGW\bin + I MOVE ALL OF THESE into a directory I named cv2 * In the cv2 directory I made an \_\_init\_\_.py file containing the following code: ``` import os, sys from os.path import realpath, dirname tpl_cv2 = realpath(dirname(__file__)) print(tpl_cv2) sys.path.insert(0,tpl_cv2) os.environ['PATH'] = tpl_cv2 + os.pathsep + os.environ['PATH'] try: from cv2 import * except Exception as ex: print(repr(ex)) print(os.environ['PATH']) print(sys.path) raise ``` * I then open a IPython terminal and enter the command "import cv2" And I get the error: ImportError('DLL load failed: The specified module could not be found.',) --- To debug this I've: * I load up the cv2.pyd file in depends.exe It shows that LIBOPENCV\_CALIB3D249.DLL and all the other opencv libs are not found. However, these are all in the same directory as cv2.pyd as well as in both the PATH and PYTHONPATH. But they are named .dll.a because I compiled with MSYS / MinGW. I don't understand why its looking for the .dll without the .a suffix. I think this probably has something to do with the error, but I don't know where to go from here. * I've also built a small C++ application which reads and image using the OpenCV that I've built. It builds just fine, but I get the error: The program can't start because libopencv\_core249.dll is missing from your computer. So, I'm more convinced this .dll.a thing is the issue. * I've tried simply renaming the libs from .dll.a to .dll, but that didn't work. I'm hoping someone can shed light on this issue.
2013/07/10
[ "https://Stackoverflow.com/questions/17581418", "https://Stackoverflow.com", "https://Stackoverflow.com/users/887074/" ]
I feel really stupid. The dlls were in "C:\Program Files (x86)\bin" not "C:\Program Files (x86)\lib" It seems to work now.
This post helped me a lot. The answer I found was to make sure the compiled bin files were part of my PATH variable. My CMAKE\_INSTALL\_PREFIX was C:\opencv\src\build\install and adding C:\opencv\src\build\install\x86\vc11\bin to my PATH variable made cv2 start working.
17,581,418
I'm trying to build OpenCV with MSYS / MinGW so I can use the cv2 module in python. I'm on Windows 7 64-bit and using 32 bit Python 2.7. Building OpenCV works, but I cannot seem to use it without getting an "ImportError: DLL load failed: The specified module could not be found." after importing cv2. I've been debugging it for awhile, and the common google solutions seem not to work. These are the steps I have taken: * Removed all other instance of opencv from my computer (there were a few) * Built opencv with CMake and I manually set these flags: ``` 'CMAKE_INSTALL_PREFIX' : 'C:/Program Files (x86)/OpenCV' 'CMAKE_BUILD_TYPE' : 'Release', 'CMAKE_C_FLAGS' : '-m32', 'CMAKE_CXX_FLAGS' : '-m32', 'BUILD_opencv_gpu' : False, 'BUILD_opencv_gpuarithm' : False, 'BUILD_opencv_gpubgsegm' : False, 'BUILD_opencv_gpucodec' : False, 'BUILD_opencv_gpufeatures2d' : False, 'BUILD_opencv_gpufilters' : False, 'BUILD_opencv_gpuimgproc' : False, 'BUILD_opencv_gpuoptflow' : False, 'BUILD_opencv_gpustereo' : False, 'BUILD_opencv_gpuwarping' : False ``` * I do a cmake -G "MSYS Makefiles" ..., make -j9, and finally a make install. The printouts show that the libraries go where I expect them to. 
* My PATH includes ``` C:\MinGW\bin C:\MinGW\msys\1.0\bin C:\MinGW\libexec\gcc\mingw32\4.6.2 C:\Python27\ C:\Python27\Scripts C:\Program Files (x86)\OpenCV C:\Program Files (x86)\OpenCV\lib ``` * My PYTHONPATH includes ``` C:\Python27\Lib\site-packages C:\Python27 ``` * I then copy + cv2.pyd and libcv2.dll.a from C:\Python\Lib\site-packages + all libopencv\_\*249.dll.a from C:\Program Files (x86)\OpenCV\lib + libgcc\_s\_dw2-1.dll libstdc++-6.dll from C:\MinGW\bin + I MOVE ALL OF THESE into a directory I named cv2 * In the cv2 directory I made an \_\_init\_\_.py file containing the following code: ``` import os, sys from os.path import realpath, dirname tpl_cv2 = realpath(dirname(__file__)) print(tpl_cv2) sys.path.insert(0,tpl_cv2) os.environ['PATH'] = tpl_cv2 + os.pathsep + os.environ['PATH'] try: from cv2 import * except Exception as ex: print(repr(ex)) print(os.environ['PATH']) print(sys.path) raise ``` * I then open a IPython terminal and enter the command "import cv2" And I get the error: ImportError('DLL load failed: The specified module could not be found.',) --- To debug this I've: * I load up the cv2.pyd file in depends.exe It shows that LIBOPENCV\_CALIB3D249.DLL and all the other opencv libs are not found. However, these are all in the same directory as cv2.pyd as well as in both the PATH and PYTHONPATH. But they are named .dll.a because I compiled with MSYS / MinGW. I don't understand why its looking for the .dll without the .a suffix. I think this probably has something to do with the error, but I don't know where to go from here. * I've also built a small C++ application which reads and image using the OpenCV that I've built. It builds just fine, but I get the error: The program can't start because libopencv\_core249.dll is missing from your computer. So, I'm more convinced this .dll.a thing is the issue. * I've tried simply renaming the libs from .dll.a to .dll, but that didn't work. I'm hoping someone can shed light on this issue.
2013/07/10
[ "https://Stackoverflow.com/questions/17581418", "https://Stackoverflow.com", "https://Stackoverflow.com/users/887074/" ]
This post helped me a lot. The answer I found was to make sure the compiled bin files were part of my PATH variable. My CMAKE\_INSTALL\_PREFIX was C:\opencv\src\build\install and adding C:\opencv\src\build\install\x86\vc11\bin to my PATH variable made cv2 start working.
Just to make sure other users can be helped with this answer: Imagine you have compiled OpenCV and have several \*.dll and the cv2.pyd file. You need to copy those files to 'DLLs' folder within the python directory. Then import the module to check wether it is ok. I have also copied the \*.lib files into the appropriate folder. Best regards.
28,677,012
the code: ``` import os from time import * import socket import time global diskspace ##################### #display temp #uses shell script to find out temp then uses python to display it #python uses os module to run line of shell script os.system("cat /sys/class/thermal/thermal_zone0/temp > sysTemp") temp = open("sysTemp") # Open a file str = temp.read(); # read characters in sysTemp temp.close() # close opened file t=eval(str) # convert string into number t2=t*2 # multiply by 2, evaluated number t3=(t/1000.00) # convert five figure temp (milli-degrees) to degrees to two decimal places print ("temp is:") temperature = int(t3) print(temperature) def temp(): if temperature > 60: print("The temp is over 60. Cool down") elif temperature < 40: print("temp is below 40") check() #find name ################## #check for internet connection ################### #Display disk space ################### def getDiskSpace(): p = os.popen("df -h /") i = 0 while 1: i = i +1 line = p.readline() if i==2: diskspace = (line.split()[4:5]) ds = diskspace[0] print("The disk space used is:") print(ds) global ds #Display CPU usage ################### def getCPUuse(): print(os.popen("top -n1 | awk '/Cpu\(s\):/ {print $2}'").readline().strip()) #Display IP ################### s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("gmail.com",80)) IP = (s.getsockname()[0]) s.close() print("The Ip is:" + IP) getDiskSpace() ################### #writing it to a .txt file f = open("data.txt","w") #opens file with name of "test.txt" f.write("raspberry pi data.\n") f.write("ip:\n") f.write(IP + "\n") #f.write("Disk usage:" + str(ds)) f.write("temp: {0}".format(t3)) f.write("disk usage: {0}".format(ds)) f.close() temp() getCPUuse() print("...") time.sleep(10) ``` It is a program that monitors the temp, disk space, cpu usage and ip of the pi and writes it to a txt file The main problem is with this line ``` f.write("Disk usage:" + diskspace + "\n") ``` and it says that it is not defined, I 
have tried many things like creating it before the def as blank but then nothing gets written for diskspace on the text file. The other things write to the text file but not this one the output: temp is: 58 The Ip is:192.168.1.36 Traceback (most recent call last): File "temp.py", line 74, in f.write("Disk usage:" + diskspace) NameError: global name 'diskspace' is not defined if you remove the bit about the writing bit, {'40%'] usually gets printed for diskspace. ive added some changed code that prints the data, throws not errors but doesnt write it.
2015/02/23
[ "https://Stackoverflow.com/questions/28677012", "https://Stackoverflow.com", "https://Stackoverflow.com/users/3910964/" ]
You have few options: 1. Initialize arrays in constructor MesssageParsingTest using syntax : `firstMessage{0x24,0x54,0x3b,0x72,0x8b,0x03,0x24,0x29,0x23,0x43,0x66,0x22,0x53,0x41,0x11,0x62,0x10}` in initializer list. 2. Create static const array containing your message, and either copy it to member variable using memcpy, or use static member and get rid of firstMessage member variable. Declare const static member in .h inside class definition: ``` static const unsigned char kFirstMessage[]; ``` and define + initialize it in .ccp ``` const unsigned char MessageParsingTest::kFirstMessage[] = "\0x24\0x54\0x3b\0x72\0x8b\0x03\0x24\0x29\0x23\0x43\0x66\0x22\0x53\0x41\0x11\0x62\0x10"; ``` I would prefer static const member if you do not intend to modify this array later, since it makes the intention cleaner.
You can use a temporary buffer and then copy into you member as this: ``` void MessageParsingTest::setUp() { unsigned char tmp[1500] = {0x24,0x54,0x3b,0x72,0x8b,0x03,0x24,0x29,0x23,0x43,0x66,0x22,0x53,0x41,0x11,0x62,0x10}; memcpy(firstMessage, tmp, 1500); } ```
28,677,012
the code: ``` import os from time import * import socket import time global diskspace ##################### #display temp #uses shell script to find out temp then uses python to display it #python uses os module to run line of shell script os.system("cat /sys/class/thermal/thermal_zone0/temp > sysTemp") temp = open("sysTemp") # Open a file str = temp.read(); # read characters in sysTemp temp.close() # close opened file t=eval(str) # convert string into number t2=t*2 # multiply by 2, evaluated number t3=(t/1000.00) # convert five figure temp (milli-degrees) to degrees to two decimal places print ("temp is:") temperature = int(t3) print(temperature) def temp(): if temperature > 60: print("The temp is over 60. Cool down") elif temperature < 40: print("temp is below 40") check() #find name ################## #check for internet connection ################### #Display disk space ################### def getDiskSpace(): p = os.popen("df -h /") i = 0 while 1: i = i +1 line = p.readline() if i==2: diskspace = (line.split()[4:5]) ds = diskspace[0] print("The disk space used is:") print(ds) global ds #Display CPU usage ################### def getCPUuse(): print(os.popen("top -n1 | awk '/Cpu\(s\):/ {print $2}'").readline().strip()) #Display IP ################### s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("gmail.com",80)) IP = (s.getsockname()[0]) s.close() print("The Ip is:" + IP) getDiskSpace() ################### #writing it to a .txt file f = open("data.txt","w") #opens file with name of "test.txt" f.write("raspberry pi data.\n") f.write("ip:\n") f.write(IP + "\n") #f.write("Disk usage:" + str(ds)) f.write("temp: {0}".format(t3)) f.write("disk usage: {0}".format(ds)) f.close() temp() getCPUuse() print("...") time.sleep(10) ``` It is a program that monitors the temp, disk space, cpu usage and ip of the pi and writes it to a txt file The main problem is with this line ``` f.write("Disk usage:" + diskspace + "\n") ``` and it says that it is not defined, I 
have tried many things like creating it before the def as blank but then nothing gets written for diskspace on the text file. The other things write to the text file but not this one the output: temp is: 58 The Ip is:192.168.1.36 Traceback (most recent call last): File "temp.py", line 74, in f.write("Disk usage:" + diskspace) NameError: global name 'diskspace' is not defined if you remove the bit about the writing bit, {'40%'] usually gets printed for diskspace. ive added some changed code that prints the data, throws not errors but doesnt write it.
2015/02/23
[ "https://Stackoverflow.com/questions/28677012", "https://Stackoverflow.com", "https://Stackoverflow.com/users/3910964/" ]
You have few options: 1. Initialize arrays in constructor MesssageParsingTest using syntax : `firstMessage{0x24,0x54,0x3b,0x72,0x8b,0x03,0x24,0x29,0x23,0x43,0x66,0x22,0x53,0x41,0x11,0x62,0x10}` in initializer list. 2. Create static const array containing your message, and either copy it to member variable using memcpy, or use static member and get rid of firstMessage member variable. Declare const static member in .h inside class definition: ``` static const unsigned char kFirstMessage[]; ``` and define + initialize it in .ccp ``` const unsigned char MessageParsingTest::kFirstMessage[] = "\0x24\0x54\0x3b\0x72\0x8b\0x03\0x24\0x29\0x23\0x43\0x66\0x22\0x53\0x41\0x11\0x62\0x10"; ``` I would prefer static const member if you do not intend to modify this array later, since it makes the intention cleaner.
Here is one way to do it. ``` void MessageParsingTest::setUp() { unsigned char x[] = {0x24,0x54,0x3b,0x72,0x8b,0x03,0x24,0x29,0x23,0x43,0x66,0x22,0x53,0x41,0x11,0x62,0x10}; ::memcpy(firstMessage, x, sizeof(x)); } ``` If you are using C++11, you can also initialize the firstMessage in the class member initialization list as ``` MessageParsingTest::MessageParsingTest() : firstMessage{0x24,0x54,0x3b,0x72,0x8b,0x03,0x24,0x29,0x23,0x43,0x66,0x22,0x53,0x41,0x11,0x62,0x10}, ... ```
28,677,012
the code: ``` import os from time import * import socket import time global diskspace ##################### #display temp #uses shell script to find out temp then uses python to display it #python uses os module to run line of shell script os.system("cat /sys/class/thermal/thermal_zone0/temp > sysTemp") temp = open("sysTemp") # Open a file str = temp.read(); # read characters in sysTemp temp.close() # close opened file t=eval(str) # convert string into number t2=t*2 # multiply by 2, evaluated number t3=(t/1000.00) # convert five figure temp (milli-degrees) to degrees to two decimal places print ("temp is:") temperature = int(t3) print(temperature) def temp(): if temperature > 60: print("The temp is over 60. Cool down") elif temperature < 40: print("temp is below 40") check() #find name ################## #check for internet connection ################### #Display disk space ################### def getDiskSpace(): p = os.popen("df -h /") i = 0 while 1: i = i +1 line = p.readline() if i==2: diskspace = (line.split()[4:5]) ds = diskspace[0] print("The disk space used is:") print(ds) global ds #Display CPU usage ################### def getCPUuse(): print(os.popen("top -n1 | awk '/Cpu\(s\):/ {print $2}'").readline().strip()) #Display IP ################### s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("gmail.com",80)) IP = (s.getsockname()[0]) s.close() print("The Ip is:" + IP) getDiskSpace() ################### #writing it to a .txt file f = open("data.txt","w") #opens file with name of "test.txt" f.write("raspberry pi data.\n") f.write("ip:\n") f.write(IP + "\n") #f.write("Disk usage:" + str(ds)) f.write("temp: {0}".format(t3)) f.write("disk usage: {0}".format(ds)) f.close() temp() getCPUuse() print("...") time.sleep(10) ``` It is a program that monitors the temp, disk space, cpu usage and ip of the pi and writes it to a txt file The main problem is with this line ``` f.write("Disk usage:" + diskspace + "\n") ``` and it says that it is not defined, I 
have tried many things like creating it before the def as blank but then nothing gets written for diskspace on the text file. The other things write to the text file but not this one the output: temp is: 58 The Ip is:192.168.1.36 Traceback (most recent call last): File "temp.py", line 74, in f.write("Disk usage:" + diskspace) NameError: global name 'diskspace' is not defined if you remove the bit about the writing bit, {'40%'] usually gets printed for diskspace. ive added some changed code that prints the data, throws not errors but doesnt write it.
2015/02/23
[ "https://Stackoverflow.com/questions/28677012", "https://Stackoverflow.com", "https://Stackoverflow.com/users/3910964/" ]
Here is one way to do it. ``` void MessageParsingTest::setUp() { unsigned char x[] = {0x24,0x54,0x3b,0x72,0x8b,0x03,0x24,0x29,0x23,0x43,0x66,0x22,0x53,0x41,0x11,0x62,0x10}; ::memcpy(firstMessage, x, sizeof(x)); } ``` If you are using C++11, you can also initialize the firstMessage in the class member initialization list as ``` MessageParsingTest::MessageParsingTest() : firstMessage{0x24,0x54,0x3b,0x72,0x8b,0x03,0x24,0x29,0x23,0x43,0x66,0x22,0x53,0x41,0x11,0x62,0x10}, ... ```
You can use a temporary buffer and then copy into you member as this: ``` void MessageParsingTest::setUp() { unsigned char tmp[1500] = {0x24,0x54,0x3b,0x72,0x8b,0x03,0x24,0x29,0x23,0x43,0x66,0x22,0x53,0x41,0x11,0x62,0x10}; memcpy(firstMessage, tmp, 1500); } ```
26,746,127
I'm in an interactive Python 2.7 Terminal (Terminal default output is "utf-8"). I have a string from the internet, lets call it `a` ``` >>> a u'M\xfcssen' >>> a[1] u'\xfc' ``` I wonder why its value is not `ü` so I try ``` >>> print(a) Müssen >>> print(a[1]) ü ``` which works as intended. So my first question is, what does `print a` do, which is missing if i just type `a`? and out of curiosity: Why is it that I get another output for the following in the same python terminal session? ``` >>> "ü" '\xc3\xbc' >>> print "ü" Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/lib/python2.7/codecs.py", line 351, in write data, consumed = self.encode(object, self.errors) UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 0: ordinal not in range(128) >>> print u"ü" ü ```
2014/11/04
[ "https://Stackoverflow.com/questions/26746127", "https://Stackoverflow.com", "https://Stackoverflow.com/users/620053/" ]
Can you try this ``` updateToServer: function(e) { e.preventDefault(); var id = e.target.getAttribute('data-id'); var file = this.collection.get(id); var data = {}; $(e.target).serializeArray().map(function(x) {data[x.name] = x.value;}); this.$el.modal('hide'); setTimeout(function(){ file.save(data); }, 200); //try with different values for timer } ``` I have added a 200 millisecond timer. This might not be your actual solution but at least you will come to know if there is some asynchronous stuff going on before 'file' is actually formed. Try different values for the timer. I mean keep increasing the timer and see if you are still not able to get rid of the error. Once you are sure that 'file' is formed asynchronously then you can look into why that's happening. And try `console.log`s instead of debuggers for debugging so that you can test without pausing the execution. Hope that helps.
This was not at all what I suspected, and I hadn't given enough information in the question without realizing it. The line in my code that triggered the exception was `file.save()`, but the actual exception was happening inside Backgrid. I provide a form to allow users to update models from the collection displayed in a grid. A particular column is defined as an integer column, but I hadn't converted the value coming from the form to an integer. As a result, Backgrid was trying to run `toFixed` on a string. I modified my form serialization code to convert strings containing only integers into integers. Now, everything works as expected. Here's that serialization code: ``` $(e.target).serializeArray().map(function(x) { data[x.name] = x.value === 'on' ? true : x.value; if (!isNaN(parseInt(data[x.name])) && isFinite(data[x.name])) { data[x.name] = parseInt(data[x.name]); } }); ``` If I had to guess, I'd say that's probably a bit naive, but it seems to be working well in my application. Thanks to everyone for the help!
26,504,852
On `python/flask/gunicorn/heroku` stack, I need to set an environment variable based on the content of another env variable. For background, I run a python/Flask app on heroku. I communicate with an addon via a environment variable that contains credentials and url. The library I use to communicate with the addon needs that data, but needs it in a different format. Also, it needs it as an environment variable. So far, I had cloned and reformatted the environment variable manually, but that just brought disaster because the add-on provider was changing passwords. OK, so I need to automate reading one environment variable and setting another, before the library starts looking for it. The naive approach I tried was (file `app.py`): ``` app = Flask(__name__, ...) env_in = os.environ['ADDON_ENV_VAR'] os.environ['LIB_ENV_VAR'] = some_processing(env_in) ... if __name__ == '__main__': app.run(host='0.0.0.0', port='5000') ``` That works fine when doing `python app.py` for debugging, but it fails when running via `gunicorn app:app -b '0.0.0.0:5000'` (as a `Procfile`for `foreman`) for deploying a real webserver. In the second case, the env var doesn't seem to make it to the OS level. I'm not sure about how wsgi works, but maybe the environment changes once gunicorn starts running the app. What can I do to have the environment variable set at the place it's needed?
2014/10/22
[ "https://Stackoverflow.com/questions/26504852", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1827442/" ]
you could also set the enviroment variables at run time as such ``` gunicorn -b 0.0.0.0:5000 -e env_var1=enviroment1 -e env_var2=environment2 ```
OK, so the answer (via Kenneth R, Heroku) is to set the environment before running gunicorn. I.e. write a Procfile like ``` web: sh appstarter.sh ``` which calls a wrapper (shell, python, ..) that sets up the environment variable and then runs the gunicorn command, like for example appstarter.sh: ``` export LIB_ENV_VAR=${ADDON_ENV_VAR}/some/additional_string gunicorn app:app -b '0.0.0.0:5000' ``` Just in case it helps anyone else out there.
26,504,852
On `python/flask/gunicorn/heroku` stack, I need to set an environment variable based on the content of another env variable. For background, I run a python/Flask app on heroku. I communicate with an addon via a environment variable that contains credentials and url. The library I use to communicate with the addon needs that data, but needs it in a different format. Also, it needs it as an environment variable. So far, I had cloned and reformatted the environment variable manually, but that just brought disaster because the add-on provider was changing passwords. OK, so I need to automate reading one environment variable and setting another, before the library starts looking for it. The naive approach I tried was (file `app.py`): ``` app = Flask(__name__, ...) env_in = os.environ['ADDON_ENV_VAR'] os.environ['LIB_ENV_VAR'] = some_processing(env_in) ... if __name__ == '__main__': app.run(host='0.0.0.0', port='5000') ``` That works fine when doing `python app.py` for debugging, but it fails when running via `gunicorn app:app -b '0.0.0.0:5000'` (as a `Procfile`for `foreman`) for deploying a real webserver. In the second case, the env var doesn't seem to make it to the OS level. I'm not sure about how wsgi works, but maybe the environment changes once gunicorn starts running the app. What can I do to have the environment variable set at the place it's needed?
2014/10/22
[ "https://Stackoverflow.com/questions/26504852", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1827442/" ]
OK, so the answer (via Kenneth R, Heroku) is to set the environment before running gunicorn. I.e. write a Procfile like ``` web: sh appstarter.sh ``` which calls a wrapper (shell, python, ..) that sets up the environment variable and then runs the gunicorn command, like for example appstarter.sh: ``` export LIB_ENV_VAR=${ADDON_ENV_VAR}/some/additional_string gunicorn app:app -b '0.0.0.0:5000' ``` Just in case it helps anyone else out there.
Set environment variable (key=value). Pass variables to the execution environment. Ex.: $ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app and test for the foo variable environment in your application. from: <http://docs.gunicorn.org/en/stable/settings.html>
26,504,852
On `python/flask/gunicorn/heroku` stack, I need to set an environment variable based on the content of another env variable. For background, I run a python/Flask app on heroku. I communicate with an addon via a environment variable that contains credentials and url. The library I use to communicate with the addon needs that data, but needs it in a different format. Also, it needs it as an environment variable. So far, I had cloned and reformatted the environment variable manually, but that just brought disaster because the add-on provider was changing passwords. OK, so I need to automate reading one environment variable and setting another, before the library starts looking for it. The naive approach I tried was (file `app.py`): ``` app = Flask(__name__, ...) env_in = os.environ['ADDON_ENV_VAR'] os.environ['LIB_ENV_VAR'] = some_processing(env_in) ... if __name__ == '__main__': app.run(host='0.0.0.0', port='5000') ``` That works fine when doing `python app.py` for debugging, but it fails when running via `gunicorn app:app -b '0.0.0.0:5000'` (as a `Procfile`for `foreman`) for deploying a real webserver. In the second case, the env var doesn't seem to make it to the OS level. I'm not sure about how wsgi works, but maybe the environment changes once gunicorn starts running the app. What can I do to have the environment variable set at the place it's needed?
2014/10/22
[ "https://Stackoverflow.com/questions/26504852", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1827442/" ]
you could also set the enviroment variables at run time as such ``` gunicorn -b 0.0.0.0:5000 -e env_var1=enviroment1 -e env_var2=environment2 ```
Set environment variable (key=value). Pass variables to the execution environment. Ex.: $ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app and test for the foo variable environment in your application. from: <http://docs.gunicorn.org/en/stable/settings.html>
14,592,879
I cannot run any script by pressing F5 or selecting run from the menus in IDLE. It stopped working suddenly. No errors are coughed up. IDLE simply does nothing at all. Tried reinstalling python to no effect. Cannot run even the simplest script. Thank you for any help or suggestions you have. Running Python 2.6.5 on windows 7. Could not resolve the problem with idle. I have switched to using pyDev in Aptana Studio 3.
2013/01/29
[ "https://Stackoverflow.com/questions/14592879", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2022926/" ]
I am using a Dell laptop, and ran into this issue. I found that if I pressed Function + F5, the program would run. On my laptop keyboard, functions key items are in blue (main functions in white). The Esc (escape) key has a blue lock with 'Fn' on it. I pressed Esc + F5, and it unlocked my function keys. I can now run a program in the editor by only pressing F5 Note: Running Python 3 - but I do not think this is an issue with Idle or Python - I think this is a keyboard issue.
Your function keys are locked,I think so. Function keys can be unlocked by fn key + esc. Then f5 will work without any issue.
6,080,930
I have a problem setting up a Virtualenv on my web host server (to install python modules later on) So far I tried this using SSH-access: ``` wget http://pypi.python.org/packages/source/v/virtualenv/virtualenv-1.5.2.tar.gz tar xzf virtualenv-1.5.2.tar.gz ~/usr/lib/python2.4 virtualenv-1.5.2/virtualenv.py ~/data/env ``` or ``` ~/usr/lib/python2.4 setup.py install ``` I don't get any errors or output and cannot find the ~/data/env-directory that should be created. What is going wrong? Or what is the next step? Thanks, Wienel
2011/05/21
[ "https://Stackoverflow.com/questions/6080930", "https://Stackoverflow.com", "https://Stackoverflow.com/users/763840/" ]
Same in Ruby. ``` require 'lib/yourlibrary.rb' ``` Or: ``` $LOAD_PATH << File.expand_path(File.dirname(FILE) + “/../lib”)) require 'yourlibrary.rb' ```
To include a gem in your project, you could download the module and place it in the same folder as your code and then do a 'require'. You can also download the module with Rubygems copy, or you can download the module from it's project page.
11,871,221
I have this piece of code which creates a new note..WHen I try to print I get the following error even though it prints the output ``` Error: C:\Python27\Basics\OOP\formytesting>python notebook.py Memo=This is my first memo, Tag=example Traceback (most recent call last): File "notebook.py", line 14, in <module> print(firstnote) TypeError: __str__ returned non-string (type NoneType) ``` note.py ``` import datetime class Note: def __init__(self, memo, tags): self.memo = memo self.tags = tags self.creation_date = datetime.date.today() def __str__(self): print('Memo={0}, Tag={1}').format(self.memo, self.tags) if __name__ == "__main__": firstnote = Note('This is my first memo','example') print(firstnote) ```
2012/08/08
[ "https://Stackoverflow.com/questions/11871221", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1050619/" ]
In the Model's `__str__` method, you are returning a value which is `null`. For example: ```py class X(models.Model): name = models.CharField(_('Name'), null=True, blank=True, max_length=150) date_of_birth = models.DateField(_('Date of birth'), null=True, blank=True) street = models.CharField(_('Street'), max_length=150, blank=True) def __str__(self): return self.name # here the value of name field might be null, so the ``` error is showing. Correct `__str__` method will be: ```py def __str__(self): return str(self.name) ```
You probably have some null value in your table. Enter to mysql and delete null value in table.
11,871,221
I have this piece of code which creates a new note..WHen I try to print I get the following error even though it prints the output ``` Error: C:\Python27\Basics\OOP\formytesting>python notebook.py Memo=This is my first memo, Tag=example Traceback (most recent call last): File "notebook.py", line 14, in <module> print(firstnote) TypeError: __str__ returned non-string (type NoneType) ``` note.py ``` import datetime class Note: def __init__(self, memo, tags): self.memo = memo self.tags = tags self.creation_date = datetime.date.today() def __str__(self): print('Memo={0}, Tag={1}').format(self.memo, self.tags) if __name__ == "__main__": firstnote = Note('This is my first memo','example') print(firstnote) ```
2012/08/08
[ "https://Stackoverflow.com/questions/11871221", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1050619/" ]
Method [\_\_str\_\_](http://docs.python.org/reference/datamodel.html#object.__str__) should return string, not print. ``` def __str__(self): return 'Memo={0}, Tag={1}'.format(self.memo, self.tags) ```
In the Model's `__str__` method, you are returning a value which is `null`. For example: ```py class X(models.Model): name = models.CharField(_('Name'), null=True, blank=True, max_length=150) date_of_birth = models.DateField(_('Date of birth'), null=True, blank=True) street = models.CharField(_('Street'), max_length=150, blank=True) def __str__(self): return self.name # here the value of name field might be null, so the ``` error is showing. Correct `__str__` method will be: ```py def __str__(self): return str(self.name) ```
11,871,221
I have this piece of code which creates a new note..WHen I try to print I get the following error even though it prints the output ``` Error: C:\Python27\Basics\OOP\formytesting>python notebook.py Memo=This is my first memo, Tag=example Traceback (most recent call last): File "notebook.py", line 14, in <module> print(firstnote) TypeError: __str__ returned non-string (type NoneType) ``` note.py ``` import datetime class Note: def __init__(self, memo, tags): self.memo = memo self.tags = tags self.creation_date = datetime.date.today() def __str__(self): print('Memo={0}, Tag={1}').format(self.memo, self.tags) if __name__ == "__main__": firstnote = Note('This is my first memo','example') print(firstnote) ```
2012/08/08
[ "https://Stackoverflow.com/questions/11871221", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1050619/" ]
In the Model's `__str__` method, you are returning a value which is `null`. For example: ```py class X(models.Model): name = models.CharField(_('Name'), null=True, blank=True, max_length=150) date_of_birth = models.DateField(_('Date of birth'), null=True, blank=True) street = models.CharField(_('Street'), max_length=150, blank=True) def __str__(self): return self.name # here the value of name field might be null, so the ``` error is showing. Correct `__str__` method will be: ```py def __str__(self): return str(self.name) ```
Just Try this: ``` def __str__(self): return f'Memo={self.memo}, Tag={self.tags}' ```
11,871,221
I have this piece of code which creates a new note..WHen I try to print I get the following error even though it prints the output ``` Error: C:\Python27\Basics\OOP\formytesting>python notebook.py Memo=This is my first memo, Tag=example Traceback (most recent call last): File "notebook.py", line 14, in <module> print(firstnote) TypeError: __str__ returned non-string (type NoneType) ``` note.py ``` import datetime class Note: def __init__(self, memo, tags): self.memo = memo self.tags = tags self.creation_date = datetime.date.today() def __str__(self): print('Memo={0}, Tag={1}').format(self.memo, self.tags) if __name__ == "__main__": firstnote = Note('This is my first memo','example') print(firstnote) ```
2012/08/08
[ "https://Stackoverflow.com/questions/11871221", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1050619/" ]
The problem that you are facing is : TypeError : **str** returned non-string (type NoneType) Here you have to understand the **str** function's working: the **str** fucntion,although is mostly used to print values but actually is designed to return a string,not to print one. In your class **str** function is calling the print directly while it is returning nothing ,that explains your error output.Since our formatted string is built, and since our function returns nothing, the None value is used. This was the explaination for your error You can solve this problem by using the return in **str** function like: \*simply returnig the string value instead of printing it ``` class Summary(models.Model): book = models.ForeignKey(Book,on_delete = models.CASCADE) summary = models.TextField(max_length=600) def __str__(self): return self.summary ``` but if the value you are returning in not of string type then you can do like this to return string value from your **str** function \*typeconverting the value to string that your **str** function returns ``` class Summary(models.Model): book = models.ForeignKey(Book,on_delete = models.CASCADE) summary = models.TextField(max_length=600) def __str__(self): return str(self.summary) ` ```
Just Try this: ``` def __str__(self): return f'Memo={self.memo}, Tag={self.tags}' ```
11,871,221
I have this piece of code which creates a new note..WHen I try to print I get the following error even though it prints the output ``` Error: C:\Python27\Basics\OOP\formytesting>python notebook.py Memo=This is my first memo, Tag=example Traceback (most recent call last): File "notebook.py", line 14, in <module> print(firstnote) TypeError: __str__ returned non-string (type NoneType) ``` note.py ``` import datetime class Note: def __init__(self, memo, tags): self.memo = memo self.tags = tags self.creation_date = datetime.date.today() def __str__(self): print('Memo={0}, Tag={1}').format(self.memo, self.tags) if __name__ == "__main__": firstnote = Note('This is my first memo','example') print(firstnote) ```
2012/08/08
[ "https://Stackoverflow.com/questions/11871221", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1050619/" ]
Method [\_\_str\_\_](http://docs.python.org/reference/datamodel.html#object.__str__) should return string, not print. ``` def __str__(self): return 'Memo={0}, Tag={1}'.format(self.memo, self.tags) ```
The problem that you are facing is : TypeError : **str** returned non-string (type NoneType) Here you have to understand the **str** function's working: the **str** fucntion,although is mostly used to print values but actually is designed to return a string,not to print one. In your class **str** function is calling the print directly while it is returning nothing ,that explains your error output.Since our formatted string is built, and since our function returns nothing, the None value is used. This was the explaination for your error You can solve this problem by using the return in **str** function like: \*simply returnig the string value instead of printing it ``` class Summary(models.Model): book = models.ForeignKey(Book,on_delete = models.CASCADE) summary = models.TextField(max_length=600) def __str__(self): return self.summary ``` but if the value you are returning in not of string type then you can do like this to return string value from your **str** function \*typeconverting the value to string that your **str** function returns ``` class Summary(models.Model): book = models.ForeignKey(Book,on_delete = models.CASCADE) summary = models.TextField(max_length=600) def __str__(self): return str(self.summary) ` ```
11,871,221
I have this piece of code which creates a new note..WHen I try to print I get the following error even though it prints the output ``` Error: C:\Python27\Basics\OOP\formytesting>python notebook.py Memo=This is my first memo, Tag=example Traceback (most recent call last): File "notebook.py", line 14, in <module> print(firstnote) TypeError: __str__ returned non-string (type NoneType) ``` note.py ``` import datetime class Note: def __init__(self, memo, tags): self.memo = memo self.tags = tags self.creation_date = datetime.date.today() def __str__(self): print('Memo={0}, Tag={1}').format(self.memo, self.tags) if __name__ == "__main__": firstnote = Note('This is my first memo','example') print(firstnote) ```
2012/08/08
[ "https://Stackoverflow.com/questions/11871221", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1050619/" ]
In the Model's `__str__` method, you are returning a value which is `null`. For example: ```py class X(models.Model): name = models.CharField(_('Name'), null=True, blank=True, max_length=150) date_of_birth = models.DateField(_('Date of birth'), null=True, blank=True) street = models.CharField(_('Street'), max_length=150, blank=True) def __str__(self): return self.name # here the value of name field might be null, so the ``` error is showing. Correct `__str__` method will be: ```py def __str__(self): return str(self.name) ```
I Had the same problem, in my case, was because i was returned a digit: ``` def __str__(self): return self.code ``` str is waiting for a str, not another. now work good with: ``` def __str__(self): return self.name ``` where name is a STRING.
11,871,221
I have this piece of code which creates a new note..WHen I try to print I get the following error even though it prints the output ``` Error: C:\Python27\Basics\OOP\formytesting>python notebook.py Memo=This is my first memo, Tag=example Traceback (most recent call last): File "notebook.py", line 14, in <module> print(firstnote) TypeError: __str__ returned non-string (type NoneType) ``` note.py ``` import datetime class Note: def __init__(self, memo, tags): self.memo = memo self.tags = tags self.creation_date = datetime.date.today() def __str__(self): print('Memo={0}, Tag={1}').format(self.memo, self.tags) if __name__ == "__main__": firstnote = Note('This is my first memo','example') print(firstnote) ```
2012/08/08
[ "https://Stackoverflow.com/questions/11871221", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1050619/" ]
You can also surround the output with str(). I had this same problem because my model had the following (as a simplified example): ``` def __str__(self): return self.pressid ``` Where pressid was an IntegerField type object. Django (and python in general) expects a string for a **str** function, so returning an integer causes this error to be thrown. ``` def __str__(self): return str(self.pressid) ``` That solved the problems I was encountering on the Django management side of the house. Hope it helps with yours.
In the Model's `__str__` method, you are returning a value which is `null`. For example: ```py class X(models.Model): name = models.CharField(_('Name'), null=True, blank=True, max_length=150) date_of_birth = models.DateField(_('Date of birth'), null=True, blank=True) street = models.CharField(_('Street'), max_length=150, blank=True) def __str__(self): return self.name # here the value of name field might be null, so the ``` error is showing. Correct `__str__` method will be: ```py def __str__(self): return str(self.name) ```
11,871,221
I have this piece of code which creates a new note..WHen I try to print I get the following error even though it prints the output ``` Error: C:\Python27\Basics\OOP\formytesting>python notebook.py Memo=This is my first memo, Tag=example Traceback (most recent call last): File "notebook.py", line 14, in <module> print(firstnote) TypeError: __str__ returned non-string (type NoneType) ``` note.py ``` import datetime class Note: def __init__(self, memo, tags): self.memo = memo self.tags = tags self.creation_date = datetime.date.today() def __str__(self): print('Memo={0}, Tag={1}').format(self.memo, self.tags) if __name__ == "__main__": firstnote = Note('This is my first memo','example') print(firstnote) ```
2012/08/08
[ "https://Stackoverflow.com/questions/11871221", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1050619/" ]
You can also surround the output with str(). I had this same problem because my model had the following (as a simplified example): ``` def __str__(self): return self.pressid ``` Where pressid was an IntegerField type object. Django (and python in general) expects a string for a **str** function, so returning an integer causes this error to be thrown. ``` def __str__(self): return str(self.pressid) ``` That solved the problems I was encountering on the Django management side of the house. Hope it helps with yours.
Just Try this: ``` def __str__(self): return f'Memo={self.memo}, Tag={self.tags}' ```
11,871,221
I have this piece of code which creates a new note..WHen I try to print I get the following error even though it prints the output ``` Error: C:\Python27\Basics\OOP\formytesting>python notebook.py Memo=This is my first memo, Tag=example Traceback (most recent call last): File "notebook.py", line 14, in <module> print(firstnote) TypeError: __str__ returned non-string (type NoneType) ``` note.py ``` import datetime class Note: def __init__(self, memo, tags): self.memo = memo self.tags = tags self.creation_date = datetime.date.today() def __str__(self): print('Memo={0}, Tag={1}').format(self.memo, self.tags) if __name__ == "__main__": firstnote = Note('This is my first memo','example') print(firstnote) ```
2012/08/08
[ "https://Stackoverflow.com/questions/11871221", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1050619/" ]
Method [\_\_str\_\_](http://docs.python.org/reference/datamodel.html#object.__str__) should return string, not print. ``` def __str__(self): return 'Memo={0}, Tag={1}'.format(self.memo, self.tags) ```
You can also surround the output with str(). I had this same problem because my model had the following (as a simplified example): ``` def __str__(self): return self.pressid ``` Where pressid was an IntegerField type object. Django (and python in general) expects a string for a **str** function, so returning an integer causes this error to be thrown. ``` def __str__(self): return str(self.pressid) ``` That solved the problems I was encountering on the Django management side of the house. Hope it helps with yours.
11,871,221
I have this piece of code which creates a new note..WHen I try to print I get the following error even though it prints the output ``` Error: C:\Python27\Basics\OOP\formytesting>python notebook.py Memo=This is my first memo, Tag=example Traceback (most recent call last): File "notebook.py", line 14, in <module> print(firstnote) TypeError: __str__ returned non-string (type NoneType) ``` note.py ``` import datetime class Note: def __init__(self, memo, tags): self.memo = memo self.tags = tags self.creation_date = datetime.date.today() def __str__(self): print('Memo={0}, Tag={1}').format(self.memo, self.tags) if __name__ == "__main__": firstnote = Note('This is my first memo','example') print(firstnote) ```
2012/08/08
[ "https://Stackoverflow.com/questions/11871221", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1050619/" ]
Method [\_\_str\_\_](http://docs.python.org/reference/datamodel.html#object.__str__) should return string, not print. ``` def __str__(self): return 'Memo={0}, Tag={1}'.format(self.memo, self.tags) ```
You probably have some null value in your table. Enter to mysql and delete null value in table.
48,375,937
I am new to python and web-scraping. I am trying to scrape a website (link is the url). I am getting an error as "'NoneType' object is not iterable", with the last line of below code. Could anyone point what could have gone wrong? ``` import requests from bs4 import BeautifulSoup from urllib.parse import urljoin url = 'https://labtestsonline.org/tests-index' soup = BeautifulSoup(requests.get(url).content, 'lxml') # Function to get hyper-links for all test components hyperlinks = [] def parseUrl(url): global hyperlinks page = requests.get(url).content soup = BeautifulSoup(page, 'lxml') for a in soup.findAll('div',{'class':'field-content'}): a = a.find('a') href = urlparse.urljoin(Url,a.get('href')) hyperlinks.append(href) parseUrl(url) # function to get header and common questions for each test component def header(url): page = requests.get(url).content soup = BeautifulSoup(page, 'lxml') h = [] commonquestions = [] for head in soup.find('div',{'class':'field-item'}).find('h1'): heading = head.get_text() h.append(heading) for q in soup.find('div',{'id':'Common_Questions'}): questions = q.get_text() commonquestions.append(questions) for i in range(0, len(hyperlinks)): header(hyperlinks[i]) ``` Below is the traceback error: ``` <ipython-input-50-d99e0af6db20> in <module>() 1 for i in range(0, len(hyperlinks)): 2 header(hyperlinks[i]) <ipython-input-49-15ac15f9071e> in header(url) 5 soup = BeautifulSoup(page, 'lxml') 6 h = [] for head in soup.find('div',{'class':'field-item'}).find('h1'): heading = head.get_text() h.append(heading) TypeError: 'NoneType' object is not iterable ```
2018/01/22
[ "https://Stackoverflow.com/questions/48375937", "https://Stackoverflow.com", "https://Stackoverflow.com/users/9238871/" ]
a bit late, but for any one else stumbling upon this. ``` const functions = require('firebase-functions'); const admin = require('firebase-admin'); admin.initializeApp(functions.config().firebase); exports.someMethod = functions.https.onRequest((req, res) => { var stuff = []; var db = admin.firestore(); db.collection("Users").doc("7vFjDJ63DmhcQiEHwl0M7hfL3Kt1").collection("blabla").get().then(snapshot => { snapshot.forEach(doc => { var newelement = { "id": doc.id, "xxxx": doc.data().xxx, "yyy": doc.data().yyy } stuff = stuff.concat(newelement); }); res.send(stuff) return ""; }).catch(reason => { res.send(reason) }) }); ```
Thanks to [Ruan's answer](https://stackoverflow.com/a/49516133/2162226), here's an example for `onCall(..)` variation: ``` exports.fireGetColors = functions.https.onCall((data, context) => { return new Promise((resolve, reject) => { var colors = {}; var db = admin.firestore(); db.collection('colors') .get() .then(snapshot => { snapshot.forEach(doc => { var key = doc.id; var color = doc.data(); color['key'] = key; colors[key] = color; }); var colorsStr = JSON.stringify(colors, null, '\t'); console.log('colors callback result : ' + colorsStr); resolve(colors); }) .catch(reason => { console.log('db.collection("colors").get gets err, reason: ' + reason); reject(reason); }); }); }); ```
48,375,937
I am new to python and web-scraping. I am trying to scrape a website (link is the url). I am getting an error as "'NoneType' object is not iterable", with the last line of below code. Could anyone point what could have gone wrong? ``` import requests from bs4 import BeautifulSoup from urllib.parse import urljoin url = 'https://labtestsonline.org/tests-index' soup = BeautifulSoup(requests.get(url).content, 'lxml') # Function to get hyper-links for all test components hyperlinks = [] def parseUrl(url): global hyperlinks page = requests.get(url).content soup = BeautifulSoup(page, 'lxml') for a in soup.findAll('div',{'class':'field-content'}): a = a.find('a') href = urlparse.urljoin(Url,a.get('href')) hyperlinks.append(href) parseUrl(url) # function to get header and common questions for each test component def header(url): page = requests.get(url).content soup = BeautifulSoup(page, 'lxml') h = [] commonquestions = [] for head in soup.find('div',{'class':'field-item'}).find('h1'): heading = head.get_text() h.append(heading) for q in soup.find('div',{'id':'Common_Questions'}): questions = q.get_text() commonquestions.append(questions) for i in range(0, len(hyperlinks)): header(hyperlinks[i]) ``` Below is the traceback error: ``` <ipython-input-50-d99e0af6db20> in <module>() 1 for i in range(0, len(hyperlinks)): 2 header(hyperlinks[i]) <ipython-input-49-15ac15f9071e> in header(url) 5 soup = BeautifulSoup(page, 'lxml') 6 h = [] for head in soup.find('div',{'class':'field-item'}).find('h1'): heading = head.get_text() h.append(heading) TypeError: 'NoneType' object is not iterable ```
2018/01/22
[ "https://Stackoverflow.com/questions/48375937", "https://Stackoverflow.com", "https://Stackoverflow.com/users/9238871/" ]
a bit late, but for any one else stumbling upon this. ``` const functions = require('firebase-functions'); const admin = require('firebase-admin'); admin.initializeApp(functions.config().firebase); exports.someMethod = functions.https.onRequest((req, res) => { var stuff = []; var db = admin.firestore(); db.collection("Users").doc("7vFjDJ63DmhcQiEHwl0M7hfL3Kt1").collection("blabla").get().then(snapshot => { snapshot.forEach(doc => { var newelement = { "id": doc.id, "xxxx": doc.data().xxx, "yyy": doc.data().yyy } stuff = stuff.concat(newelement); }); res.send(stuff) return ""; }).catch(reason => { res.send(reason) }) }); ```
In 2022, I am trying to do this thing in "Modular" way as what firebase has for version >= 9. Using typescript too as an addition :). Thanks to [Ruan](https://stackoverflow.com/users/1713519/ruan) for the inspiration. So, here is how I made it ( similar to the following ): ``` import * as functions from "firebase-functions"; import { getFirestore } from "firebase-admin/firestore"; import { initializeApp } from "firebase-admin/app"; initializeApp(functions.config().firebase); export const someMethod = functions.https.onRequest((req, res) => { let stuff: any[] = []; let db = getFirestore(); db.collection("Users").doc("7vFjDJ63DmhcQiEHwl0M7hfL3Kt1").collection("blabla").get().then(snapshot => { snapshot.forEach(doc => { var newelement = { "id": doc.id, "xxxx": doc.data().xxx, "yyy": doc.data().yyy } stuff = stuff.concat(newelement); }); res.send(stuff) return ""; }).catch(reason => { res.send(reason) }) }); ```
48,375,937
I am new to python and web-scraping. I am trying to scrape a website (link is the url). I am getting an error as "'NoneType' object is not iterable", with the last line of below code. Could anyone point what could have gone wrong? ``` import requests from bs4 import BeautifulSoup from urllib.parse import urljoin url = 'https://labtestsonline.org/tests-index' soup = BeautifulSoup(requests.get(url).content, 'lxml') # Function to get hyper-links for all test components hyperlinks = [] def parseUrl(url): global hyperlinks page = requests.get(url).content soup = BeautifulSoup(page, 'lxml') for a in soup.findAll('div',{'class':'field-content'}): a = a.find('a') href = urlparse.urljoin(Url,a.get('href')) hyperlinks.append(href) parseUrl(url) # function to get header and common questions for each test component def header(url): page = requests.get(url).content soup = BeautifulSoup(page, 'lxml') h = [] commonquestions = [] for head in soup.find('div',{'class':'field-item'}).find('h1'): heading = head.get_text() h.append(heading) for q in soup.find('div',{'id':'Common_Questions'}): questions = q.get_text() commonquestions.append(questions) for i in range(0, len(hyperlinks)): header(hyperlinks[i]) ``` Below is the traceback error: ``` <ipython-input-50-d99e0af6db20> in <module>() 1 for i in range(0, len(hyperlinks)): 2 header(hyperlinks[i]) <ipython-input-49-15ac15f9071e> in header(url) 5 soup = BeautifulSoup(page, 'lxml') 6 h = [] for head in soup.find('div',{'class':'field-item'}).find('h1'): heading = head.get_text() h.append(heading) TypeError: 'NoneType' object is not iterable ```
2018/01/22
[ "https://Stackoverflow.com/questions/48375937", "https://Stackoverflow.com", "https://Stackoverflow.com/users/9238871/" ]
Thanks to [Ruan's answer](https://stackoverflow.com/a/49516133/2162226), here's an example for `onCall(..)` variation: ``` exports.fireGetColors = functions.https.onCall((data, context) => { return new Promise((resolve, reject) => { var colors = {}; var db = admin.firestore(); db.collection('colors') .get() .then(snapshot => { snapshot.forEach(doc => { var key = doc.id; var color = doc.data(); color['key'] = key; colors[key] = color; }); var colorsStr = JSON.stringify(colors, null, '\t'); console.log('colors callback result : ' + colorsStr); resolve(colors); }) .catch(reason => { console.log('db.collection("colors").get gets err, reason: ' + reason); reject(reason); }); }); }); ```
In 2022, I am trying to do this thing in "Modular" way as what firebase has for version >= 9. Using typescript too as an addition :). Thanks to [Ruan](https://stackoverflow.com/users/1713519/ruan) for the inspiration. So, here is how I made it ( similar to the following ): ``` import * as functions from "firebase-functions"; import { getFirestore } from "firebase-admin/firestore"; import { initializeApp } from "firebase-admin/app"; initializeApp(functions.config().firebase); export const someMethod = functions.https.onRequest((req, res) => { let stuff: any[] = []; let db = getFirestore(); db.collection("Users").doc("7vFjDJ63DmhcQiEHwl0M7hfL3Kt1").collection("blabla").get().then(snapshot => { snapshot.forEach(doc => { var newelement = { "id": doc.id, "xxxx": doc.data().xxx, "yyy": doc.data().yyy } stuff = stuff.concat(newelement); }); res.send(stuff) return ""; }).catch(reason => { res.send(reason) }) }); ```
68,584,934
I want to append to a csv file, some data from redshift tables, using the `pandas` module in python. From python, I can successfully connect and retrieve rows from redshift tables using the `psycopg2` module. Now, I am storing datewise data on the csv. So I need to first create a new date column in the csv, then append the data retrieved in that new column. I am using the following commands to read from redshift tables: ``` conn=psycopg2.connect( host='my_db_hostname', port=`portnumber`, user='username', password='password', dbname='db') conn.autocommit = True cur=conn.cursor() cur.execute(""" select emp_name, emp_login_count from public.emp_login_detail where login_date=current_date """) records=cur.fetchall() cur.close() ``` Now, I want to append these emp\_name and emp\_login\_count columns to the existing csv. Below is a snapshot of csv: [![enter image description here](https://i.stack.imgur.com/QG6ln.png)](https://i.stack.imgur.com/QG6ln.png) Everyday I need to add new date column in csv and then I need to put the emp\_login\_count against respective person's name. I am new to Pandas and have no idea how to implement this. Can someone please help me out?
2021/07/30
[ "https://Stackoverflow.com/questions/68584934", "https://Stackoverflow.com", "https://Stackoverflow.com/users/15948815/" ]
I solved the problem with the following: ./tsconfig.json ``` { "compilerOptions": { "isolatedModules": true, ... }, "exclude": ["cypress/**/*"] } ``` ./cypress/tsconfig.json ``` { "extends": "../tsconfig.json", "compilerOptions": { "isolatedModules": false }, "include": [ "../node_modules/cypress" ] } ```
It seems to be a known issue of type conflicts between cypress and jest. Most accounts have detected that the problem started occurring from cypress v10.x onward. The *following links* corroborate the OP's own answer, suggesting the exclusion of `cypress.config.ts` from `tsconfig.json`. It may or may not be a workaround, however it has worked for me as well. **Link 1**: *<https://github.com/cypress-io/cypress/issues/22059>* **Link 2**: *<https://github.com/nrwl/nx/issues/863>* *It should be noted that the tests executed successfully despite the type clash, for me, despite not having yet excluded cypress from `tsconfig.json`.* If your own answer has solved your issue, then you can mark it as answered and close it.
68,584,934
I want to append to a csv file, some data from redshift tables, using the `pandas` module in python. From python, I can successfully connect and retrieve rows from redshift tables using the `psycopg2` module. Now, I am storing datewise data on the csv. So I need to first create a new date column in the csv, then append the data retrieved in that new column. I am using the following commands to read from redshift tables: ``` conn=psycopg2.connect( host='my_db_hostname', port=`portnumber`, user='username', password='password', dbname='db') conn.autocommit = True cur=conn.cursor() cur.execute(""" select emp_name, emp_login_count from public.emp_login_detail where login_date=current_date """) records=cur.fetchall() cur.close() ``` Now, I want to append these emp\_name and emp\_login\_count columns to the existing csv. Below is a snapshot of csv: [![enter image description here](https://i.stack.imgur.com/QG6ln.png)](https://i.stack.imgur.com/QG6ln.png) Everyday I need to add new date column in csv and then I need to put the emp\_login\_count against respective person's name. I am new to Pandas and have no idea how to implement this. Can someone please help me out?
2021/07/30
[ "https://Stackoverflow.com/questions/68584934", "https://Stackoverflow.com", "https://Stackoverflow.com/users/15948815/" ]
I solved the problem with the following: ./tsconfig.json ``` { "compilerOptions": { "isolatedModules": true, ... }, "exclude": ["cypress/**/*"] } ``` ./cypress/tsconfig.json ``` { "extends": "../tsconfig.json", "compilerOptions": { "isolatedModules": false }, "include": [ "../node_modules/cypress" ] } ```
In my case, it was enough to exclude solely the Cypress configuration file name in the project's `tsconfig.json`. Didn't need "isolatedModules" flag. Example: ``` "exclude": [ ... "cypress.config.ts", ... ] ``` Worked with webpack, jest, react-testing-library with extensions and Cypress stack. Note: if your Webpack uses a different `tsconfig.json` make sure you exclude it there too. Otherwise, you will not see any error in IDE but you will see them whilst making a build.
67,662,674
My Input JSON data: ``` { "data": [ { "config": "current", "id": "0" }, { "config": "current", "id": "1" }, { "config": "current", "id": "2" }, { "config": "current", "id": "3" }, { "config": "previous", "id": "4", }, { "config": "previous", "id": "5" }, { "config": "current", "id": "6" } ] } ``` I want to form a dictionary of lists out of above input data based on common key/value pair: ``` { "current": ["0", "1", "2", "3", "6"], "previous": ["4", "5"] } ``` How can this be achieved using python?
2021/05/23
[ "https://Stackoverflow.com/questions/67662674", "https://Stackoverflow.com", "https://Stackoverflow.com/users/11882985/" ]
Assuming you already know [how to parse JSON](https://stackoverflow.com/q/7771011/4518341), you can do this: ``` d = { "data": [ {"config": "current", "id": "0"}, {"config": "current", "id": "1"}, {"config": "current", "id": "2"}, {"config": "current", "id": "3"}, {"config": "previous", "id": "4"}, {"config": "previous", "id": "5"}, {"config": "current", "id": "6"}] } result = {} for d0 in d['data']: ids = result.setdefault(d0['config'], []) ids.append(d0['id']) print(result) # -> {'current': ['0', '1', '2', '3', '6'], 'previous': ['4', '5']} ``` * [`dict.setdefault()`](https://stackoverflow.com/q/7771011/4518341) is used to get the id list if it exists, or if not, set it to a default, which is an empty list here. It's functionally the same as this: ``` config = d0['config'] if config not in result: result[config] = [] result[config].append(d0['id']) ``` You could also use [`collections.defaultdict(list)`](https://docs.python.org/3/library/collections.html#collections.defaultdict) to do the same thing even more easily. *[This explanation is taken from [my answer here](https://stackoverflow.com/a/65587884/4518341).]*
``` jsn = { "data": [ {"config": "current", "id": "0"}, {"config": "current", "id": "1"}, {"config": "current", "id": "2"}, {"config": "current", "id": "3"}, {"config": "previous", "id": "4",}, {"config": "previous", "id": "5"}, {"config": "current", "id": "6"} ] } current = [x["id"] for x in jsn["data"] if x["config"] == "current"] previous = [x["id"] for x in jsn["data"] if x["config"] == "previous"] res = {"current": current, "previous": previous} print(res) # {'current': ['0', '1', '2', '3', '6'], 'previous': ['4', '5']} ``` Or the same algorigthm with a function: ``` def get_ids(json_string, key): return [x["id"] for x in json_string["data"] if x["config"] == key] res = { "current": get_ids(jsn, "current"), "previous": get_ids(jsn, "previous") } print(res) # {'current': ['0', '1', '2', '3', '6'], 'previous': ['4', '5']} ```
51,937,449
For my Coursework which I am desperately struggling with I have tried to set my inputs into a dictionary and then use this to format and print the string so that it is displayed as shown below. > > > ``` > Surname, Forename Payroll Department Salary > > ``` > > The name should be displayed using the format shown above so you will need to create a string containing the name in this format and print it in a fixed-width field. You may assume that no name will contain more than 30 characters when displayed in this format, no department will contain more than 15 characters, payroll numbers will contain at most 5 digits and all salaries will be integers less than 100,000. > > > So far I have only managed this as every time I seem to alter any of line 9it comes back with an error saying that "tuple indices must be integers or slices, not strings" but I have no idea how to do this. ``` payroll = int(input("Enter your Payroll.")) department = input("Enter your Department Name.") salary = int(input("Enter your Salary.")) forename = input("Enter your Forename.") surname = input("Enter your Surname.") list_lect = payroll, department, salary, forename, surname str = '{0[4]},{0[3]}{0[0:5]} {0[2]} {0[3]}'.format(list_lect) print(str) ``` Any help would be much appreciated from someone struggling with python.
2018/08/20
[ "https://Stackoverflow.com/questions/51937449", "https://Stackoverflow.com", "https://Stackoverflow.com/users/10251613/" ]
While it would be simple to make a print function to print the way you want: ``` a = ('Surname', 'Forename', 'Payroll', 'Department', 'Salary') def printer(tup): print_string = str("(") pad = 24 print_string += ", ".join(tup[:2]).ljust(pad) print_string += ", ".join(tup[2:4]).ljust(pad) print_string += tup[-1] + ")" print(print_string) >>> printer(a) (Surname, Forename Payroll, Department Salary) ``` I would suggest that it would be cleaner to handle this a different way. Perhaps might I recommend taking in the values separately and then combining them in a named way. Like this ``` payroll = input("Enter your Payroll.") department = input("Enter your Department Name.") salary = input("Enter your Salary.") forename = input("Enter your Forename.") surname = input("Enter your Surname.") ``` You can then perform which ever grouping you want and print them in a more sane manner ``` print("%s, %s %s, %s %s" % (surename, forename, .....etc) ``` and then you can store them in a data structure that makes sense as well
Why do you need this? Printing a tuple with spacing is impossible to my knowledge, but I'm sure theres another way to achieve what you're looking for. Aside from that, there is a kind of work around, although you aren't printing a tuple, to say. ``` indexs = { payroll = 0, dept = 1, salary = 2, name = 3, surname = 4 } str = "('{surname}', '{name}' '{payroll}', '{dept}' '{salary}')".format(surname = z[indexs[surname]], name = z[indexs[name]], payroll = z[indexs[payroll]], dept = z[indexs[dept]], salary = z[indexs[salary]]) print(str) ``` Its not perfect, as its just string formatting, but if you want your output to look exactly as you said, this is the only way. Of course your aim might be very different. Anyway, hope this helps
67,967,272
I am trying to program a calculator using python. It does not let me run the code because this error tells that: ValueError: could not convert string to float: '' This code was working but suddenly this error showed up. Could anyone help me with telling what I should change or add. This is the part of the code where the error occurs. ``` def operation(self, op): self.current = float(self.current) if self.check_sum: self.valid_function() elif not self.result: self.total = self.current self.input_value = True self.check_sum = True self.op = op self.result = False ```
2021/06/14
[ "https://Stackoverflow.com/questions/67967272", "https://Stackoverflow.com", "https://Stackoverflow.com/users/15274509/" ]
As per Lombok documentation (<https://projectlombok.org/api/lombok/AllArgsConstructor.html>): > > An all-args constructor requires one argument for every field in the class. > > > Obviously you haven't provided id as a constructor argument.
If you still need some constructor having not all attributes you can use `lombok.NonNull` & `@RequiredArgsConstrutor`. Simplified example: ``` @AllArgsConstructor @NoArgsConstructor @RequiredArgsConstructor public class Booking { private Long id; @lombok.NonNull private Date startDate; } ``` will provide you with: ``` public Booking() ... public Booking(Date startDate) ... public Booking(Long id, Date startDate) ... ```
63,245,187
I am just starting to get the concept of what [Prometheus](https://prometheus.io/docs/prometheus/latest/getting_started/) is, and I have done a couple of examples already. I can understand how Prometheus monitors some data, even the one generated by itself and also some data related to a python application for example. My question is more simple though. If I have a text file of data already generated (for example some metric of something), is there a way for this data be fed to Prometheus so that I can generate queries or visualize the data?
2020/08/04
[ "https://Stackoverflow.com/questions/63245187", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4451521/" ]
Short answer: No. If you actually have text files with data you want to analyze I'd suggest you to write the data to another TMDB (InfluxDB for example) or a plain old SQL database and then connect it with Grafana. Also take a look at PowerBI. I prefer it for data that is scoped more towards business analytics than monitoring. Long answer: There is a unpublished HTTP API that allows you to push metrics in the JSON format. See the following two issues: <https://github.com/kube-reporting/metering-operator/issues/640> <https://github.com/kube-reporting/metering-operator/issues/656>
While it is impossible to import historical data to Prometheus, such data can be imported to Prometheus-like systems such as VictoriaMetrics. See [these docs](https://victoriametrics.github.io/#how-to-import-time-series-data) for details.
69,647,562
Why does initializing the array `arr` work when it is done as a list comprehension (I think that is what the following example is --not sure), but not when each array location is initialized individually? For example, this works: (a) ``` arr=[] arr=[0 for i in range(5)] ``` but (b), ``` arr=[] arr[0]=0 arr[1]=0 ``` etc, doesn't. Isn't the `arr=[0 for i in range(5)]` instruction essentially doing what is done in (b) above in one fell swoop? I realize that array sizes need to be predefined (or allocated). So, I can understand something like `arr= [0]*5` or using numpy, `arr = np.empty(10, dtype=object)` work. However, I don't see how (a) preallocates the array dimension "ahead of time". How does python interpret (a) vs. (b) above?
2021/10/20
[ "https://Stackoverflow.com/questions/69647562", "https://Stackoverflow.com", "https://Stackoverflow.com/users/7242713/" ]
Firstly, there is no point in declaring a variable if you rebind it later anyway: ``` arr = [] # <-- this line is entirely pointless arr = [0 for i in range(5)] ``` Secondly, the two expressions ``` [0 for i in range(5)] [0] * 5 ``` **create** a new `list` object, whereas ``` arr[0] = 0 ``` mutates an existing one, namely it wants to **re**assign the first element of `arr`. Since this doesn't exist, you will see an error. You could do instead: ``` arr = [] arr.append(0) arr.append(0) ``` to fill an initially empty `list` incrementally. Note that a Python list is not an `Array` in, let's say, the Java sense that it has a predefined size. It is more like an `ArrayList`.
It doesn't pre-allocate. It's basically just appending in a loop, just in nice form (syntactic sugar). Why it doesn't pre-allocate? Because to pre-allocate, we would need to know the length of the iterable, which may be a generator and it would use it up. And also, comprehension can have an if clause, limiting what eventually gets into the list. (See also generator comprehensions, which create generators - no pre-allocation because it's lazily evaluated) --- Let's take a look at documentation: <https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions> > > A list comprehension consists of brackets containing an expression followed by a for clause, then zero or more for or if clauses. The result will be a new list resulting from evaluating the expression in the context of the for and if clauses which follow it. For example, this listcomp combines the elements of two lists if they are not equal: > > > ``` >>> [(x, y) for x in [1,2,3] for y in [3,1,4] if x != y] [(1, 3), (1, 4), (2, 3), (2, 1), (2, 4), (3, 1), (3, 4)] ``` > > and it’s equivalent to: > > > ``` >>> combs = [] >>> for x in [1,2,3]: ... for y in [3,1,4]: ... if x != y: ... combs.append((x, y)) ... >>> combs [(1, 3), (1, 4), (2, 3), (2, 1), (2, 4), (3, 1), (3, 4)] ``` See? Equivalent to append, not to pre-allocated (n\*[0]) list.
69,647,562
Why does initializing the array `arr` work when it is done as a list comprehension (I think that is what the following example is --not sure), but not when each array location is initialized individually? For example, this works: (a) ``` arr=[] arr=[0 for i in range(5)] ``` but (b), ``` arr=[] arr[0]=0 arr[1]=0 ``` etc, doesn't. Isn't the `arr=[0 for i in range(5)]` instruction essentially doing what is done in (b) above in one fell swoop? I realize that array sizes need to be predefined (or allocated). So, I can understand something like `arr= [0]*5` or using numpy, `arr = np.empty(10, dtype=object)` work. However, I don't see how (a) preallocates the array dimension "ahead of time". How does python interpret (a) vs. (b) above?
2021/10/20
[ "https://Stackoverflow.com/questions/69647562", "https://Stackoverflow.com", "https://Stackoverflow.com/users/7242713/" ]
Firstly, there is no point in declaring a variable if you rebind it later anyway: ``` arr = [] # <-- this line is entirely pointless arr = [0 for i in range(5)] ``` Secondly, the two expressions ``` [0 for i in range(5)] [0] * 5 ``` **create** a new `list` object, whereas ``` arr[0] = 0 ``` mutates an existing one, namely it wants to **re**assign the first element of `arr`. Since this doesn't exist, you will see an error. You could do instead: ``` arr = [] arr.append(0) arr.append(0) ``` to fill an initially empty `list` incrementally. Note that a Python list is not an `Array` in, let's say, the Java sense that it has a predefined size. It is more like an `ArrayList`.
> > I don't see how (a) preallocates the array dimension "ahead of time". > > > It doesn't. This: ``` arr=[] arr=[0 for i in range(5)] ``` creates an empty array (I think it's more accurately called a *list* but I'm not a strong Python person) and stores it in `arr`, then creates an entirely new unrelated array and puts the new one in `arr`, throwing the old array away. It doesn't initialize the array you created with `arr=[]`. You can remove the first line (`arr=[]`) entirely; it doesn't do anything useful. You can see that they're different arrays like this: ``` # Create a blank array, store it in `a` a = [] # Store that same array in `b` b = a # Show that they're the same array print(a == b); # Create a new array and put it in `a` a = [0 for i in range(5)] # Show that they aren't the same array print(a == b); ``` The output is ``` True False ``` So just use `arr=[0 for i in range(5)]` or, if you want to do it separately, use `append`: ``` a = [] a.append(0) a.append(0) print(a) ``` which outputs `[0, 0]`.
15,213,428
Recently I was going through the "Using Python App Engine with Google Cloud SQL" tutorial on Google Developers Academy website. However, I stumbled upon on the first part of the exercise "Building an application with a local MySQL instance". I could not connect the sample code (main.py) to my local MySQL instance. Wonder if anyone has found a solution to this problem. It would be great if you could share with me how you set up your MySQL, configure it so the GAE's sandbox would be able to access the MySQL-python connecter.
2013/03/04
[ "https://Stackoverflow.com/questions/15213428", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2130139/" ]
The answer is to use `add_action('edit_link','save_data')` and `add_option('name_of_option')` instead of `add_post_meta`. View the full results here: [MetaBox Links](https://gist.github.com/davidchase/df9adeb1e03b88691899)
After some experiments, I figured out how to save data from custom metabox in link manager into db as post meta key/value (wp\_postmeta). If someone needs, here is a working example: ``` action( 'add_meta_boxes', 'add_link_date' ); function add_link_date() { add_meta_box( 'link-date-meta-box', 'Link Date', 'link_date', 'link', 'normal', 'high' ); } function link_date( $link ) { $values = get_post_custom( $link->link_id ); $date = isset( $values['link_date'] ) ? esc_attr( $values['link_date'][0] ) : ''; wp_nonce_field( plugin_basename( __FILE__ ), 'link_date_nonce' ); ?> <p> <label for="link_date_text">Link Date</label> <input type="text" name="link_date_text" id="link_date_text" value="<?php echo $date; ?>" /> </p> <?php } add_action( 'edit_link', 'myplugin_save_postdata' ); function myplugin_save_postdata( ) { if ( defined( 'DOING_AUTOSAVE' ) && DOING_AUTOSAVE ) return; if ( !isset( $_POST['link_date_nonce'] ) || !wp_verify_nonce( $_POST['link_date_nonce'], plugin_basename( __FILE__ ) ) ) return; $link_id = $_POST['link_id']; $linkDate = sanitize_text_field( $_POST['link_date_text'] ); add_post_meta( $link_id, 'link_date', $linkDate, true ) or update_post_meta( $link_id, 'link_date', $linkDate ); } ```
50,876,292
Given its link, I'd like to capture an online video (say from YouTube) for further processing **without downloading it on the disk**. What I mean by this is that I'd like to load it directly to memory whenever possible. According to these links: <http://answers.opencv.org/question/24012/reading-video-stream-from-ip-camera-in-opencv-java/#24013> <http://answers.opencv.org/question/24154/how-to-using-opencv-api-get-web-video-stream/#24156> <http://answers.opencv.org/question/133/how-do-i-access-an-ip-camera/> <https://pypi.org/project/pafy/> it should be doable. My attempt looks like this: ``` import cv2 import pafy vid = pafy.new("https://www.youtube.com/watch?v=QuELiw8tbx8") vid_cap = cv2.VideoCapture() vid_cap.open(vid.getbest(preftype="webm").url) ``` However it fails with an error ``` (python:12925): GLib-GObject-CRITICAL **: 14:48:56.168: g_object_set: assertion 'G_IS_OBJECT (object)' failed False ``` How can I achieve my goal using python?
2018/06/15
[ "https://Stackoverflow.com/questions/50876292", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4671908/" ]
You can achieve this by using `youtube-dl` and `ffmpeg`: * Install the latest version of [`youtube-dl`](https://rg3.github.io/youtube-dl/download.html). * Then do `sudo pip install --upgrade youtube_dl` * Build `ffmpeg` with HTTPS support. You can do this by [turning on the `--enable-gnutls` option](https://askubuntu.com/a/650617/486771). Once the installations are complete, it's time to test the `youtube-dl` in terminal. We'll be using [this youtube video](https://www.youtube.com/watch?v=HECa3bAFAYk) for testing. First we get the list of formats available for this video: ``` youtube-dl --list-formats https://www.youtube.com/watch?v=HECa3bAFAYk ``` Select a `format code` of your choice. I want the **144p** resolution so I select **160**. [![image](https://i.stack.imgur.com/VausY.png)](https://i.stack.imgur.com/VausY.png) Next we get the video url for our format of choice by: ``` youtube-dl --format 160 --get-url https://www.youtube.com/watch?v=HECa3bAFAYk ``` > > <https://r3---sn-4g5e6nz7.googlevideo.com/videoplayback?clen=184077&aitags=133%2C134%2C160%2C242%2C243%2C278&fvip=3&requiressl=yes&signature=5D21FFD906226C7680B26ACEF996B78B6A31F7C9.31B1115DB13F096AA5968DB2838E22A0D6A2EDCB&source=youtube&mn=sn-4g5e6nz7%2Csn-h0jeen7y&xtags=tx%3D9486108&itag=160&mime=video%2Fmp4&mt=1529091799&ms=au%2Conr&ei=XxckW-73GNCogQfqrryQAg&expire=1529113535&mm=31%2C26&c=WEB&keepalive=yes&id=o-AJExEG49WtIUkrF7OikaaGBCfKntDl75xCoO5_9cL-eP&ip=95.91.202.147&sparams=aitags%2Cclen%2Cdur%2Cei%2Cgir%2Cid%2Cinitcwndbps%2Cip%2Cipbits%2Citag%2Ckeepalive%2Clmt%2Cmime%2Cmm%2Cmn%2Cms%2Cmv%2Cpl%2Crequiressl%2Csource%2Cxtags%2Cexpire&key=yt6&lmt=1526699176943888&dur=25.375&pl=22&gir=yes&mv=m&initcwndbps=1155000&ipbits=0&ratebypass=yes> > > > Finally we can play this video url in either `ffplay` or `vlc`. 
But instead of copying and pasting, we can do this in one command: ``` ffplay -i $(youtube-dl --format 160 --get-url https://www.youtube.com/watch?v=HECa3bAFAYk) ``` Now that we have confirmed that `youtube-dl` and `ffmpeg` works, we can write a Python script to process the frames in OpenCV. See [this link](https://github.com/rg3/youtube-dl/blob/master/README.md#embedding-youtube-dl) for more Python options. ``` import cv2 import numpy as np import youtube_dl if __name__ == '__main__': video_url = 'https://www.youtube.com/watch?v=HECa3bAFAYkq' ydl_opts = {} # create youtube-dl object ydl = youtube_dl.YoutubeDL(ydl_opts) # set video url, extract video information info_dict = ydl.extract_info(video_url, download=False) # get video formats available formats = info_dict.get('formats',None) for f in formats: # I want the lowest resolution, so I set resolution as 144p if f.get('format_note',None) == '144p': #get the video url url = f.get('url',None) # open url with opencv cap = cv2.VideoCapture(url) # check if url was opened if not cap.isOpened(): print('video not opened') exit(-1) while True: # read frame ret, frame = cap.read() # check if frame is empty if not ret: break # display frame cv2.imshow('frame', frame) if cv2.waitKey(30)&0xFF == ord('q'): break # release VideoCapture cap.release() cv2.destroyAllWindows() ```
Using pafy you can have a more elegant solution: ``` import cv2 import pafy url = "https://www.youtube.com/watch?v=NKpuX_yzdYs" video = pafy.new(url) best = video.getbest(preftype="mp4") capture = cv2.VideoCapture() capture.open(best.url) success,image = capture.read() while success: cv2.imshow('frame', image) if cv2.waitKey(1) & 0xFF == ord('q'): break success,image = capture.read() cv2.destroyAllWindows() capture.release() ```
50,876,292
Given its link, I'd like to capture an online video (say from YouTube) for further processing **without downloading it on the disk**. What I mean by this is that I'd like to load it directly to memory whenever possible. According to these links: <http://answers.opencv.org/question/24012/reading-video-stream-from-ip-camera-in-opencv-java/#24013> <http://answers.opencv.org/question/24154/how-to-using-opencv-api-get-web-video-stream/#24156> <http://answers.opencv.org/question/133/how-do-i-access-an-ip-camera/> <https://pypi.org/project/pafy/> it should be doable. My attempt looks like this: ``` import cv2 import pafy vid = pafy.new("https://www.youtube.com/watch?v=QuELiw8tbx8") vid_cap = cv2.VideoCapture() vid_cap.open(vid.getbest(preftype="webm").url) ``` However it fails with an error ``` (python:12925): GLib-GObject-CRITICAL **: 14:48:56.168: g_object_set: assertion 'G_IS_OBJECT (object)' failed False ``` How can I achieve my goal using python?
2018/06/15
[ "https://Stackoverflow.com/questions/50876292", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4671908/" ]
You can achieve this by using `youtube-dl` and `ffmpeg`: * Install the latest version of [`youtube-dl`](https://rg3.github.io/youtube-dl/download.html). * Then do `sudo pip install --upgrade youtube_dl` * Build `ffmpeg` with HTTPS support. You can do this by [turning on the `--enable-gnutls` option](https://askubuntu.com/a/650617/486771). Once the installations are complete, it's time to test the `youtube-dl` in terminal. We'll be using [this youtube video](https://www.youtube.com/watch?v=HECa3bAFAYk) for testing. First we get the list of formats available for this video: ``` youtube-dl --list-formats https://www.youtube.com/watch?v=HECa3bAFAYk ``` Select a `format code` of your choice. I want the **144p** resolution so I select **160**. [![image](https://i.stack.imgur.com/VausY.png)](https://i.stack.imgur.com/VausY.png) Next we get the video url for our format of choice by: ``` youtube-dl --format 160 --get-url https://www.youtube.com/watch?v=HECa3bAFAYk ``` > > <https://r3---sn-4g5e6nz7.googlevideo.com/videoplayback?clen=184077&aitags=133%2C134%2C160%2C242%2C243%2C278&fvip=3&requiressl=yes&signature=5D21FFD906226C7680B26ACEF996B78B6A31F7C9.31B1115DB13F096AA5968DB2838E22A0D6A2EDCB&source=youtube&mn=sn-4g5e6nz7%2Csn-h0jeen7y&xtags=tx%3D9486108&itag=160&mime=video%2Fmp4&mt=1529091799&ms=au%2Conr&ei=XxckW-73GNCogQfqrryQAg&expire=1529113535&mm=31%2C26&c=WEB&keepalive=yes&id=o-AJExEG49WtIUkrF7OikaaGBCfKntDl75xCoO5_9cL-eP&ip=95.91.202.147&sparams=aitags%2Cclen%2Cdur%2Cei%2Cgir%2Cid%2Cinitcwndbps%2Cip%2Cipbits%2Citag%2Ckeepalive%2Clmt%2Cmime%2Cmm%2Cmn%2Cms%2Cmv%2Cpl%2Crequiressl%2Csource%2Cxtags%2Cexpire&key=yt6&lmt=1526699176943888&dur=25.375&pl=22&gir=yes&mv=m&initcwndbps=1155000&ipbits=0&ratebypass=yes> > > > Finally we can play this video url in either `ffplay` or `vlc`. 
But instead of copying and pasting, we can do this in one command: ``` ffplay -i $(youtube-dl --format 160 --get-url https://www.youtube.com/watch?v=HECa3bAFAYk) ``` Now that we have confirmed that `youtube-dl` and `ffmpeg` works, we can write a Python script to process the frames in OpenCV. See [this link](https://github.com/rg3/youtube-dl/blob/master/README.md#embedding-youtube-dl) for more Python options. ``` import cv2 import numpy as np import youtube_dl if __name__ == '__main__': video_url = 'https://www.youtube.com/watch?v=HECa3bAFAYkq' ydl_opts = {} # create youtube-dl object ydl = youtube_dl.YoutubeDL(ydl_opts) # set video url, extract video information info_dict = ydl.extract_info(video_url, download=False) # get video formats available formats = info_dict.get('formats',None) for f in formats: # I want the lowest resolution, so I set resolution as 144p if f.get('format_note',None) == '144p': #get the video url url = f.get('url',None) # open url with opencv cap = cv2.VideoCapture(url) # check if url was opened if not cap.isOpened(): print('video not opened') exit(-1) while True: # read frame ret, frame = cap.read() # check if frame is empty if not ret: break # display frame cv2.imshow('frame', frame) if cv2.waitKey(30)&0xFF == ord('q'): break # release VideoCapture cap.release() cv2.destroyAllWindows() ```
First of all Update [`youtube-dl`](https://rg3.github.io/youtube-dl/download.html) using the command `pip install -U youtube-dl` Then use my [`VidGear`](https://github.com/abhiTronix/vidgear) Python Library, then automates the pipelining of YouTube Video using its URL address only. Here's a complete python example: ### For VidGear `v0.1.9` below: ```py # import libraries from vidgear.gears import CamGear import cv2 stream = CamGear(source='https://youtu.be/dQw4w9WgXcQ', y_tube = True, logging=True).start() # YouTube Video URL as input # infinite loop while True: frame = stream.read() # read frames # check if frame is None if frame is None: #if True break the infinite loop break # do something with frame here cv2.imshow("Output Frame", frame) # Show output window key = cv2.waitKey(1) & 0xFF # check for 'q' key-press if key == ord("q"): #if 'q' key-pressed break out break cv2.destroyAllWindows() # close output window # safely close video stream. stream.stop() ``` ### For VidGear `v0.2.0` and above: *(`y_tube` changed to `stream_mode`)* ```py # import libraries from vidgear.gears import CamGear import cv2 stream = CamGear(source='https://youtu.be/dQw4w9WgXcQ', stream_mode = True, logging=True).start() # YouTube Video URL as input # infinite loop while True: frame = stream.read() # read frames # check if frame is None if frame is None: #if True break the infinite loop break # do something with frame here cv2.imshow("Output Frame", frame) # Show output window key = cv2.waitKey(1) & 0xFF # check for 'q' key-press if key == ord("q"): #if 'q' key-pressed break out break cv2.destroyAllWindows() # close output window # safely close video stream. stream.stop() ``` **[Code Source](https://abhitronix.github.io/vidgear/latest/gears/camgear/usage/#using-camgear-with-youtube-videos)** If still get some error, raise an [issue here](https://github.com/abhiTronix/vidgear/issues) in its GitHub repo.
50,876,292
Given its link, I'd like to capture an online video (say from YouTube) for further processing **without downloading it on the disk**. What I mean by this is that I'd like to load it directly to memory whenever possible. According to these links: <http://answers.opencv.org/question/24012/reading-video-stream-from-ip-camera-in-opencv-java/#24013> <http://answers.opencv.org/question/24154/how-to-using-opencv-api-get-web-video-stream/#24156> <http://answers.opencv.org/question/133/how-do-i-access-an-ip-camera/> <https://pypi.org/project/pafy/> it should be doable. My attempt looks like this: ``` import cv2 import pafy vid = pafy.new("https://www.youtube.com/watch?v=QuELiw8tbx8") vid_cap = cv2.VideoCapture() vid_cap.open(vid.getbest(preftype="webm").url) ``` However it fails with an error ``` (python:12925): GLib-GObject-CRITICAL **: 14:48:56.168: g_object_set: assertion 'G_IS_OBJECT (object)' failed False ``` How can I achieve my goal using python?
2018/06/15
[ "https://Stackoverflow.com/questions/50876292", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4671908/" ]
You can achieve this by using `youtube-dl` and `ffmpeg`: * Install the latest version of [`youtube-dl`](https://rg3.github.io/youtube-dl/download.html). * Then do `sudo pip install --upgrade youtube_dl` * Build `ffmpeg` with HTTPS support. You can do this by [turning on the `--enable-gnutls` option](https://askubuntu.com/a/650617/486771). Once the installations are complete, it's time to test the `youtube-dl` in terminal. We'll be using [this youtube video](https://www.youtube.com/watch?v=HECa3bAFAYk) for testing. First we get the list of formats available for this video: ``` youtube-dl --list-formats https://www.youtube.com/watch?v=HECa3bAFAYk ``` Select a `format code` of your choice. I want the **144p** resolution so I select **160**. [![image](https://i.stack.imgur.com/VausY.png)](https://i.stack.imgur.com/VausY.png) Next we get the video url for our format of choice by: ``` youtube-dl --format 160 --get-url https://www.youtube.com/watch?v=HECa3bAFAYk ``` > > <https://r3---sn-4g5e6nz7.googlevideo.com/videoplayback?clen=184077&aitags=133%2C134%2C160%2C242%2C243%2C278&fvip=3&requiressl=yes&signature=5D21FFD906226C7680B26ACEF996B78B6A31F7C9.31B1115DB13F096AA5968DB2838E22A0D6A2EDCB&source=youtube&mn=sn-4g5e6nz7%2Csn-h0jeen7y&xtags=tx%3D9486108&itag=160&mime=video%2Fmp4&mt=1529091799&ms=au%2Conr&ei=XxckW-73GNCogQfqrryQAg&expire=1529113535&mm=31%2C26&c=WEB&keepalive=yes&id=o-AJExEG49WtIUkrF7OikaaGBCfKntDl75xCoO5_9cL-eP&ip=95.91.202.147&sparams=aitags%2Cclen%2Cdur%2Cei%2Cgir%2Cid%2Cinitcwndbps%2Cip%2Cipbits%2Citag%2Ckeepalive%2Clmt%2Cmime%2Cmm%2Cmn%2Cms%2Cmv%2Cpl%2Crequiressl%2Csource%2Cxtags%2Cexpire&key=yt6&lmt=1526699176943888&dur=25.375&pl=22&gir=yes&mv=m&initcwndbps=1155000&ipbits=0&ratebypass=yes> > > > Finally we can play this video url in either `ffplay` or `vlc`. 
But instead of copying and pasting, we can do this in one command: ``` ffplay -i $(youtube-dl --format 160 --get-url https://www.youtube.com/watch?v=HECa3bAFAYk) ``` Now that we have confirmed that `youtube-dl` and `ffmpeg` works, we can write a Python script to process the frames in OpenCV. See [this link](https://github.com/rg3/youtube-dl/blob/master/README.md#embedding-youtube-dl) for more Python options. ``` import cv2 import numpy as np import youtube_dl if __name__ == '__main__': video_url = 'https://www.youtube.com/watch?v=HECa3bAFAYkq' ydl_opts = {} # create youtube-dl object ydl = youtube_dl.YoutubeDL(ydl_opts) # set video url, extract video information info_dict = ydl.extract_info(video_url, download=False) # get video formats available formats = info_dict.get('formats',None) for f in formats: # I want the lowest resolution, so I set resolution as 144p if f.get('format_note',None) == '144p': #get the video url url = f.get('url',None) # open url with opencv cap = cv2.VideoCapture(url) # check if url was opened if not cap.isOpened(): print('video not opened') exit(-1) while True: # read frame ret, frame = cap.read() # check if frame is empty if not ret: break # display frame cv2.imshow('frame', frame) if cv2.waitKey(30)&0xFF == ord('q'): break # release VideoCapture cap.release() cv2.destroyAllWindows() ```
I want to highlight that the issue I faced while running was an OpenCV version problem. I was using OpenCV 3.4.x and the video feed was exiting before being read into the while loop, so I upgraded my OpenCV to "opencv-contrib-python== 4.2.0.34".
50,876,292
Given its link, I'd like to capture an online video (say from YouTube) for further processing **without downloading it on the disk**. What I mean by this is that I'd like to load it directly to memory whenever possible. According to these links: <http://answers.opencv.org/question/24012/reading-video-stream-from-ip-camera-in-opencv-java/#24013> <http://answers.opencv.org/question/24154/how-to-using-opencv-api-get-web-video-stream/#24156> <http://answers.opencv.org/question/133/how-do-i-access-an-ip-camera/> <https://pypi.org/project/pafy/> it should be doable. My attempt looks like this: ``` import cv2 import pafy vid = pafy.new("https://www.youtube.com/watch?v=QuELiw8tbx8") vid_cap = cv2.VideoCapture() vid_cap.open(vid.getbest(preftype="webm").url) ``` However it fails with an error ``` (python:12925): GLib-GObject-CRITICAL **: 14:48:56.168: g_object_set: assertion 'G_IS_OBJECT (object)' failed False ``` How can I achieve my goal using python?
2018/06/15
[ "https://Stackoverflow.com/questions/50876292", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4671908/" ]
First of all Update [`youtube-dl`](https://rg3.github.io/youtube-dl/download.html) using the command `pip install -U youtube-dl` Then use my [`VidGear`](https://github.com/abhiTronix/vidgear) Python Library, then automates the pipelining of YouTube Video using its URL address only. Here's a complete python example: ### For VidGear `v0.1.9` below: ```py # import libraries from vidgear.gears import CamGear import cv2 stream = CamGear(source='https://youtu.be/dQw4w9WgXcQ', y_tube = True, logging=True).start() # YouTube Video URL as input # infinite loop while True: frame = stream.read() # read frames # check if frame is None if frame is None: #if True break the infinite loop break # do something with frame here cv2.imshow("Output Frame", frame) # Show output window key = cv2.waitKey(1) & 0xFF # check for 'q' key-press if key == ord("q"): #if 'q' key-pressed break out break cv2.destroyAllWindows() # close output window # safely close video stream. stream.stop() ``` ### For VidGear `v0.2.0` and above: *(`y_tube` changed to `stream_mode`)* ```py # import libraries from vidgear.gears import CamGear import cv2 stream = CamGear(source='https://youtu.be/dQw4w9WgXcQ', stream_mode = True, logging=True).start() # YouTube Video URL as input # infinite loop while True: frame = stream.read() # read frames # check if frame is None if frame is None: #if True break the infinite loop break # do something with frame here cv2.imshow("Output Frame", frame) # Show output window key = cv2.waitKey(1) & 0xFF # check for 'q' key-press if key == ord("q"): #if 'q' key-pressed break out break cv2.destroyAllWindows() # close output window # safely close video stream. stream.stop() ``` **[Code Source](https://abhitronix.github.io/vidgear/latest/gears/camgear/usage/#using-camgear-with-youtube-videos)** If still get some error, raise an [issue here](https://github.com/abhiTronix/vidgear/issues) in its GitHub repo.
Using pafy you can have a more elegant solution: ``` import cv2 import pafy url = "https://www.youtube.com/watch?v=NKpuX_yzdYs" video = pafy.new(url) best = video.getbest(preftype="mp4") capture = cv2.VideoCapture() capture.open(best.url) success,image = capture.read() while success: cv2.imshow('frame', image) if cv2.waitKey(1) & 0xFF == ord('q'): break success,image = capture.read() cv2.destroyAllWindows() capture.release() ```
50,876,292
Given its link, I'd like to capture an online video (say from YouTube) for further processing **without downloading it on the disk**. What I mean by this is that I'd like to load it directly to memory whenever possible. According to these links: <http://answers.opencv.org/question/24012/reading-video-stream-from-ip-camera-in-opencv-java/#24013> <http://answers.opencv.org/question/24154/how-to-using-opencv-api-get-web-video-stream/#24156> <http://answers.opencv.org/question/133/how-do-i-access-an-ip-camera/> <https://pypi.org/project/pafy/> it should be doable. My attempt looks like this: ``` import cv2 import pafy vid = pafy.new("https://www.youtube.com/watch?v=QuELiw8tbx8") vid_cap = cv2.VideoCapture() vid_cap.open(vid.getbest(preftype="webm").url) ``` However it fails with an error ``` (python:12925): GLib-GObject-CRITICAL **: 14:48:56.168: g_object_set: assertion 'G_IS_OBJECT (object)' failed False ``` How can I achieve my goal using python?
2018/06/15
[ "https://Stackoverflow.com/questions/50876292", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4671908/" ]
First of all Update [`youtube-dl`](https://rg3.github.io/youtube-dl/download.html) using the command `pip install -U youtube-dl` Then use my [`VidGear`](https://github.com/abhiTronix/vidgear) Python Library, then automates the pipelining of YouTube Video using its URL address only. Here's a complete python example: ### For VidGear `v0.1.9` below: ```py # import libraries from vidgear.gears import CamGear import cv2 stream = CamGear(source='https://youtu.be/dQw4w9WgXcQ', y_tube = True, logging=True).start() # YouTube Video URL as input # infinite loop while True: frame = stream.read() # read frames # check if frame is None if frame is None: #if True break the infinite loop break # do something with frame here cv2.imshow("Output Frame", frame) # Show output window key = cv2.waitKey(1) & 0xFF # check for 'q' key-press if key == ord("q"): #if 'q' key-pressed break out break cv2.destroyAllWindows() # close output window # safely close video stream. stream.stop() ``` ### For VidGear `v0.2.0` and above: *(`y_tube` changed to `stream_mode`)* ```py # import libraries from vidgear.gears import CamGear import cv2 stream = CamGear(source='https://youtu.be/dQw4w9WgXcQ', stream_mode = True, logging=True).start() # YouTube Video URL as input # infinite loop while True: frame = stream.read() # read frames # check if frame is None if frame is None: #if True break the infinite loop break # do something with frame here cv2.imshow("Output Frame", frame) # Show output window key = cv2.waitKey(1) & 0xFF # check for 'q' key-press if key == ord("q"): #if 'q' key-pressed break out break cv2.destroyAllWindows() # close output window # safely close video stream. stream.stop() ``` **[Code Source](https://abhitronix.github.io/vidgear/latest/gears/camgear/usage/#using-camgear-with-youtube-videos)** If still get some error, raise an [issue here](https://github.com/abhiTronix/vidgear/issues) in its GitHub repo.
I want to highlight that the issue I faced while running was an OpenCV version problem. I was using OpenCV 3.4.x and the video feed was exiting before being read into the while loop, so I upgraded my OpenCV to "opencv-contrib-python== 4.2.0.34".
27,321,523
I have a Raspberry Pi that I use as a multi-purpose 24/7 device for DLNA, CIFS, VPN etc. Now I bought a TellStick, that is a USB device that can send 433MHz radio commands to wireless power switches, dimmers etc. The manufacturer offers sources and tools for linux, which is really great, btw. Using a special command (named tdtool) I can send commands to my power switches, e.g. ``` tdtool --on 1 ``` This switches on device 1. This works very well and stable, so that I want to get away from shell commands in order to make the handling easier. My idea is to set up a very simple web server that only needs to be able to receive GET or POST requests and triggers some action like running the command "tdtool --off 3". So the web server does not even need to serve pages, it just needs to listen to requests. I want to create a HTTP-based solution because that would allow me to use my smartphone as a remote control. There is an Android app named "Tasker" that is awesome on its own, but it also allows sending customized HTTP requests based on certain conditions, so that I could make my lights go bright when I come home (and Tasker recognizes a connection to my WIFI network or similar). As the Raspberry is not the most powerful piece of hardware, I'd like to keep things as simple as possible. Basically, I need this: A HTTP get request comes in, for example: ``` /switch?device=1&action=on ``` According to this request, the server should translate that *somehow* into this: ``` tdtool --on 1 ``` I am sure that I would find a way to build something like that with Apache and PHP, but I think that would be somewhat overdressed in my case. What would you recommend? Is there some cool python magic that could make this happen? Or some fancy mini webserver with a CGI script? Any thoughts and code samples are greatly appreciated, thanks in advance!
2014/12/05
[ "https://Stackoverflow.com/questions/27321523", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1901272/" ]
While your question is too "opinion-like", there's an almost instant solution: [nginx - How to run a shell script on every request?](https://stackoverflow.com/questions/22891148/nginx-how-to-run-a-shell-script-on-every-request) But since you're talking about R-Pi, maybe you will find Python builtin [CGIHTTPServer](https://docs.python.org/2/library/cgihttpserver.html) (Python 2) or [http.server](https://docs.python.org/3/library/http.server.html) (Python 3) modules be more suitable for the task of executing a shell command
Here a full & working RealLife™ perl's example ---------------------------------------------- ...using [Dancer](https://metacpan.org/pod/Dancer) ``` # cpan Dancer $ dancer -a MyApp $ cd MyApp $ cat ./lib/MyApp.pm # need to be edited, see bellow $ bin/app.pl ``` Now you can call the URL ``` http://127.0.0.1:3000/switch?device=1&action=on ``` `$cmd` will be now executed. The `./lib/MyApp.pm` : ``` package MyApp; use Dancer ':syntax'; our $VERSION = '0.1'; get '/switch' => sub { my $var = params; my $device = $var->{device}; my $action = "--" . $var->{action}; # building custom system command my $cmd = "tdtool $action $device"; # running the command `$cmd`; return "$cmd\nexecuted\n"; }; true; ``` Here another full & working RealLife™ example using [php](/questions/tagged/php "show questions tagged 'php'") -------------------------------------------------------------------------------------------------------------- ``` <?php header("HTTP/1.1 200 OK"); if (isset($_REQUEST['action'], $_REQUEST['device'])) { $device = $_REQUEST['device']; $action = '--' . $_REQUEST['action']; $cmd = "tdtool $action $device"; system("$cmd"); echo "Command<br>$cmd<br>executed..."; } ?> ``` The url is : ``` http://127.0.0.1/switch.php?device=1&action=on ``` This require a `HTTP server` binding on port 80 and the script `switch.php` to be on the top of your `DocumentRoot` (for this example).
73,479,698
I am trying to build a Docker image but when I build it, I get the error message : 'E: Unable to locate package libxcb-util1'. Here is my Dockerfile : ``` `# $DEL_BEGIN` FROM python:3.9.7-buster WORKDIR /prod COPY design_interface design_interface COPY requirements.txt requirements.txt COPY setup.py setup.py RUN pip install --upgrade pip RUN apt-get update && apt install --assume-yes apt-utils RUN apt-get update && pip install . RUN apt-get update && pip install opencv-python RUN apt-get update && apt install --assume-yes libxcb-icccm4 RUN apt-get update && apt install --assume-yes libxcb-image0 RUN apt-get update && apt-get install libxcb-util1 CMD ["python3","design_interface/MainWindow.py"] `# $DEL_END` ``` The error message is : ``` Step 11/12 : RUN apt-get update && apt-get install libxcb-util1 ---> Running in a6000629d968 Hit:1 http://deb.debian.org/debian buster InRelease Hit:2 http://security.debian.org/debian-security buster/updates InRelease Hit:3 http://deb.debian.org/debian buster-updates InRelease Reading package lists... Reading package lists... Building dependency tree... Reading state information... E: Unable to locate package libxcb-util1 The command '/bin/sh -c apt-get update && apt-get install libxcb-util1' returned a non-zero code: 100 ``` What is more difficult to understand is why the command line 'sudo apt-get update && apt-get install libxcb-util1' works well on my computer but does not work when building the Docker image with the Dockerfile.
2022/08/24
[ "https://Stackoverflow.com/questions/73479698", "https://Stackoverflow.com", "https://Stackoverflow.com/users/19739078/" ]
Updating gradle solves the problem. There are different ways to update gradle, as explained on their official website: <https://gradle.org/install/> *Assuming that you are a Windows user*: Downloading the gradle binary files and extracting the folder to the directory "c:/gradle" is enough. * Download the gradle binary files for the next version * Update the path of `gradle` in the PATH variable on your computer [![enter image description here](https://i.stack.imgur.com/mSaAz.png)](https://i.stack.imgur.com/mSaAz.png) * You should define another variable as follows: [![enter image description here](https://i.stack.imgur.com/8iqxp.png)](https://i.stack.imgur.com/8iqxp.png) * The variable name should be the same and its value should be the same as the one you use. While creating your cordova environment, this url is being used. * Save and restart your IDEs or CLI windows * Test it out with `gradle -v` and you should see the active version of gradle on your system. * Be prepared for new problems in your cordova environment because you have an upgraded gradle now
To fix the issue, I've reverted to `cordova-android` version `9.1.0`. I've no idea, as of now, why `cordova-android` version `10` points to `gradle`, which as of now isn't possible to download...
66,797,173
I am using transformers pipeline to perform sentiment analysis on sample texts from 6 different languages. I tested the code in my local Jupyterhub and it worked fine. But when I wrap it in a flask application and create a docker image out of it, the execution is hanging at the pipeline inference line and its taking forever to return the sentiment scores. * mac os catalina 10.15.7 (no GPU) * Python version : 3.8 * Transformers package : 4.4.2 * torch version : 1.6.0 ``` from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline model_name = "nlptown/bert-base-multilingual-uncased-sentiment" model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) classifier = pipeline('sentiment-analysis', model=model, tokenizer=tokenizer) results = classifier(["We are very happy to show you the Transformers library.", "We hope you don't hate it."]) print([i['score'] for i in results]) ``` The above code works fine in Jupyter notebook and it has provided me the expected result ``` [0.7495927810668945,0.2365245819091797] ``` So now if I create a docker image with flask wrapper its getting stuck at the `results = classifier([input_data])` line and the execution is running forever. 
My folder structure is as follows: ``` - src |-- app |--main.py |-- Dockerfile |-- requirements.txt ``` I used the below `Dockerfile` to create the image ``` FROM tiangolo/uwsgi-nginx-flask:python3.8 COPY ./requirements.txt /requirements.txt COPY ./app /app WORKDIR /app RUN pip install -r /requirements.txt RUN echo "uwsgi_read_timeout 1200s;" > /etc/nginx/conf.d/custom_timeout.conf ``` And my `requirements.txt` file is as follows: ``` pandas==1.1.5 transformers==4.4.2 torch==1.6.0 ``` My `main.py` script look like this : ``` from flask import Flask, json, request, jsonify import traceback import pandas as pd from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline app = Flask(__name__) app.config["JSON_SORT_KEYS"] = False model_name = 'nlptown/bert-base-multilingual-uncased-sentiment' model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) nlp = pipeline('sentiment-analysis', model=model_path, tokenizer=model_path) @app.route("/") def hello(): return "Model: Sentiment pipeline test" @app.route("/predict", methods=['POST']) def predict(): json_request = request.get_json(silent=True) input_list = [i['text'] for i in json_request["input_data"]] results = nlp(input_list) ########## Getting stuck here for result in results: print(f"label: {result['label']}, with score: {round(result['score'], 4)}") score_list = [round(i['score'], 4) for i in results] return jsonify(score_list) if __name__ == "__main__": app.run(host='0.0.0.0', debug=False, port=80) ``` My input payload is of the form ``` {"input_data" : [{"text" : "We are very happy to show you the Transformers library."}, {"text" : "We hope you don't hate it."}]} ``` I tried looking into the transformers github issues but couldn't find one. I execution works fine even when using the flask development server but it runs forever when I wrap it and create a docker image. 
I am not sure if I am missing any additional dependency to be included while creating the docker image. Thanks.
2021/03/25
[ "https://Stackoverflow.com/questions/66797173", "https://Stackoverflow.com", "https://Stackoverflow.com/users/10422855/" ]
I was having a similar issue. It seems that starting the app somehow polutes the memory of transformers models. Probably something to do with how Flask does threading but no idea why. What fixed it for me was doing the things that are causing trouble (loading the models) in a different thread. ``` import threading def preload_models(): "LOAD MODELS" return 0 def start_app(): app = create_app() register_handlers(app) preloading = threading.Thread(target=preload_models) preloading.start() preloading.join() return app ``` First reply here. I would be really glad if this helps.
Flask uses port 5000. In creating a docker image, it's important to make sure that the port is set up this way. Replace the last line with the following: ``` app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 5000))) ``` Be also sure to `import os` at the top Lastly, in `Dockerfile`, add ``` EXPOSE 5000 CMD ["python", "./main.py"] ```
3,887,393
I'm hacking a quick and dirty python script to generate some reports as static html files. What would be a good module to easily build static html files outside the context of a web application? My goals are simplicity (the HTML will not be very complex) and ease of use (I don't want to write a lot of code just to output some html tags). I found two alternatives on my first goolge search: * markup.py - <http://markup.sourceforge.net/> * HTML.py - <http://www.decalage.info/en/python/html> Also, I feel that using a templating engine would be over-kill, but if you differ please say it and why. Any other recommendation?
2010/10/08
[ "https://Stackoverflow.com/questions/3887393", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2954/" ]
Maybe you could try [Markdown](http://www.freewisdom.org/projects/python-markdown/) instead, and convert it to HTML on the fly?
You don't necessarily need something complex - for instance, here's a ~150 line library to generate HTML in a functional manner: <http://github.com/Yelp/PushmasterApp/blob/master/pushmaster/taglib.py> (Full disclosure, I work with the person who originally wrote that version, and I also use it myself.)
3,887,393
I'm hacking a quick and dirty python script to generate some reports as static html files. What would be a good module to easily build static html files outside the context of a web application? My goals are simplicity (the HTML will not be very complex) and ease of use (I don't want to write a lot of code just to output some html tags). I found two alternatives on my first goolge search: * markup.py - <http://markup.sourceforge.net/> * HTML.py - <http://www.decalage.info/en/python/html> Also, I feel that using a templating engine would be over-kill, but if you differ please say it and why. Any other recommendation?
2010/10/08
[ "https://Stackoverflow.com/questions/3887393", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2954/" ]
Maybe you could try [Markdown](http://www.freewisdom.org/projects/python-markdown/) instead, and convert it to HTML on the fly?
Why would a templating engine necessarily be overkill? You don't need the whole web framework just to use the templating engine (at least, for most templating engines). [Mako](http://www.makotemplates.org/) for example can be used stand-alone just fine, and I often use it to generate html files (reports from a db and such)
3,887,393
I'm hacking a quick and dirty python script to generate some reports as static html files. What would be a good module to easily build static html files outside the context of a web application? My goals are simplicity (the HTML will not be very complex) and ease of use (I don't want to write a lot of code just to output some html tags). I found two alternatives on my first goolge search: * markup.py - <http://markup.sourceforge.net/> * HTML.py - <http://www.decalage.info/en/python/html> Also, I feel that using a templating engine would be over-kill, but if you differ please say it and why. Any other recommendation?
2010/10/08
[ "https://Stackoverflow.com/questions/3887393", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2954/" ]
Maybe you could try [Markdown](http://www.freewisdom.org/projects/python-markdown/) instead, and convert it to HTML on the fly?
i recommend having a look at [shpaml](http://shpaml.webfactional.com/)
3,887,393
I'm hacking a quick and dirty python script to generate some reports as static html files. What would be a good module to easily build static html files outside the context of a web application? My goals are simplicity (the HTML will not be very complex) and ease of use (I don't want to write a lot of code just to output some html tags). I found two alternatives on my first goolge search: * markup.py - <http://markup.sourceforge.net/> * HTML.py - <http://www.decalage.info/en/python/html> Also, I feel that using a templating engine would be over-kill, but if you differ please say it and why. Any other recommendation?
2010/10/08
[ "https://Stackoverflow.com/questions/3887393", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2954/" ]
Maybe you could try [Markdown](http://www.freewisdom.org/projects/python-markdown/) instead, and convert it to HTML on the fly?
If you have just some simple static HTML files. Then why not use string templates like so. ``` import string TEMPLATE_FORMAT = """ <html> <head><title>Trial</title></head> <body> <div class="myclass">$my_div_data</div> </body> """ my_div_data = "some_data_to_display_in_HTML" TEMPLATE = string.Template(TEMPLATE_FORMAT) html_data = TEMPLATE.safe_substitute(my_div_data) open("out.html", "w").write(html_data) ``` Give this a shot if you don't have too big HTML files to generate. Saves you on the learning you need to do if you decide to use libraries.
3,887,393
I'm hacking a quick and dirty python script to generate some reports as static html files. What would be a good module to easily build static html files outside the context of a web application? My goals are simplicity (the HTML will not be very complex) and ease of use (I don't want to write a lot of code just to output some html tags). I found two alternatives on my first goolge search: * markup.py - <http://markup.sourceforge.net/> * HTML.py - <http://www.decalage.info/en/python/html> Also, I feel that using a templating engine would be over-kill, but if you differ please say it and why. Any other recommendation?
2010/10/08
[ "https://Stackoverflow.com/questions/3887393", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2954/" ]
Maybe you could try [Markdown](http://www.freewisdom.org/projects/python-markdown/) instead, and convert it to HTML on the fly?
ElementTree can produce html with some limitations. I'd write it like this: ``` from xml.etree.ElementTree import ElementTree, Element, SubElement import sys html = Element('html') head = SubElement(html, 'head') style = SubElement(head, 'link') style.attrib = {'rel': 'stylesheet', 'href': 'style.css', 'type': 'text/css'} body = SubElement(html, 'body') para = SubElement(body, 'p') para.text = 'Lorem ipsum sit amet' doc = ElementTree(html) doc.write(sys.stdout) ``` In case of moderately complex html I'd stick with some templating engine: Jinja2, Mako, Cheetah, just to name a few.
3,887,393
I'm hacking a quick and dirty python script to generate some reports as static html files. What would be a good module to easily build static html files outside the context of a web application? My goals are simplicity (the HTML will not be very complex) and ease of use (I don't want to write a lot of code just to output some html tags). I found two alternatives on my first goolge search: * markup.py - <http://markup.sourceforge.net/> * HTML.py - <http://www.decalage.info/en/python/html> Also, I feel that using a templating engine would be over-kill, but if you differ please say it and why. Any other recommendation?
2010/10/08
[ "https://Stackoverflow.com/questions/3887393", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2954/" ]
You don't necessarily need something complex - for instance, here's a ~150 line library to generate HTML in a functional manner: <http://github.com/Yelp/PushmasterApp/blob/master/pushmaster/taglib.py> (Full disclosure, I work with the person who originally wrote that version, and I also use it myself.)
i recommend having a look at [shpaml](http://shpaml.webfactional.com/)
3,887,393
I'm hacking a quick and dirty python script to generate some reports as static html files. What would be a good module to easily build static html files outside the context of a web application? My goals are simplicity (the HTML will not be very complex) and ease of use (I don't want to write a lot of code just to output some html tags). I found two alternatives on my first goolge search: * markup.py - <http://markup.sourceforge.net/> * HTML.py - <http://www.decalage.info/en/python/html> Also, I feel that using a templating engine would be over-kill, but if you differ please say it and why. Any other recommendation?
2010/10/08
[ "https://Stackoverflow.com/questions/3887393", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2954/" ]
Why would a templating engine necessarily be overkill? You don't need the whole web framework just to use the templating engine (at least, for most templating engines). [Mako](http://www.makotemplates.org/) for example can be used stand-alone just fine, and I often use it to generate html files (reports from a db and such)
i recommend having a look at [shpaml](http://shpaml.webfactional.com/)
3,887,393
I'm hacking a quick and dirty python script to generate some reports as static html files. What would be a good module to easily build static html files outside the context of a web application? My goals are simplicity (the HTML will not be very complex) and ease of use (I don't want to write a lot of code just to output some html tags). I found two alternatives on my first goolge search: * markup.py - <http://markup.sourceforge.net/> * HTML.py - <http://www.decalage.info/en/python/html> Also, I feel that using a templating engine would be over-kill, but if you differ please say it and why. Any other recommendation?
2010/10/08
[ "https://Stackoverflow.com/questions/3887393", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2954/" ]
If you have just some simple static HTML files. Then why not use string templates like so. ``` import string TEMPLATE_FORMAT = """ <html> <head><title>Trial</title></head> <body> <div class="myclass">$my_div_data</div> </body> """ my_div_data = "some_data_to_display_in_HTML" TEMPLATE = string.Template(TEMPLATE_FORMAT) html_data = TEMPLATE.safe_substitute(my_div_data) open("out.html", "w").write(html_data) ``` Give this a shot if you don't have too big HTML files to generate. Saves you on the learning you need to do if you decide to use libraries.
i recommend having a look at [shpaml](http://shpaml.webfactional.com/)
3,887,393
I'm hacking a quick and dirty python script to generate some reports as static html files. What would be a good module to easily build static html files outside the context of a web application? My goals are simplicity (the HTML will not be very complex) and ease of use (I don't want to write a lot of code just to output some html tags). I found two alternatives on my first goolge search: * markup.py - <http://markup.sourceforge.net/> * HTML.py - <http://www.decalage.info/en/python/html> Also, I feel that using a templating engine would be over-kill, but if you differ please say it and why. Any other recommendation?
2010/10/08
[ "https://Stackoverflow.com/questions/3887393", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2954/" ]
ElementTree can produce html with some limitations. I'd write it like this: ``` from xml.etree.ElementTree import ElementTree, Element, SubElement import sys html = Element('html') head = SubElement(html, 'head') style = SubElement(head, 'link') style.attrib = {'rel': 'stylesheet', 'href': 'style.css', 'type': 'text/css'} body = SubElement(html, 'body') para = SubElement(body, 'p') para.text = 'Lorem ipsum sit amet' doc = ElementTree(html) doc.write(sys.stdout) ``` In case of moderately complex html I'd stick with some templating engine: Jinja2, Mako, Cheetah, just to name a few.
i recommend having a look at [shpaml](http://shpaml.webfactional.com/)
6,699,201
What would I have to do to make a Python application I am writing open up a web page in the default browser? It doesn't need to be told what the webpage is or anything, it'll be opening one that I've already chosen. I found some documentation [here](http://docs.python.org/library/webbrowser.html) but I always get a syntax error from the symbols in the URL. Any help?
2011/07/14
[ "https://Stackoverflow.com/questions/6699201", "https://Stackoverflow.com", "https://Stackoverflow.com/users/841843/" ]
The URL needs to be in a string. ``` webbrowser.open('http://www.google.com/') ```
Have a look at the `webbrowser` module.
64,082,288
I masked a *sorted* 1-D numpy array using the method below (which follows a solution proposed [here](https://stackoverflow.com/questions/64076440/accessing-a-large-numpy-array-while-preserving-its-order)): ``` def get_from_sorted(sorted,idx): mask = np.zeros(sorted.shape, bool) mask[idx] = True return sorted[mask] ``` The python method returns the array after masking on the indexes `idx`. For example, if sorted=`np.array([0.1,0.2,0.3.0.4,0.5])`, and idx=`np.array([4,0,1]`), then the method `get_from_sorted` should return `np.array([0.1,0.2,0.5])` (note the order in the original array is preserved.) Question: I need to get the mapping between the indices of the items in the array after masking and those in the original list. In the example above, such a mapping is ``` 0 -> 0 1 -> 1 2 -> 5 ``` because 0.1, 0.2, and 0.5 is on the 0th, 1st, and 5th place in `sorted`. How can I program this mapping efficiently? **Requirement on efficiency:** Efficiency is the key in my problem solving. Here, both "idx" and "sorted" is a 1-D array of 1 million elements, and idx is a 1-D array of about 0.5 million elements (taken from an image processing application). Thus, checking the elements of the masked array one by one, or in a vectorized fashion, against the original array, for example, using np.where, would not perform well in my case. Ideally, there should be a relatively simply mathematical relation between the indices in the masked array and the original sorted array. Any idea?
2020/09/26
[ "https://Stackoverflow.com/questions/64082288", "https://Stackoverflow.com", "https://Stackoverflow.com/users/815653/" ]
I believe your goal as follows. * Your question has the following 2 questions. 1. You want to know the method for creating new Google Document including the text data. 2. You want to know the method for adding more text data to the existing Google Document. * You want to achieve this using Drive API with googleapis for Node.js. * You have already been able to get and put the file using Drive API. Answer for question 1: ---------------------- In this answer, new Google Document is created by including the text data using Drive API. ### Modification points: * In this case, it is required to convert the text to the stream type. * When the text is converted to Google Document, `mimeType` is required to be included in `fileMetadata`. When above points are reflected to your script, it becomes as follows. ### Modified script: From: ``` var content = "Content to be written in file" var fileMetadata = { name: filename, parents: [rootFolderId] }; var media = { mimeType: 'application/vnd.google-apps.document', body: content // In the form of string }; ``` To: ``` const stream = require("stream"); var filename = "sample filename"; // Please set the filename of created Google Document. var rootFolderId = "root"; // Please set the folder ID. var content = "Content to be written in file"; var bufferStream = new stream.PassThrough(); bufferStream.end(Uint8Array.from(Buffer.from(content, "binary"))); var fileMetadata = { name: filename, parents: [rootFolderId], mimeType: "application/vnd.google-apps.document", }; var media = { mimeType: "text/plain", // <--- Added body: bufferStream }; ``` * In this case, `stream` module is used. Answer for question 2: ---------------------- In this answer, more text data is added to the existing Google Document using Drive API. ### Modification points: * In this case, it is required to do the following flow. 1. Retrieve all texts data from the existing Google Document. 2. Add more text data to the retrieved texts. 3. 
Update the existing Google Document using the updated text data. + In this case, the method of "Files: update" in Drive API is used. The sample script is as follows. ### Sample script: ``` const documentId = "###"; // Please set the Google Document ID of the existing Google Document. drive.files.export( { fileId: documentId, mimeType: "text/plain", }, { responseType: "stream" }, (err, { data }) => { if (err) { console.log(err); return; } let buf = []; data.on("data", (e) => buf.push(e)); data.on("end", () => { const stream = require("stream"); const content = "\n" + "Added text data"; // Here, the text data is added to the existing text in Document. buf.push(Buffer.from(content, "binary")); const bufferStream = new stream.PassThrough(); bufferStream.end(Uint8Array.from(Buffer.concat(buf))); var media = { body: bufferStream, }; drive.files.update( { fileId: documentId, resource: {}, media: media, fields: "id", }, function (err, file) { if (err) { console.error(err); return; } console.log(file.data.id); } ); }); } ); ``` * In this sample script, I used `const content = "\n" + "Added text data";` for adding more text data. If you don't want to insert the line break for this, please remove `"\n"`. Note: ----- * In order to add more text data, I think that you can also use Docs API. But in your goal, Drive API is used. So I proposed the method for using Drive API. References: ----------- * [Files: create](https://developers.google.com/drive/api/v3/reference/files/create) * [Class: stream.PassThrough](https://nodejs.org/api/stream.html#stream_class_stream_passthrough) * [Files: update](https://developers.google.com/drive/api/v3/reference/files/update)
From the [Media Uploads example](https://github.com/googleapis/google-api-nodejs-client#media-uploads) for `googleapis@60.0.1`, you can create a Google Document with a given title and content inside a given folder with ``` const drive = google.drive({ version: 'v3', auth }); const filename = '<filename>'; const parentFolderId = '<parent-folder-id>'; const content = '<file-content>'; const requestBody = { name: filename, parents: [parentFolderId], mimeType: 'application/vnd.google-apps.document', }; const media = { mimeType: 'text/plain', body: content, }; await drive.files.create({ requestBody, media, fields: 'id', }); ``` To perform modifications on the document, best use the [Docs API](https://developers.google.com/docs/api/quickstart/nodejs). It offers fine control over the document modifications. If you're looking for a **simple solution to update a Google Document's content using the Drive API**, a slightly coarser approach to using the Docs API is ``` drive = google.drive({ version: 'v3', auth }); const fileId = '<file-id>'; const newContent = '<new content>'; const media = { mimeType: 'text/plain', body: newContent, }; await drive.files.update({ fileId, media, }); ``` For **appending text to a document using the Drive API**, you can use something along the lines of ``` const drive = google.drive({ version: 'v3', auth }); const fileId = '<file-id>'; const contentToAppend = '<new content>'; const { data: prevContent } = await drive.files.export({ fileId, mimeType: 'text/plain', }); const newContent = prevContent + contentToAppend; const media = { mimeType: 'text/plain', body: newContent, }; await drive.files.update({ fileId, media, }); ```
57,358,927
I would like to use the twilight or twilight\_shifted colormap in my 2.7 python build, but it seems to be python 3 only? Is there some way to manually add it?
2019/08/05
[ "https://Stackoverflow.com/questions/57358927", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1608765/" ]
`twilight` was added in matplotlib v3.0 which is python 3 only. But we can find where it was added in the source code are re-engineer it. In the code below, you just need to grab the data used for `twilight` from the matplotlib source on github, by following this [link](https://github.com/matplotlib/matplotlib/blob/f2116d82dfd6b82fe178230766d95ea9ac2b0c8c/lib/matplotlib/_cm_listed.py#L1288). ``` import numpy as np import matplotlib.pyplot as plt import matplotlib.colors as colors _twilight_data = [ # data too long for stack overflow. get it from here: # https://github.com/matplotlib/matplotlib/blob/f2116d82dfd6b82fe178230766d95ea9ac2b0c8c/lib/matplotlib/_cm_listed.py#L1288 ] _twilight_shifted_data = (_twilight_data[len(_twilight_data)//2:] + _twilight_data[:len(_twilight_data)//2]) _twilight_shifted_data.reverse() cmaps = {} for (name, data) in (('twilight', _twilight_data), ('twilight_shifted', _twilight_shifted_data)): cmaps[name] = colors.ListedColormap(data, name=name) # generate reversed colormap name = name + '_r' cmaps[name] = colors.ListedColormap(list(reversed(data)), name=name) fig, ax = plt.subplots() p = ax.pcolormesh(np.arange(25).reshape(5, 5), cmap=cmaps['twilight']) fig.colorbar(p, ax=ax) plt.show() ``` That crates a dict with `twilight`, `twilight_r`, `twilight_shifted` and `twilight_shifted_r` colormaps. The script also produces this test image: [![enter image description here](https://i.stack.imgur.com/a4XPo.png)](https://i.stack.imgur.com/a4XPo.png)
You can create a new custom colormap as shown in this [tutorial](https://matplotlib.org/3.1.0/tutorials/colors/colormap-manipulation.html). The data for the "twilight" and "twilight\_shifted" colormaps is [here](https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/_cm_listed.py).
57,358,927
I would like to use the twilight or twilight\_shifted colormap in my 2.7 python build, but it seems to be python 3 only? Is there some way to manually add it?
2019/08/05
[ "https://Stackoverflow.com/questions/57358927", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1608765/" ]
`twilight` was added in matplotlib v3.0 which is python 3 only. But we can find where it was added in the source code are re-engineer it. In the code below, you just need to grab the data used for `twilight` from the matplotlib source on github, by following this [link](https://github.com/matplotlib/matplotlib/blob/f2116d82dfd6b82fe178230766d95ea9ac2b0c8c/lib/matplotlib/_cm_listed.py#L1288). ``` import numpy as np import matplotlib.pyplot as plt import matplotlib.colors as colors _twilight_data = [ # data too long for stack overflow. get it from here: # https://github.com/matplotlib/matplotlib/blob/f2116d82dfd6b82fe178230766d95ea9ac2b0c8c/lib/matplotlib/_cm_listed.py#L1288 ] _twilight_shifted_data = (_twilight_data[len(_twilight_data)//2:] + _twilight_data[:len(_twilight_data)//2]) _twilight_shifted_data.reverse() cmaps = {} for (name, data) in (('twilight', _twilight_data), ('twilight_shifted', _twilight_shifted_data)): cmaps[name] = colors.ListedColormap(data, name=name) # generate reversed colormap name = name + '_r' cmaps[name] = colors.ListedColormap(list(reversed(data)), name=name) fig, ax = plt.subplots() p = ax.pcolormesh(np.arange(25).reshape(5, 5), cmap=cmaps['twilight']) fig.colorbar(p, ax=ax) plt.show() ``` That crates a dict with `twilight`, `twilight_r`, `twilight_shifted` and `twilight_shifted_r` colormaps. The script also produces this test image: [![enter image description here](https://i.stack.imgur.com/a4XPo.png)](https://i.stack.imgur.com/a4XPo.png)
You can take the [`_cm_listed.py`](https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/_cm_listed.py) file from the current version and copy it to your matplotlib 2.2.3 folder. Since the file is version agnostic, this should directly give you the additional colormaps.
65,579,018
**What I intend to do :** I have an excel file with Voltage and Current data which I would like to extract from a specific sheet say 'IV\_RAW'. The values are only from 4th row and are in columns D and E. Lets say the values look like this: | V(voltage) | I(Current) | | --- | --- | | 47 | 1 | | 46 | 2 | | 45 | 3 | | 0 | 4 | | -0.1 | 5 | | -10 | 5 | Now, I just want to take out only the values starting with a voltage (V) of 45 and **shouldnt take negative voltages**. The corresponding current (I) values are also needed to be taken out. This has to be done for multiple excel files. So starting from a particular row number cannot be done instead voltage values should be the criterion. **What I know:** I know only how to take out the entire set of values using openxyl: ``` loc = ("path") wb = load_workbook("Data") #thefilename ws = wb["IV_raw"] #theactiveworksheet #to extract the voltage and current data: for row in ws.iter_rows(min_row=1, max_col=3, max_row=2, values_only=True): print(row) ``` ***I am a noon coder and new to python. So it will be really helpful if you guys could help. If there is a simplified versions with `pandas` it will be really great. Thank you in advance***
2021/01/05
[ "https://Stackoverflow.com/questions/65579018", "https://Stackoverflow.com", "https://Stackoverflow.com/users/14944185/" ]
The following uses `pandas` which you should definitly take a look at. with `sheet_name` you set the sheet\_name, `header` is the row index of the header (starting at 0, so Row 4 -> 3), `usecols` defines the columns using A1 notation. The last line filters the dataframe. If I understand correctly, then you want Voltage between 0 and 45, thats what the example does and df is your resulting data\_frame ``` import pandas as pd file_loc = "path.xlsx" df = pd.read_excel(file_loc, sheet_name = 'IV_raw', header = 3, usecols = "D:E") df = df[(df['V(voltage)'] > 0) & (df['V(voltage)'] < 45)] ```
you can try this, ``` import openpyxl tWorkbook = openpyxl.load_workbook("YOUR_FILEPATH") tDataBase = tWorkbook.active voltageVal= "D4" currentVal= "E4" V = tDataBase[voltageVal].value I = tDataBase[currentVal].value ```
65,579,018
**What I intend to do :** I have an excel file with Voltage and Current data which I would like to extract from a specific sheet say 'IV\_RAW'. The values are only from 4th row and are in columns D and E. Lets say the values look like this: | V(voltage) | I(Current) | | --- | --- | | 47 | 1 | | 46 | 2 | | 45 | 3 | | 0 | 4 | | -0.1 | 5 | | -10 | 5 | Now, I just want to take out only the values starting with a voltage (V) of 45 and **shouldnt take negative voltages**. The corresponding current (I) values are also needed to be taken out. This has to be done for multiple excel files. So starting from a particular row number cannot be done instead voltage values should be the criterion. **What I know:** I know only how to take out the entire set of values using openxyl: ``` loc = ("path") wb = load_workbook("Data") #thefilename ws = wb["IV_raw"] #theactiveworksheet #to extract the voltage and current data: for row in ws.iter_rows(min_row=1, max_col=3, max_row=2, values_only=True): print(row) ``` ***I am a noon coder and new to python. So it will be really helpful if you guys could help. If there is a simplified versions with `pandas` it will be really great. Thank you in advance***
2021/01/05
[ "https://Stackoverflow.com/questions/65579018", "https://Stackoverflow.com", "https://Stackoverflow.com/users/14944185/" ]
The following uses `pandas` which you should definitly take a look at. with `sheet_name` you set the sheet\_name, `header` is the row index of the header (starting at 0, so Row 4 -> 3), `usecols` defines the columns using A1 notation. The last line filters the dataframe. If I understand correctly, then you want Voltage between 0 and 45, thats what the example does and df is your resulting data\_frame ``` import pandas as pd file_loc = "path.xlsx" df = pd.read_excel(file_loc, sheet_name = 'IV_raw', header = 3, usecols = "D:E") df = df[(df['V(voltage)'] > 0) & (df['V(voltage)'] < 45)] ```
Building on from your example, you can use the following example to get what you need ``` from openpyxl import load_workbook wb = load_workbook(filepath,data_only=True) #load the file using its full path ws = wb["Sheet1"] #theactiveworksheet #to extract the voltage and current data: data = ws.iter_rows(min_col=4, max_col=5, min_row=2, max_row=ws.max_row, values_only=True) output = [row for row in data if row[0]>45] ```
65,579,018
**What I intend to do :** I have an excel file with Voltage and Current data which I would like to extract from a specific sheet say 'IV\_RAW'. The values are only from 4th row and are in columns D and E. Lets say the values look like this: | V(voltage) | I(Current) | | --- | --- | | 47 | 1 | | 46 | 2 | | 45 | 3 | | 0 | 4 | | -0.1 | 5 | | -10 | 5 | Now, I just want to take out only the values starting with a voltage (V) of 45 and **shouldnt take negative voltages**. The corresponding current (I) values are also needed to be taken out. This has to be done for multiple excel files. So starting from a particular row number cannot be done instead voltage values should be the criterion. **What I know:** I know only how to take out the entire set of values using openxyl: ``` loc = ("path") wb = load_workbook("Data") #thefilename ws = wb["IV_raw"] #theactiveworksheet #to extract the voltage and current data: for row in ws.iter_rows(min_row=1, max_col=3, max_row=2, values_only=True): print(row) ``` ***I am a noon coder and new to python. So it will be really helpful if you guys could help. If there is a simplified versions with `pandas` it will be really great. Thank you in advance***
2021/01/05
[ "https://Stackoverflow.com/questions/65579018", "https://Stackoverflow.com", "https://Stackoverflow.com/users/14944185/" ]
Building on from your example, you can use the following example to get what you need ``` from openpyxl import load_workbook wb = load_workbook(filepath,data_only=True) #load the file using its full path ws = wb["Sheet1"] #theactiveworksheet #to extract the voltage and current data: data = ws.iter_rows(min_col=4, max_col=5, min_row=2, max_row=ws.max_row, values_only=True) output = [row for row in data if row[0]>45] ```
you can try this, ``` import openpyxl tWorkbook = openpyxl.load_workbook("YOUR_FILEPATH") tDataBase = tWorkbook.active voltageVal= "D4" currentVal= "E4" V = tDataBase[voltageVal].value I = tDataBase[currentVal].value ```
26,513,125
I have some django view handler functions which are structured like this ``` def view1(request): # Check for authorization if not isAuthorized(request): return HttpResponse('Foo error', status=401) return HttpResponse('view1 data') def view2(request): # Check for authorization if not isAuthorized(request): return HttpResponse('Foo error', status=401) return HttpResponse('view2 data') def view3(request): # Check for authorization if not isAuthorized(request): return HttpResponse('Foo error', status=401) return HttpResponse('view3 data') ``` I want to make this part : ``` # Check for authorization if not isAuthorized(request): return HttpResponse('Foo error', status=401) ``` some sort of one-liner, so that I do not have to repeat it in each view In C this would have been a macro, but I've no clue how to achieve something similar in python The check authorization function part is an example, it can be any check which has nothing to do with user authorization in particular [Edit] <https://stackoverflow.com/users/2337736/peter-deglopper> mentions decorators ... To elaborate I have a web API that can take either POST or GET ``` # return either GET or POST dict whichever exists def getParams(request): if request.method == 'GET': return request.GET return request.POST ``` The views do this : ``` def someAPI(request): dct = getParams(request) if not isValid(dct): return HttpResponse('Bad request', status=401) ``` How could I acheive this with a decorator? I have that getParams() function in between....
2014/10/22
[ "https://Stackoverflow.com/questions/26513125", "https://Stackoverflow.com", "https://Stackoverflow.com/users/20392/" ]
The line `int ans = tmp->next;` appears to be the source of the problem. This is attempting to take the `next` pointer in the node, convert it to an `int`, and return it. What you (almost certainly) want is to retrieve the data from the node and return that, with something like `int ans = tmp->num;`. Of course, that's not saying the code is perfect otherwise (e.g., it seems to lack any attempt at checking for, not to mention dealing with, errors), but at least with that change, it stands some chance of working correctly under some (ideal) circumstances.
First, you are trying to delete `tmp` node, but top node still exist and value has to be returned as ans or top->next or in this situation top->num. Why do you initialize node `tmp` in the function when node `tmp` is a parameter? Why should node \* &top be in the function parameters instead of `tmp`. value = top->num doesn't fix the problem, because he wants the pointer from the top of the linked list not the random node inputed through the function parameters. To fix this problem `Node * tmp` should equal top and then value should be equal to tmp->num. Otherwise all other problems have been fixed. **//EDIT** Ignore everything before //edit because all that is questions about his question that I now already know. I have compiled this code and it completely worked for me. ``` struct Node { int data; Node *next; }; int pop(Node *head) { while(head->next != NULL) { head = head->next; } int value; Node *tmp; tmp = new Node; value = head->data; tmp = head; delete tmp; return value; } ``` Compiled code link - <http://ideone.com/7EgBhf>
26,513,125
I have some django view handler functions which are structured like this ``` def view1(request): # Check for authorization if not isAuthorized(request): return HttpResponse('Foo error', status=401) return HttpResponse('view1 data') def view2(request): # Check for authorization if not isAuthorized(request): return HttpResponse('Foo error', status=401) return HttpResponse('view2 data') def view3(request): # Check for authorization if not isAuthorized(request): return HttpResponse('Foo error', status=401) return HttpResponse('view3 data') ``` I want to make this part : ``` # Check for authorization if not isAuthorized(request): return HttpResponse('Foo error', status=401) ``` some sort of one-liner, so that I do not have to repeat it in each view In C this would have been a macro, but I've no clue how to achieve something similar in python The check authorization function part is an example, it can be any check which has nothing to do with user authorization in particular [Edit] <https://stackoverflow.com/users/2337736/peter-deglopper> mentions decorators ... To elaborate I have a web API that can take either POST or GET ``` # return either GET or POST dict whichever exists def getParams(request): if request.method == 'GET': return request.GET return request.POST ``` The views do this : ``` def someAPI(request): dct = getParams(request) if not isValid(dct): return HttpResponse('Bad request', status=401) ``` How could I acheive this with a decorator? I have that getParams() function in between....
2014/10/22
[ "https://Stackoverflow.com/questions/26513125", "https://Stackoverflow.com", "https://Stackoverflow.com/users/20392/" ]
Usually such a function throws an exception if the stack is empty or it has undefined behaviour. I used return value 0 in case when the stack is empty. ``` int pop( Node * &top ) { int value = 0; if ( top ) { value = top->num; Node *tmp = top; top = top->next; delete tmp; } return value; } ``` There is another approach when function poo has type void that is when it returns nothing but simply removes the element on the top.
First, you are trying to delete `tmp` node, but top node still exist and value has to be returned as ans or top->next or in this situation top->num. Why do you initialize node `tmp` in the function when node `tmp` is a parameter? Why should node \* &top be in the function parameters instead of `tmp`. value = top->num doesn't fix the problem, because he wants the pointer from the top of the linked list not the random node inputed through the function parameters. To fix this problem `Node * tmp` should equal top and then value should be equal to tmp->num. Otherwise all other problems have been fixed. **//EDIT** Ignore everything before //edit because all that is questions about his question that I now already know. I have compiled this code and it completely worked for me. ``` struct Node { int data; Node *next; }; int pop(Node *head) { while(head->next != NULL) { head = head->next; } int value; Node *tmp; tmp = new Node; value = head->data; tmp = head; delete tmp; return value; } ``` Compiled code link - <http://ideone.com/7EgBhf>
26,513,125
I have some django view handler functions which are structured like this ``` def view1(request): # Check for authorization if not isAuthorized(request): return HttpResponse('Foo error', status=401) return HttpResponse('view1 data') def view2(request): # Check for authorization if not isAuthorized(request): return HttpResponse('Foo error', status=401) return HttpResponse('view2 data') def view3(request): # Check for authorization if not isAuthorized(request): return HttpResponse('Foo error', status=401) return HttpResponse('view3 data') ``` I want to make this part : ``` # Check for authorization if not isAuthorized(request): return HttpResponse('Foo error', status=401) ``` some sort of one-liner, so that I do not have to repeat it in each view In C this would have been a macro, but I've no clue how to achieve something similar in python The check authorization function part is an example, it can be any check which has nothing to do with user authorization in particular [Edit] <https://stackoverflow.com/users/2337736/peter-deglopper> mentions decorators ... To elaborate I have a web API that can take either POST or GET ``` # return either GET or POST dict whichever exists def getParams(request): if request.method == 'GET': return request.GET return request.POST ``` The views do this : ``` def someAPI(request): dct = getParams(request) if not isValid(dct): return HttpResponse('Bad request', status=401) ``` How could I acheive this with a decorator? I have that getParams() function in between....
2014/10/22
[ "https://Stackoverflow.com/questions/26513125", "https://Stackoverflow.com", "https://Stackoverflow.com/users/20392/" ]
As mentioned in my [comment](https://stackoverflow.com/questions/26513124/pop-function-on-linked-list-stack/26513670#comment41656530_26513124) you should split this up to two separate functions. One to get the value, and another one to pop (remove) the `Node` ``` void pop(Node*& top) { // Note the reference. You want to change the current top node. // ^ if ( top ) { Node *tmp = top; top = top->next; delete tmp; } } int& top(Node* top) { if ( top ) { return top->num; } // Throw an appropriate exception if the stack is empty throw std::out_of_range("Stack is empty."); } ```
First, you are trying to delete `tmp` node, but top node still exist and value has to be returned as ans or top->next or in this situation top->num. Why do you initialize node `tmp` in the function when node `tmp` is a parameter? Why should node \* &top be in the function parameters instead of `tmp`. value = top->num doesn't fix the problem, because he wants the pointer from the top of the linked list not the random node inputed through the function parameters. To fix this problem `Node * tmp` should equal top and then value should be equal to tmp->num. Otherwise all other problems have been fixed. **//EDIT** Ignore everything before //edit because all that is questions about his question that I now already know. I have compiled this code and it completely worked for me. ``` struct Node { int data; Node *next; }; int pop(Node *head) { while(head->next != NULL) { head = head->next; } int value; Node *tmp; tmp = new Node; value = head->data; tmp = head; delete tmp; return value; } ``` Compiled code link - <http://ideone.com/7EgBhf>
56,439,798
I have a camera running on rtsp link. I want to write python code to check if the camera is live or dead. Similar to using curl to check http if url is working or not. What is a similar command can one use to check rtsp url status? I have tried using openRTSP on terminal and I want to use it as python script openRTSP rtsp://test\_url\_here
2019/06/04
[ "https://Stackoverflow.com/questions/56439798", "https://Stackoverflow.com", "https://Stackoverflow.com/users/6006820/" ]
You can call FFMPEG to extract a snapshot. If successful stream is accessible. Test this functionality (exctracting snapshot from rtsp) with <https://videonow.live/broadcast-ip-camera-or-stream/> per tutorial at <https://broadcastlivevideo.com/publish-ip-camera-stream-to-website/>. Command to extract should be something like: ``` /usr/bin/ffmpeg -y -frames 1 snapshot.png -rtsp_transport tcp -i rtsp://test_url_here ``` Then check if snapshot file was created and is not empty. You can find exact code for functionality in this free open source WP plugin <https://wordpress.org/plugins/videowhisper-live-streaming-integration/> .
You can use the `opencv_python` module to play rtsp stream. Sample codes: ``` import cv2 cap=cv2.VideoCapture("rtsp://admin:admin123@test_url_here") ret,frame = cap.read() while ret: ret,frame = cap.read() cv2.imshow("frame",frame) if cv2.waitKey(1) & 0xFF == ord('q'): break cv2.destroyAllWindows() cap.release() ```
27,466,862
There's something wrong with my OSX system and python that no amount of googling has fixed. I've uninstalled all traces of python except the system python package with OSX that I'm not supposed to uninstall, and then started afresh with a new python from python.org, and installed pip. Now...not sure if this particular behavior below is part of the issue, but it seems strange to me: I ran python twice. Once with sudo and once without. Without sudo, I can't access pip. What's going on? ``` $ sudo /Library/Frameworks/Python.framework/Versions/2.7/bin/python Python 2.7.9 (v2.7.9:648dcafa7e5f, Dec 10 2014, 10:10:46) [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pip Traceback (most recent call last): File "<stdin>", line 1, in <module> ImportError: No module named pip ``` However... ``` $ /Library/Frameworks/Python.framework/Versions/2.7/bin/python Python 2.7.9 (v2.7.9:648dcafa7e5f, Dec 10 2014, 10:10:46) [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pip >>> ``` I've already referred to: [sudo python runs old python version](https://stackoverflow.com/questions/15441440/sudo-python-runs-old-python-version) I have nothing in my .bash\_profile, or anything in any other profiles. All I've done is the following: ``` export PYTHONPATH=/lib/python2.7/site-packages/ ``` `ls $PYTHONPATH` returns: ``` _markerlib pip pkg_resources.pyc setuptools-8.0.1.dist-info virtualenv.pyc easy_install.py pip-1.5.6.dist-info setuptools virtualenv-1.11.6.dist-info virtualenv_support easy_install.pyc pkg_resources.py setuptools-7.0.dist-info virtualenv.py ``` `which pip` returns: ``` /bin/pip ```
2014/12/14
[ "https://Stackoverflow.com/questions/27466862", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1119779/" ]
`sudo` overrides your `export`. It's the same Python (as you can easily tell from the version information it prints) but it runs with a different (system default) `PYTHONPATH`. This is one of the jobs of `sudo`; it sanitizes the environment to safe defaults. You may be able to tweak this, but the real question is, what are you trying to accomplish? If you need to run as `root` with a particular environment, set up a `virtualenv` and/or write a wrapper script which sets things up before dispatching Python.
What do you get when you compare the output of `which pip` and `sudo which pip`? On my system I get different outputs. If you do, I'm not sure how to fix that, but you could try to force the sudo'd python to look in the correct directory: ``` import sys sys.path.insert(0, '/lib/python2.7/site-packages/') import pip ```
21,783,840
I have a CSV file that has numerous data points included in each row, despite belonging to the same column. Something similar to this: ``` A, B, C, X, Y, Z ``` Now, what I would like to do is to reformat the file such that the resulting CSV is: ``` A, B, C X, Y, Z ``` I'm not too sure how to go about this / express it in a pythonic way. One idea (which I wasn't sure how to code) was to create a for loop similar to ``` For elements in file: new_csv = [] counter = 0 max_counter = 3 ##I'm using english because I dont know how to express it in python syntax take first element in CSV and add 1 to counter append first element to new_csv is counter < max_counter? if yes: go to next element if no: append /n to new csv then go to next element ``` This probably is a terrible way to do it, but I'm struggling to find another way. How would I express this in the python syntax? Any ideas?
2014/02/14
[ "https://Stackoverflow.com/questions/21783840", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2179795/" ]
Just write two rows to an output [`csv.writer()` object](http://docs.python.org/2/library/csv.html#csv.writer): ``` with open(inputfilename, 'rb') as infh, open(outputfilename, 'wb') as outfh: reader = csv.reader(infh) writer = csv.writer(outfh) for row in reader: writer.writerows([row[:3], row[3:]]) ``` Each row the `csv.reader()` object produces is a list object, so all we have to do is using the [Python slice notation](https://stackoverflow.com/questions/509211/pythons-slice-notation) to slice of a new list object of the first 3 elements, and another list object for the remaining 3 elements.
You could probably use python's [CSV module](http://docs.python.org/2/library/csv.html) Example: ``` #!/usr/bin/env python import csv with open("input.csv", "r") as input_file, open("output.csv", "w+"): input_csv, output_csv = csv.reader(input_file), csv.writer(output_file); for row in input_csv: output_csv.writerows([row[:len(row)/2],row[len(row)/2]]); ``` Which given the input ``` a,b,c,d,e,f A,B,C,D,E,F,G 1,2,3,4,5,6 ``` will output ``` a,b,c d,e,f A,B,C D,E,F,G 1,2,3 4,5,6 ```
21,783,840
I have a CSV file that has numerous data points included in each row, despite belonging to the same column. Something similar to this: ``` A, B, C, X, Y, Z ``` Now, what I would like to do is to reformat the file such that the resulting CSV is: ``` A, B, C X, Y, Z ``` I'm not too sure how to go about this / express it in a pythonic way. One idea (which I wasn't sure how to code) was to create a for loop similar to ``` For elements in file: new_csv = [] counter = 0 max_counter = 3 ##I'm using english because I dont know how to express it in python syntax take first element in CSV and add 1 to counter append first element to new_csv is counter < max_counter? if yes: go to next element if no: append /n to new csv then go to next element ``` This probably is a terrible way to do it, but I'm struggling to find another way. How would I express this in the python syntax? Any ideas?
2014/02/14
[ "https://Stackoverflow.com/questions/21783840", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2179795/" ]
Just write two rows to an output [`csv.writer()` object](http://docs.python.org/2/library/csv.html#csv.writer): ``` with open(inputfilename, 'rb') as infh, open(outputfilename, 'wb') as outfh: reader = csv.reader(infh) writer = csv.writer(outfh) for row in reader: writer.writerows([row[:3], row[3:]]) ``` Each row the `csv.reader()` object produces is a list object, so all we have to do is using the [Python slice notation](https://stackoverflow.com/questions/509211/pythons-slice-notation) to slice of a new list object of the first 3 elements, and another list object for the remaining 3 elements.
*This solution is for Python 3. For Python 2, see [Martijn Pieters's answer](https://stackoverflow.com/a/21784082/1970751) (differs in the way files are opened).* Python has a module to [read](http://docs.python.org/3.3/library/csv.html?highlight=csv.writer#csv.reader) and [write](http://docs.python.org/3.3/library/csv.html?highlight=csv.writer#csv.writer) csv files. There are some [nice examples](http://docs.python.org/3.3/library/csv.html?highlight=csv.writer#examples) in the official documentation on how to use them. (Note the `newline=""` for output file, this lets the csv module take care of line return, which according to the specification should be `\r\n`, `\n` being used for line feed in a cell.) While reading, you then split the rows with [slicing](http://docs.python.org/3.3/glossary.html#term-slice) and feed the parts in the writer. ``` import csv # with ensures files are closed when leaving block with open("in.csv", "r") as fin, open("out.csv", "w", newline="") as fout: reader = csv.reader(fin, delimiter=",") writer =csv.writer(fout, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL) # Rows are each line of the csv returned as a list for row in reader: # Separate the row: # from beginning to 3rd element, and from 3rd element to end writer.writerows((row[:3], row[3:]) ```
21,783,840
I have a CSV file that has numerous data points included in each row, despite belonging to the same column. Something similar to this: ``` A, B, C, X, Y, Z ``` Now, what I would like to do is to reformat the file such that the resulting CSV is: ``` A, B, C X, Y, Z ``` I'm not too sure how to go about this / express it in a pythonic way. One idea (which I wasn't sure how to code) was to create a for loop similar to ``` For elements in file: new_csv = [] counter = 0 max_counter = 3 ##I'm using english because I dont know how to express it in python syntax take first element in CSV and add 1 to counter append first element to new_csv is counter < max_counter? if yes: go to next element if no: append /n to new csv then go to next element ``` This probably is a terrible way to do it, but I'm struggling to find another way. How would I express this in the python syntax? Any ideas?
2014/02/14
[ "https://Stackoverflow.com/questions/21783840", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2179795/" ]
You could probably use python's [CSV module](http://docs.python.org/2/library/csv.html) Example: ``` #!/usr/bin/env python import csv with open("input.csv", "r") as input_file, open("output.csv", "w+"): input_csv, output_csv = csv.reader(input_file), csv.writer(output_file); for row in input_csv: output_csv.writerows([row[:len(row)/2],row[len(row)/2]]); ``` Which given the input ``` a,b,c,d,e,f A,B,C,D,E,F,G 1,2,3,4,5,6 ``` will output ``` a,b,c d,e,f A,B,C D,E,F,G 1,2,3 4,5,6 ```
*This solution is for Python 3. For Python 2, see [Martijn Pieters's answer](https://stackoverflow.com/a/21784082/1970751) (differs in the way files are opened).* Python has a module to [read](http://docs.python.org/3.3/library/csv.html?highlight=csv.writer#csv.reader) and [write](http://docs.python.org/3.3/library/csv.html?highlight=csv.writer#csv.writer) csv files. There are some [nice examples](http://docs.python.org/3.3/library/csv.html?highlight=csv.writer#examples) in the official documentation on how to use them. (Note the `newline=""` for output file, this lets the csv module take care of line return, which according to the specification should be `\r\n`, `\n` being used for line feed in a cell.) While reading, you then split the rows with [slicing](http://docs.python.org/3.3/glossary.html#term-slice) and feed the parts in the writer. ``` import csv # with ensures files are closed when leaving block with open("in.csv", "r") as fin, open("out.csv", "w", newline="") as fout: reader = csv.reader(fin, delimiter=",") writer =csv.writer(fout, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL) # Rows are each line of the csv returned as a list for row in reader: # Separate the row: # from beginning to 3rd element, and from 3rd element to end writer.writerows((row[:3], row[3:]) ```
74,304,917
I'm having trouble trying to find the parameters of a gaussian curve fit. The site <https://mycurvefit.com/> provides a good answer fairly quickly. However, my implementation with python's curve\_fit(), from the scipy.optimize library, is not providing good results (even when inputting the answers). For instance, the equation I'm trying to fit is the following: ``` def gauss_func(x, a, b, c): return a * np.exp(-(x-b)**2/(2*c**2)) ``` With input points: ``` x_main = np.array([19.748, 39.611, 59.465]) y_main = np.array([0.438160379, 0.008706677, 0.000160106]) ``` where I want to find the parameters **a**, **b** and **c**. From the mycurvefit website, I get the answers: a = 4821416 b = -154.0293 c = 30.51661 Which fit nicely the given points. But when I try to run with curve\_fit(): ``` poptMain, pcovMain = curve_fit(gauss_func, x_main, y_main, p0=(1, -1, 1),sigma=np.array([1,1,1])) ``` I get the **"RuntimeError: Optimal parameters not found: Number of calls to function has reached maxfev = 800."** error. What I tried: * Changing the maxfev to other values, such as 5000, 10000, 100000 (no effect). * Replacing the initial guess p0 to values closer to the mycurvefit answer (no effect) and common values such as [1, 1, 1], [1, 0, 1], etc (no effect). Even when inputting the answer, it still won't find the parameters! I have used this same code before with other similar cases, and it worked nicely. But this time it's not converging at all. What could I do to solve this?
2022/11/03
[ "https://Stackoverflow.com/questions/74304917", "https://Stackoverflow.com", "https://Stackoverflow.com/users/14703689/" ]
One way to do it is using window functions. The first one ([**`lag`**](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.lag.html#pyspark.sql.functions.lag)) marks the row if it is different than the previous. The second ([**`sum`**](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.sum.html#pyspark.sql.functions.sum)) marks all "Block" rows for previously marked rows. Lastly, deleting roes and the helper (`_flag`) column. Input: ```py from pyspark.sql import functions as F, Window as W df = spark.createDataFrame( [(1, 'A'), (1, 'C'), (1, 'C'), (3, 'A'), (3, 'B')], ['ID', 'Block']) ``` Script: ```py w1 = W.partitionBy('Block').orderBy('ID') w2 = W.partitionBy('Block') grp = F.when(F.lag('ID').over(w1) != F.col('ID'), 1).otherwise(0) df = df.withColumn('_flag', F.sum(grp).over(w2) == 0) \ .filter('_flag').drop('_flag') df.show() # +---+-----+ # | ID|Block| # +---+-----+ # | 3| B| # | 1| C| # | 1| C| # +---+-----+ ```
Use window functions. get ranks per group of blocks and through away any rows that rank higher than 1. Code below ``` (df.withColumn('index', row_number().over(Window.partitionBy().orderBy('ID','Block')))#create an index to reorder after comps .withColumn('BlockRank', rank().over(Window.partitionBy('Block').orderBy('ID'))).orderBy('index')#Rank per Block .where(col('BlockRank')==1) .drop('index','BlockRank') ).show() +---+-----+ | ID|Block| +---+-----+ | 1| A| | 1| C| | 1| C| | 3| B| +---+-----+ ```
63,574,704
I have the following `Dockerfile`: ``` # beginning of the the docker ... ARG SIGNAL_ID CMD python ./my_repo/my_main.py --signal_id $SIGNAL_ID ``` I also have a `docker-compose.yml` with all the needed information for the service ``` version: '3' services: my_app: build: . # additional info ... ``` How would I run two services with different `SIGNAL_ID`'s based on the same Docker files? I have tried the following, however I am unsure how to run them as both of them would be named `my_app`: ``` docker-compose build --build-arg SIGNAL_ID=5 docker-compose build --build-arg SIGNAL_ID=6 ``` Maybe there is a way to pass `SIGNAL_ID` not via the build-arg but directly to the `Dockerfile` during `docker-compose up`? something similar to `docker run -e SIGNAL_ID=5`? --- You could have two different `docker-compose.yml`'s, but considering that the only difference between them would be `SIGNAL_ID`, I would like to avoid it.
2020/08/25
[ "https://Stackoverflow.com/questions/63574704", "https://Stackoverflow.com", "https://Stackoverflow.com/users/9253013/" ]
You can convert an integral value to its decimal representation with [`std::to_string`](https://en.cppreference.com/w/cpp/string/basic_string/to_string): ``` std::string const dec = std::to_string(num); ``` If you have a character array, say `char a[4]`, you can copy the data there element-wise: ``` for (std::size_t i = 0; std::begin(a) + i < std::end(a) && i < dec.size(); ++i) { a[i] = dec[i] - '0'; } ``` --- Edit: See [Konrad Rudolph's answer](https://stackoverflow.com/a/63574802/430766) for a simpler (and presumably faster) way of doing this.
a way is decomposing the number in hundred, tens and units... modulo can help and log10 will be useful too: this is going to be a nice work around if you arent allowed to convert to string here an example: ``` int value = 256; int myArray[3]; auto m = static_cast<int>(ceil(log10(value))); for(int i =0; i < m; ++i) { myArray[m-1-i] = static_cast<int>(value/pow(10,i))%10; } ```
63,574,704
I have the following `Dockerfile`: ``` # beginning of the the docker ... ARG SIGNAL_ID CMD python ./my_repo/my_main.py --signal_id $SIGNAL_ID ``` I also have a `docker-compose.yml` with all the needed information for the service ``` version: '3' services: my_app: build: . # additional info ... ``` How would I run two services with different `SIGNAL_ID`'s based on the same Docker files? I have tried the following, however I am unsure how to run them as both of them would be named `my_app`: ``` docker-compose build --build-arg SIGNAL_ID=5 docker-compose build --build-arg SIGNAL_ID=6 ``` Maybe there is a way to pass `SIGNAL_ID` not via the build-arg but directly to the `Dockerfile` during `docker-compose up`? something similar to `docker run -e SIGNAL_ID=5`? --- You could have two different `docker-compose.yml`'s, but considering that the only difference between them would be `SIGNAL_ID`, I would like to avoid it.
2020/08/25
[ "https://Stackoverflow.com/questions/63574704", "https://Stackoverflow.com", "https://Stackoverflow.com/users/9253013/" ]
C++17 has [`std::to_chars`](https://en.cppreference.com/w/cpp/utility/to_chars) for this purpose: ``` char a[10]; if (auto const result = std::to_chars(a, a + sizeof a - 1, 256); result.ec != std::errc()) { // An error occurred. } else { *result.ptr = '\0'; } ```
a way is decomposing the number in hundred, tens and units... modulo can help and log10 will be useful too: this is going to be a nice work around if you arent allowed to convert to string here an example: ``` int value = 256; int myArray[3]; auto m = static_cast<int>(ceil(log10(value))); for(int i =0; i < m; ++i) { myArray[m-1-i] = static_cast<int>(value/pow(10,i))%10; } ```
7,151,776
*Edit: Let me try to reword and improve my question. The old version is attached at the bottom.* What I am looking for is a way to express and use free functions in a type-generic way. Examples: ``` abs(x) # maps to x.__abs__() next(x) # maps to x.__next__() at least in Python 3 -x # maps to x.__neg__() ``` In these cases the functions have been designed in a way that allows users with user-defined types to customize their behaviour by delegating the work to a non-static method call. This is nice. It allows us to write functions that don't really care about the exact parameter types as long as they "feel" like objects that model a certain concept. Counter examples: Functions that can't be easily used generically: ``` math.exp # only for reals cmath.exp # takes complex numbers ``` Suppose, I want to write a generic function that applies exp on a list of number-like objects. What exp function should I use? How do I select the correct one? ``` def listexp(lst): return [math.exp(x) for x in lst] ``` Obviously, this won't work for lists of complex numbers even though there is an exp for complex numbers (in cmath). And it also won't work for any user-defined number-like type which might offer its own special exp function. So, what I'm looking for is a way to deal with this on both sides -- ideally without special casing a lot of things. As a writer of some generic function that does not care about the exact types of parameters I want to use the correct mathematical functions that is specific to the types involved without having to deal with this explicitly. As a writer of a user-defined type, I would like to expose special mathematical functions that have been augmented to deal with additional data stored in those objects (similar to the imaginary part of complex numbers). What is the preferred pattern/protocol/idiom for doing that? I did not yet test `numpy`. But I downloaded its source code. As far as I know, it offers a sin function for arrays. 
Unfortunately, I haven't found its implementation yet in the source code. But it would be interesting to see how they managed to pick the right sin function for the right type of numbers the array currently stores. In C++ I would have relied on function overloading and ADL (argument-dependent lookup). With C++ being statically typed, it should come as no surprise that this (name lookup, overload resolution) is handled completely at compile-time. I suppose, I could emulate this at runtime with Python and the reflective tools Python has to offer. But I also know that trying to import a coding style into another language might be a bad idea and not very idiomatic in the new language. So, if you have a different idea for an approach, I'm all ears. I guess, somewhere at some point I need to manually do some type-dependent dispatching in an extensible way. Maybe write a module "tgmath" (type generic math) that comes with support for real and complex support as well as allows others to register their types and special case functions... Opinions? What do the Python masters say about this? TIA Edit: Apparently, I'm not the only one who is interested in generic functions and type-dependent overloading. There is [PEP 3124](http://www.python.org/dev/peps/pep-3124/) but it is in draft state since 4 years ago. --- *Old version of the question:* I have a strong background in Java and C++ and just recently started learning Python. What I'm wondering about is: How do we extend mathematical functions (at least their names) so they work on other user-defined types? Do these kinds of functions offer any kind of extension point/hook I can leverage (similar to the iterator protocol where `next(obj)` actually delegates to `obj.__next__`, etc) ? In C++ I would have simply overloaded the function with the new parameter type and have the compiler figure out which of the functions was meant using the argument expressions' static types. 
But since Python is a very dynamic language there is no such thing as overloading. What is the preferred Python way of doing this? Also, when I write custom functions, I would like to avoid long chains of ``` if isinstance(arg,someClass): suchandsuch elif ... ``` What are the patterns I could use to make the code look prettier and more Pythonish? I guess, I'm basically trying to deal with the lack of function overloading in Python. At least in C++ overloading and argument-dependent lookup is an important part of good C++ style. Is it possible to make ``` x = udt(something) # object of user-defined type that represents a number y = sin(x) # how do I make this invoke custom type-specific code for sin? t = abs(x) # works because abs delegates to __abs__() which I defined. ``` work? I know I could make sin a non-static method of the class. But then I lose genericity because for every other kind of number-like object it's `sin(x)` and not `x.sin()`. Adding a `__float__` method is not acceptable since I keep additional information in the object such as derivatives for "automatic differentiation". TIA Edit: If you're curious about what the code looks like, check [this](http://ideone.com/H6z60) out. In an ideal world I would be able to use sin/cos/sqrt in a type-generic way. I consider these functions part of the objects interface even if they are "free functions". In `__somefunction` I did not qualify the functions with `math.` nor `__main__.`. It just works because I manually fall back on `math.sin` (etc) in my custom functions via the decorator. But I consider this to be an ugly hack.
2011/08/22
[ "https://Stackoverflow.com/questions/7151776", "https://Stackoverflow.com", "https://Stackoverflow.com/users/172531/" ]
you can do this, but it works backwards. you implement `__float__()` in your new type and then `sin()` will work with your class. in other words, you don't adapt sine to work on other types; you adapt those types so that they work with sine. this is better because it forces consistency. if there is no obvious mapping from your object to a float then there probably isn't a reasonable interpretation of `sin()` for that type. [sorry if i missed the "\_\_float\_\_ won't work" part earlier; perhaps you added that in response to this? anyway, for convincing proof that what you want isn't possible, python has the cmath library to add `sin()` etc for complex numbers...]
Typically the answer to questions like this is "you don't" or "use duck typing". Can you provide a little more detail about what you want to do? Have you looked at the remainder of the protocol methods for numeric types? <http://docs.python.org/reference/datamodel.html#emulating-numeric-types>
7,151,776
*Edit: Let me try to reword and improve my question. The old version is attached at the bottom.* What I am looking for is a way to express and use free functions in a type-generic way. Examples: ``` abs(x) # maps to x.__abs__() next(x) # maps to x.__next__() at least in Python 3 -x # maps to x.__neg__() ``` In these cases the functions have been designed in a way that allows users with user-defined types to customize their behaviour by delegating the work to a non-static method call. This is nice. It allows us to write functions that don't really care about the exact parameter types as long as they "feel" like objects that model a certain concept. Counter examples: Functions that can't be easily used generically: ``` math.exp # only for reals cmath.exp # takes complex numbers ``` Suppose, I want to write a generic function that applies exp on a list of number-like objects. What exp function should I use? How do I select the correct one? ``` def listexp(lst): return [math.exp(x) for x in lst] ``` Obviously, this won't work for lists of complex numbers even though there is an exp for complex numbers (in cmath). And it also won't work for any user-defined number-like type which might offer its own special exp function. So, what I'm looking for is a way to deal with this on both sides -- ideally without special casing a lot of things. As a writer of some generic function that does not care about the exact types of parameters I want to use the correct mathematical functions that is specific to the types involved without having to deal with this explicitly. As a writer of a user-defined type, I would like to expose special mathematical functions that have been augmented to deal with additional data stored in those objects (similar to the imaginary part of complex numbers). What is the preferred pattern/protocol/idiom for doing that? I did not yet test `numpy`. But I downloaded its source code. As far as I know, it offers a sin function for arrays. 
Unfortunately, I haven't found its implementation yet in the source code. But it would be interesting to see how they managed to pick the right sin function for the right type of numbers the array currently stores. In C++ I would have relied on function overloading and ADL (argument-dependent lookup). With C++ being statically typed, it should come as no surprise that this (name lookup, overload resolution) is handled completely at compile-time. I suppose, I could emulate this at runtime with Python and the reflective tools Python has to offer. But I also know that trying to import a coding style into another language might be a bad idea and not very idiomatic in the new language. So, if you have a different idea for an approach, I'm all ears. I guess, somewhere at some point I need to manually do some type-dependent dispatching in an extensible way. Maybe write a module "tgmath" (type generic math) that comes with support for real and complex support as well as allows others to register their types and special case functions... Opinions? What do the Python masters say about this? TIA Edit: Apparently, I'm not the only one who is interested in generic functions and type-dependent overloading. There is [PEP 3124](http://www.python.org/dev/peps/pep-3124/) but it is in draft state since 4 years ago. --- *Old version of the question:* I have a strong background in Java and C++ and just recently started learning Python. What I'm wondering about is: How do we extend mathematical functions (at least their names) so they work on other user-defined types? Do these kinds of functions offer any kind of extension point/hook I can leverage (similar to the iterator protocol where `next(obj)` actually delegates to `obj.__next__`, etc) ? In C++ I would have simply overloaded the function with the new parameter type and have the compiler figure out which of the functions was meant using the argument expressions' static types. 
But since Python is a very dynamic language there is no such thing as overloading. What is the preferred Python way of doing this? Also, when I write custom functions, I would like to avoid long chains of ``` if isinstance(arg,someClass): suchandsuch elif ... ``` What are the patterns I could use to make the code look prettier and more Pythonish? I guess, I'm basically trying to deal with the lack of function overloading in Python. At least in C++ overloading and argument-dependent lookup is an important part of good C++ style. Is it possible to make ``` x = udt(something) # object of user-defined type that represents a number y = sin(x) # how do I make this invoke custom type-specific code for sin? t = abs(x) # works because abs delegates to __abs__() which I defined. ``` work? I know I could make sin a non-static method of the class. But then I lose genericity because for every other kind of number-like object it's `sin(x)` and not `x.sin()`. Adding a `__float__` method is not acceptable since I keep additional information in the object such as derivatives for "automatic differentiation". TIA Edit: If you're curious about what the code looks like, check [this](http://ideone.com/H6z60) out. In an ideal world I would be able to use sin/cos/sqrt in a type-generic way. I consider these functions part of the objects interface even if they are "free functions". In `__somefunction` I did not qualify the functions with `math.` nor `__main__.`. It just works because I manually fall back on `math.sin` (etc) in my custom functions via the decorator. But I consider this to be an ugly hack.
2011/08/22
[ "https://Stackoverflow.com/questions/7151776", "https://Stackoverflow.com", "https://Stackoverflow.com/users/172531/" ]
If you want the return type of `math.sin()` to be your user-defined type, you appear to be out of luck. Python's `math` library is basically a thin wrapper around a fast native IEEE 754 floating point math library. If you want to be internally consistent and duck-typed, you can at least put the extensibility shim that python is missing into your own code. ``` def sin(x): try: return x.__sin__() except AttributeError: return math.sin(x) ``` Now you can import this `sin` function and use it indiscriminately wherever you used `math.sin` previously. It's not quite as pretty as having `math.sin` pick up your duck-typing automatically but at least it can be consistent within your codebase.
Typically the answer to questions like this is "you don't" or "use duck typing". Can you provide a little more detail about what you want to do? Have you looked at the remainder of the protocol methods for numeric types? <http://docs.python.org/reference/datamodel.html#emulating-numeric-types>
7,151,776
*Edit: Let me try to reword and improve my question. The old version is attached at the bottom.* What I am looking for is a way to express and use free functions in a type-generic way. Examples: ``` abs(x) # maps to x.__abs__() next(x) # maps to x.__next__() at least in Python 3 -x # maps to x.__neg__() ``` In these cases the functions have been designed in a way that allows users with user-defined types to customize their behaviour by delegating the work to a non-static method call. This is nice. It allows us to write functions that don't really care about the exact parameter types as long as they "feel" like objects that model a certain concept. Counter examples: Functions that can't be easily used generically: ``` math.exp # only for reals cmath.exp # takes complex numbers ``` Suppose, I want to write a generic function that applies exp on a list of number-like objects. What exp function should I use? How do I select the correct one? ``` def listexp(lst): return [math.exp(x) for x in lst] ``` Obviously, this won't work for lists of complex numbers even though there is an exp for complex numbers (in cmath). And it also won't work for any user-defined number-like type which might offer its own special exp function. So, what I'm looking for is a way to deal with this on both sides -- ideally without special casing a lot of things. As a writer of some generic function that does not care about the exact types of parameters I want to use the correct mathematical functions that is specific to the types involved without having to deal with this explicitly. As a writer of a user-defined type, I would like to expose special mathematical functions that have been augmented to deal with additional data stored in those objects (similar to the imaginary part of complex numbers). What is the preferred pattern/protocol/idiom for doing that? I did not yet test `numpy`. But I downloaded its source code. As far as I know, it offers a sin function for arrays. 
Unfortunately, I haven't found its implementation yet in the source code. But it would be interesting to see how they managed to pick the right sin function for the right type of numbers the array currently stores. In C++ I would have relied on function overloading and ADL (argument-dependent lookup). With C++ being statically typed, it should come as no surprise that this (name lookup, overload resolution) is handled completely at compile-time. I suppose, I could emulate this at runtime with Python and the reflective tools Python has to offer. But I also know that trying to import a coding style into another language might be a bad idea and not very idiomatic in the new language. So, if you have a different idea for an approach, I'm all ears. I guess, somewhere at some point I need to manually do some type-dependent dispatching in an extensible way. Maybe write a module "tgmath" (type generic math) that comes with support for real and complex support as well as allows others to register their types and special case functions... Opinions? What do the Python masters say about this? TIA Edit: Apparently, I'm not the only one who is interested in generic functions and type-dependent overloading. There is [PEP 3124](http://www.python.org/dev/peps/pep-3124/) but it is in draft state since 4 years ago. --- *Old version of the question:* I have a strong background in Java and C++ and just recently started learning Python. What I'm wondering about is: How do we extend mathematical functions (at least their names) so they work on other user-defined types? Do these kinds of functions offer any kind of extension point/hook I can leverage (similar to the iterator protocol where `next(obj)` actually delegates to `obj.__next__`, etc) ? In C++ I would have simply overloaded the function with the new parameter type and have the compiler figure out which of the functions was meant using the argument expressions' static types. 
But since Python is a very dynamic language there is no such thing as overloading. What is the preferred Python way of doing this? Also, when I write custom functions, I would like to avoid long chains of ``` if isinstance(arg,someClass): suchandsuch elif ... ``` What are the patterns I could use to make the code look prettier and more Pythonish? I guess, I'm basically trying to deal with the lack of function overloading in Python. At least in C++ overloading and argument-dependent lookup is an important part of good C++ style. Is it possible to make ``` x = udt(something) # object of user-defined type that represents a number y = sin(x) # how do I make this invoke custom type-specific code for sin? t = abs(x) # works because abs delegates to __abs__() which I defined. ``` work? I know I could make sin a non-static method of the class. But then I lose genericity because for every other kind of number-like object it's `sin(x)` and not `x.sin()`. Adding a `__float__` method is not acceptable since I keep additional information in the object such as derivatives for "automatic differentiation". TIA Edit: If you're curious about what the code looks like, check [this](http://ideone.com/H6z60) out. In an ideal world I would be able to use sin/cos/sqrt in a type-generic way. I consider these functions part of the objects interface even if they are "free functions". In `__somefunction` I did not qualify the functions with `math.` nor `__main__.`. It just works because I manually fall back on `math.sin` (etc) in my custom functions via the decorator. But I consider this to be an ugly hack.
2011/08/22
[ "https://Stackoverflow.com/questions/7151776", "https://Stackoverflow.com", "https://Stackoverflow.com/users/172531/" ]
Define your own versions in a module. This is what's done in cmath for complex number and in numpy for arrays.
Typically the answer to questions like this is "you don't" or "use duck typing". Can you provide a little more detail about what you want to do? Have you looked at the remainder of the protocol methods for numeric types? <http://docs.python.org/reference/datamodel.html#emulating-numeric-types>
7,151,776
*Edit: Let me try to reword and improve my question. The old version is attached at the bottom.* What I am looking for is a way to express and use free functions in a type-generic way. Examples: ``` abs(x) # maps to x.__abs__() next(x) # maps to x.__next__() at least in Python 3 -x # maps to x.__neg__() ``` In these cases the functions have been designed in a way that allows users with user-defined types to customize their behaviour by delegating the work to a non-static method call. This is nice. It allows us to write functions that don't really care about the exact parameter types as long as they "feel" like objects that model a certain concept. Counter examples: Functions that can't be easily used generically: ``` math.exp # only for reals cmath.exp # takes complex numbers ``` Suppose, I want to write a generic function that applies exp on a list of number-like objects. What exp function should I use? How do I select the correct one? ``` def listexp(lst): return [math.exp(x) for x in lst] ``` Obviously, this won't work for lists of complex numbers even though there is an exp for complex numbers (in cmath). And it also won't work for any user-defined number-like type which might offer its own special exp function. So, what I'm looking for is a way to deal with this on both sides -- ideally without special casing a lot of things. As a writer of some generic function that does not care about the exact types of parameters I want to use the correct mathematical functions that is specific to the types involved without having to deal with this explicitly. As a writer of a user-defined type, I would like to expose special mathematical functions that have been augmented to deal with additional data stored in those objects (similar to the imaginary part of complex numbers). What is the preferred pattern/protocol/idiom for doing that? I did not yet test `numpy`. But I downloaded its source code. As far as I know, it offers a sin function for arrays. 
Unfortunately, I haven't found its implementation yet in the source code. But it would be interesting to see how they managed to pick the right sin function for the right type of numbers the array currently stores. In C++ I would have relied on function overloading and ADL (argument-dependent lookup). With C++ being statically typed, it should come as no surprise that this (name lookup, overload resolution) is handled completely at compile-time. I suppose, I could emulate this at runtime with Python and the reflective tools Python has to offer. But I also know that trying to import a coding style into another language might be a bad idea and not very idiomatic in the new language. So, if you have a different idea for an approach, I'm all ears. I guess, somewhere at some point I need to manually do some type-dependent dispatching in an extensible way. Maybe write a module "tgmath" (type generic math) that comes with support for real and complex support as well as allows others to register their types and special case functions... Opinions? What do the Python masters say about this? TIA Edit: Apparently, I'm not the only one who is interested in generic functions and type-dependent overloading. There is [PEP 3124](http://www.python.org/dev/peps/pep-3124/) but it is in draft state since 4 years ago. --- *Old version of the question:* I have a strong background in Java and C++ and just recently started learning Python. What I'm wondering about is: How do we extend mathematical functions (at least their names) so they work on other user-defined types? Do these kinds of functions offer any kind of extension point/hook I can leverage (similar to the iterator protocol where `next(obj)` actually delegates to `obj.__next__`, etc) ? In C++ I would have simply overloaded the function with the new parameter type and have the compiler figure out which of the functions was meant using the argument expressions' static types. 
But since Python is a very dynamic language there is no such thing as overloading. What is the preferred Python way of doing this? Also, when I write custom functions, I would like to avoid long chains of ``` if isinstance(arg,someClass): suchandsuch elif ... ``` What are the patterns I could use to make the code look prettier and more Pythonish? I guess, I'm basically trying to deal with the lack of function overloading in Python. At least in C++ overloading and argument-dependent lookup is an important part of good C++ style. Is it possible to make ``` x = udt(something) # object of user-defined type that represents a number y = sin(x) # how do I make this invoke custom type-specific code for sin? t = abs(x) # works because abs delegates to __abs__() which I defined. ``` work? I know I could make sin a non-static method of the class. But then I lose genericity because for every other kind of number-like object it's `sin(x)` and not `x.sin()`. Adding a `__float__` method is not acceptable since I keep additional information in the object such as derivatives for "automatic differentiation". TIA Edit: If you're curious about what the code looks like, check [this](http://ideone.com/H6z60) out. In an ideal world I would be able to use sin/cos/sqrt in a type-generic way. I consider these functions part of the objects interface even if they are "free functions". In `__somefunction` I did not qualify the functions with `math.` nor `__main__.`. It just works because I manually fall back on `math.sin` (etc) in my custom functions via the decorator. But I consider this to be an ugly hack.
2011/08/22
[ "https://Stackoverflow.com/questions/7151776", "https://Stackoverflow.com", "https://Stackoverflow.com/users/172531/" ]
you can do this, but it works backwards. you implement `__float__()` in your new type and then `sin()` will work with your class. in other words, you don't adapt sine to work on other types; you adapt those types so that they work with sine. this is better because it forces consistency. if there is no obvious mapping from your object to a float then there probably isn't a reasonable interpretation of `sin()` for that type. [sorry if i missed the "\_\_float\_\_ won't work" part earlier; perhaps you added that in response to this? anyway, for convincing proof that what you want isn't possible, python has the cmath library to add `sin()` etc for complex numbers...]
Ideally, you will derive your user-defined numeric types from a native Python type, and the math functions will just work. When that isn't possible, perhaps you can define `__int__()` or `__float__()` or `__complex__()` or `__long__()` on the object so it knows how to convert itself to a type the math functions can handle. When *that* isn't feasible, for example if you wish to take a `sin()` of an object that stores x and y displacement rather than an angle, you will need to provide either your own equivalents of such functions (usually as a method of the class) or a function such as `to_angle()` to convert the object's internal representation to the one needed by Python. Finally, it *is* possible to provide your own `math` module that replaces the built-in math functions with your own varieties, so if you want to allow math on your classes without any syntax changes to the expressions, it can be done in that fashion, although it is tricky and can reduce performance, since you'll be doing (e.g.) a fair bit of preprocessing in Python before calling the native implementations.
7,151,776
*Edit: Let me try to reword and improve my question. The old version is attached at the bottom.* What I am looking for is a way to express and use free functions in a type-generic way. Examples: ``` abs(x) # maps to x.__abs__() next(x) # maps to x.__next__() at least in Python 3 -x # maps to x.__neg__() ``` In these cases the functions have been designed in a way that allows users with user-defined types to customize their behaviour by delegating the work to a non-static method call. This is nice. It allows us to write functions that don't really care about the exact parameter types as long as they "feel" like objects that model a certain concept. Counter examples: Functions that can't be easily used generically: ``` math.exp # only for reals cmath.exp # takes complex numbers ``` Suppose, I want to write a generic function that applies exp on a list of number-like objects. What exp function should I use? How do I select the correct one? ``` def listexp(lst): return [math.exp(x) for x in lst] ``` Obviously, this won't work for lists of complex numbers even though there is an exp for complex numbers (in cmath). And it also won't work for any user-defined number-like type which might offer its own special exp function. So, what I'm looking for is a way to deal with this on both sides -- ideally without special casing a lot of things. As a writer of some generic function that does not care about the exact types of parameters I want to use the correct mathematical functions that is specific to the types involved without having to deal with this explicitly. As a writer of a user-defined type, I would like to expose special mathematical functions that have been augmented to deal with additional data stored in those objects (similar to the imaginary part of complex numbers). What is the preferred pattern/protocol/idiom for doing that? I did not yet test `numpy`. But I downloaded its source code. As far as I know, it offers a sin function for arrays. 
Unfortunately, I haven't found its implementation yet in the source code. But it would be interesting to see how they managed to pick the right sin function for the right type of numbers the array currently stores. In C++ I would have relied on function overloading and ADL (argument-dependent lookup). With C++ being statically typed, it should come as no surprise that this (name lookup, overload resolution) is handled completely at compile-time. I suppose, I could emulate this at runtime with Python and the reflective tools Python has to offer. But I also know that trying to import a coding style into another language might be a bad idea and not very idiomatic in the new language. So, if you have a different idea for an approach, I'm all ears. I guess, somewhere at some point I need to manually do some type-dependent dispatching in an extensible way. Maybe write a module "tgmath" (type generic math) that comes with support for real and complex support as well as allows others to register their types and special case functions... Opinions? What do the Python masters say about this? TIA Edit: Apparently, I'm not the only one who is interested in generic functions and type-dependent overloading. There is [PEP 3124](http://www.python.org/dev/peps/pep-3124/) but it is in draft state since 4 years ago. --- *Old version of the question:* I have a strong background in Java and C++ and just recently started learning Python. What I'm wondering about is: How do we extend mathematical functions (at least their names) so they work on other user-defined types? Do these kinds of functions offer any kind of extension point/hook I can leverage (similar to the iterator protocol where `next(obj)` actually delegates to `obj.__next__`, etc) ? In C++ I would have simply overloaded the function with the new parameter type and have the compiler figure out which of the functions was meant using the argument expressions' static types. 
But since Python is a very dynamic language there is no such thing as overloading. What is the preferred Python way of doing this? Also, when I write custom functions, I would like to avoid long chains of ``` if isinstance(arg,someClass): suchandsuch elif ... ``` What are the patterns I could use to make the code look prettier and more Pythonish? I guess, I'm basically trying to deal with the lack of function overloading in Python. At least in C++ overloading and argument-dependent lookup is an important part of good C++ style. Is it possible to make ``` x = udt(something) # object of user-defined type that represents a number y = sin(x) # how do I make this invoke custom type-specific code for sin? t = abs(x) # works because abs delegates to __abs__() which I defined. ``` work? I know I could make sin a non-static method of the class. But then I lose genericity because for every other kind of number-like object it's `sin(x)` and not `x.sin()`. Adding a `__float__` method is not acceptable since I keep additional information in the object such as derivatives for "automatic differentiation". TIA Edit: If you're curious about what the code looks like, check [this](http://ideone.com/H6z60) out. In an ideal world I would be able to use sin/cos/sqrt in a type-generic way. I consider these functions part of the objects interface even if they are "free functions". In `__somefunction` I did not qualify the functions with `math.` nor `__main__.`. It just works because I manually fall back on `math.sin` (etc) in my custom functions via the decorator. But I consider this to be an ugly hack.
2011/08/22
[ "https://Stackoverflow.com/questions/7151776", "https://Stackoverflow.com", "https://Stackoverflow.com/users/172531/" ]
If you want the return type of `math.sin()` to be your user-defined type, you appear to be out of luck. Python's `math` library is basically a thin wrapper around a fast native IEEE 754 floating point math library. If you want to be internally consistent and duck-typed, you can at least put the extensibility shim that python is missing into your own code. ``` def sin(x): try: return x.__sin__() except AttributeError: return math.sin(x) ``` Now you can import this `sin` function and use it indiscriminately wherever you used `math.sin` previously. It's not quite as pretty as having `math.sin` pick up your duck-typing automatically but at least it can be consistent within your codebase.
Ideally, you will derive your user-defined numeric types from a native Python type, and the math functions will just work. When that isn't possible, perhaps you can define `__int__()` or `__float__()` or `__complex__()` or `__long__()` on the object so it knows how to convert itself to a type the math functions can handle. When *that* isn't feasible, for example if you wish to take a `sin()` of an object that stores x and y displacement rather than an angle, you will need to provide either your own equivalents of such functions (usually as a method of the class) or a function such as `to_angle()` to convert the object's internal representation to the one needed by Python. Finally, it *is* possible to provide your own `math` module that replaces the built-in math functions with your own varieties, so if you want to allow math on your classes without any syntax changes to the expressions, it can be done in that fashion, although it is tricky and can reduce performance, since you'll be doing (e.g.) a fair bit of preprocessing in Python before calling the native implementations.
7,151,776
*Edit: Let me try to reword and improve my question. The old version is attached at the bottom.* What I am looking for is a way to express and use free functions in a type-generic way. Examples: ``` abs(x) # maps to x.__abs__() next(x) # maps to x.__next__() at least in Python 3 -x # maps to x.__neg__() ``` In these cases the functions have been designed in a way that allows users with user-defined types to customize their behaviour by delegating the work to a non-static method call. This is nice. It allows us to write functions that don't really care about the exact parameter types as long as they "feel" like objects that model a certain concept. Counter examples: Functions that can't be easily used generically: ``` math.exp # only for reals cmath.exp # takes complex numbers ``` Suppose, I want to write a generic function that applies exp on a list of number-like objects. What exp function should I use? How do I select the correct one? ``` def listexp(lst): return [math.exp(x) for x in lst] ``` Obviously, this won't work for lists of complex numbers even though there is an exp for complex numbers (in cmath). And it also won't work for any user-defined number-like type which might offer its own special exp function. So, what I'm looking for is a way to deal with this on both sides -- ideally without special casing a lot of things. As a writer of some generic function that does not care about the exact types of parameters I want to use the correct mathematical functions that is specific to the types involved without having to deal with this explicitly. As a writer of a user-defined type, I would like to expose special mathematical functions that have been augmented to deal with additional data stored in those objects (similar to the imaginary part of complex numbers). What is the preferred pattern/protocol/idiom for doing that? I did not yet test `numpy`. But I downloaded its source code. As far as I know, it offers a sin function for arrays. 
Unfortunately, I haven't found its implementation yet in the source code. But it would be interesting to see how they managed to pick the right sin function for the right type of numbers the array currently stores. In C++ I would have relied on function overloading and ADL (argument-dependent lookup). With C++ being statically typed, it should come as no surprise that this (name lookup, overload resolution) is handled completely at compile-time. I suppose, I could emulate this at runtime with Python and the reflective tools Python has to offer. But I also know that trying to import a coding style into another language might be a bad idea and not very idiomatic in the new language. So, if you have a different idea for an approach, I'm all ears. I guess, somewhere at some point I need to manually do some type-dependent dispatching in an extensible way. Maybe write a module "tgmath" (type generic math) that comes with support for real and complex support as well as allows others to register their types and special case functions... Opinions? What do the Python masters say about this? TIA Edit: Apparently, I'm not the only one who is interested in generic functions and type-dependent overloading. There is [PEP 3124](http://www.python.org/dev/peps/pep-3124/) but it is in draft state since 4 years ago. --- *Old version of the question:* I have a strong background in Java and C++ and just recently started learning Python. What I'm wondering about is: How do we extend mathematical functions (at least their names) so they work on other user-defined types? Do these kinds of functions offer any kind of extension point/hook I can leverage (similar to the iterator protocol where `next(obj)` actually delegates to `obj.__next__`, etc) ? In C++ I would have simply overloaded the function with the new parameter type and have the compiler figure out which of the functions was meant using the argument expressions' static types. 
But since Python is a very dynamic language there is no such thing as overloading. What is the preferred Python way of doing this? Also, when I write custom functions, I would like to avoid long chains of ``` if isinstance(arg,someClass): suchandsuch elif ... ``` What are the patterns I could use to make the code look prettier and more Pythonish? I guess, I'm basically trying to deal with the lack of function overloading in Python. At least in C++ overloading and argument-dependent lookup is an important part of good C++ style. Is it possible to make ``` x = udt(something) # object of user-defined type that represents a number y = sin(x) # how do I make this invoke custom type-specific code for sin? t = abs(x) # works because abs delegates to __abs__() which I defined. ``` work? I know I could make sin a non-static method of the class. But then I lose genericity because for every other kind of number-like object it's `sin(x)` and not `x.sin()`. Adding a `__float__` method is not acceptable since I keep additional information in the object such as derivatives for "automatic differentiation". TIA Edit: If you're curious about what the code looks like, check [this](http://ideone.com/H6z60) out. In an ideal world I would be able to use sin/cos/sqrt in a type-generic way. I consider these functions part of the objects interface even if they are "free functions". In `__somefunction` I did not qualify the functions with `math.` nor `__main__.`. It just works because I manually fall back on `math.sin` (etc) in my custom functions via the decorator. But I consider this to be an ugly hack.
2011/08/22
[ "https://Stackoverflow.com/questions/7151776", "https://Stackoverflow.com", "https://Stackoverflow.com/users/172531/" ]
Define your own versions in a module. This is what's done in cmath for complex numbers and in numpy for arrays.
Ideally, you will derive your user-defined numeric types from a native Python type, and the math functions will just work. When that isn't possible, perhaps you can define `__int__()` or `__float__()` or `__complex__()` or `__long__()` on the object so it knows how to convert itself to a type the math functions can handle. When *that* isn't feasible, for example if you wish to take a `sin()` of an object that stores x and y displacement rather than an angle, you will need to provide either your own equivalents of such functions (usually as a method of the class) or a function such as `to_angle()` to convert the object's internal representation to the one needed by Python. Finally, it *is* possible to provide your own `math` module that replaces the built-in math functions with your own varieties, so if you want to allow math on your classes without any syntax changes to the expressions, it can be done in that fashion, although it is tricky and can reduce performance, since you'll be doing (e.g.) a fair bit of preprocessing in Python before calling the native implementations.
7,151,776
*Edit: Let me try to reword and improve my question. The old version is attached at the bottom.* What I am looking for is a way to express and use free functions in a type-generic way. Examples: ``` abs(x) # maps to x.__abs__() next(x) # maps to x.__next__() at least in Python 3 -x # maps to x.__neg__() ``` In these cases the functions have been designed in a way that allows users with user-defined types to customize their behaviour by delegating the work to a non-static method call. This is nice. It allows us to write functions that don't really care about the exact parameter types as long as they "feel" like objects that model a certain concept. Counter examples: Functions that can't be easily used generically: ``` math.exp # only for reals cmath.exp # takes complex numbers ``` Suppose, I want to write a generic function that applies exp on a list of number-like objects. What exp function should I use? How do I select the correct one? ``` def listexp(lst): return [math.exp(x) for x in lst] ``` Obviously, this won't work for lists of complex numbers even though there is an exp for complex numbers (in cmath). And it also won't work for any user-defined number-like type which might offer its own special exp function. So, what I'm looking for is a way to deal with this on both sides -- ideally without special casing a lot of things. As a writer of some generic function that does not care about the exact types of parameters I want to use the correct mathematical functions that is specific to the types involved without having to deal with this explicitly. As a writer of a user-defined type, I would like to expose special mathematical functions that have been augmented to deal with additional data stored in those objects (similar to the imaginary part of complex numbers). What is the preferred pattern/protocol/idiom for doing that? I did not yet test `numpy`. But I downloaded its source code. As far as I know, it offers a sin function for arrays. 
Unfortunately, I haven't found its implementation yet in the source code. But it would be interesting to see how they managed to pick the right sin function for the right type of numbers the array currently stores. In C++ I would have relied on function overloading and ADL (argument-dependent lookup). With C++ being statically typed, it should come as no surprise that this (name lookup, overload resolution) is handled completely at compile-time. I suppose, I could emulate this at runtime with Python and the reflective tools Python has to offer. But I also know that trying to import a coding style into another language might be a bad idea and not very idiomatic in the new language. So, if you have a different idea for an approach, I'm all ears. I guess, somewhere at some point I need to manually do some type-dependent dispatching in an extensible way. Maybe write a module "tgmath" (type generic math) that comes with support for real and complex support as well as allows others to register their types and special case functions... Opinions? What do the Python masters say about this? TIA Edit: Apparently, I'm not the only one who is interested in generic functions and type-dependent overloading. There is [PEP 3124](http://www.python.org/dev/peps/pep-3124/) but it is in draft state since 4 years ago. --- *Old version of the question:* I have a strong background in Java and C++ and just recently started learning Python. What I'm wondering about is: How do we extend mathematical functions (at least their names) so they work on other user-defined types? Do these kinds of functions offer any kind of extension point/hook I can leverage (similar to the iterator protocol where `next(obj)` actually delegates to `obj.__next__`, etc) ? In C++ I would have simply overloaded the function with the new parameter type and have the compiler figure out which of the functions was meant using the argument expressions' static types. 
But since Python is a very dynamic language there is no such thing as overloading. What is the preferred Python way of doing this? Also, when I write custom functions, I would like to avoid long chains of ``` if isinstance(arg,someClass): suchandsuch elif ... ``` What are the patterns I could use to make the code look prettier and more Pythonish? I guess, I'm basically trying to deal with the lack of function overloading in Python. At least in C++ overloading and argument-dependent lookup is an important part of good C++ style. Is it possible to make ``` x = udt(something) # object of user-defined type that represents a number y = sin(x) # how do I make this invoke custom type-specific code for sin? t = abs(x) # works because abs delegates to __abs__() which I defined. ``` work? I know I could make sin a non-static method of the class. But then I lose genericity because for every other kind of number-like object it's `sin(x)` and not `x.sin()`. Adding a `__float__` method is not acceptable since I keep additional information in the object such as derivatives for "automatic differentiation". TIA Edit: If you're curious about what the code looks like, check [this](http://ideone.com/H6z60) out. In an ideal world I would be able to use sin/cos/sqrt in a type-generic way. I consider these functions part of the objects interface even if they are "free functions". In `__somefunction` I did not qualify the functions with `math.` nor `__main__.`. It just works because I manually fall back on `math.sin` (etc) in my custom functions via the decorator. But I consider this to be an ugly hack.
2011/08/22
[ "https://Stackoverflow.com/questions/7151776", "https://Stackoverflow.com", "https://Stackoverflow.com/users/172531/" ]
you can do this, but it works backwards. you implement `__float__()` in your new type and then `sin()` will work with your class. in other words, you don't adapt sine to work on other types; you adapt those types so that they work with sine. this is better because it forces consistency. if there is no obvious mapping from your object to a float then there probably isn't a reasonable interpretation of `sin()` for that type. [sorry if i missed the "\_\_float\_\_ won't work" part earlier; perhaps you added that in response to this? anyway, for convincing proof that what you want isn't possible, python has the cmath library to add `sin()` etc for complex numbers...]
If you want the return type of `math.sin()` to be your user-defined type, you appear to be out of luck. Python's `math` library is basically a thin wrapper around a fast native IEEE 754 floating point math library. If you want to be internally consistent and duck-typed, you can at least put the extensibility shim that python is missing into your own code. ``` def sin(x): try: return x.__sin__() except AttributeError: return math.sin(x) ``` Now you can import this `sin` function and use it indiscriminately wherever you used `math.sin` previously. It's not quite as pretty as having `math.sin` pick up your duck-typing automatically but at least it can be consistent within your codebase.
7,151,776
*Edit: Let me try to reword and improve my question. The old version is attached at the bottom.* What I am looking for is a way to express and use free functions in a type-generic way. Examples: ``` abs(x) # maps to x.__abs__() next(x) # maps to x.__next__() at least in Python 3 -x # maps to x.__neg__() ``` In these cases the functions have been designed in a way that allows users with user-defined types to customize their behaviour by delegating the work to a non-static method call. This is nice. It allows us to write functions that don't really care about the exact parameter types as long as they "feel" like objects that model a certain concept. Counter examples: Functions that can't be easily used generically: ``` math.exp # only for reals cmath.exp # takes complex numbers ``` Suppose, I want to write a generic function that applies exp on a list of number-like objects. What exp function should I use? How do I select the correct one? ``` def listexp(lst): return [math.exp(x) for x in lst] ``` Obviously, this won't work for lists of complex numbers even though there is an exp for complex numbers (in cmath). And it also won't work for any user-defined number-like type which might offer its own special exp function. So, what I'm looking for is a way to deal with this on both sides -- ideally without special casing a lot of things. As a writer of some generic function that does not care about the exact types of parameters I want to use the correct mathematical functions that is specific to the types involved without having to deal with this explicitly. As a writer of a user-defined type, I would like to expose special mathematical functions that have been augmented to deal with additional data stored in those objects (similar to the imaginary part of complex numbers). What is the preferred pattern/protocol/idiom for doing that? I did not yet test `numpy`. But I downloaded its source code. As far as I know, it offers a sin function for arrays. 
Unfortunately, I haven't found its implementation yet in the source code. But it would be interesting to see how they managed to pick the right sin function for the right type of numbers the array currently stores. In C++ I would have relied on function overloading and ADL (argument-dependent lookup). With C++ being statically typed, it should come as no surprise that this (name lookup, overload resolution) is handled completely at compile-time. I suppose, I could emulate this at runtime with Python and the reflective tools Python has to offer. But I also know that trying to import a coding style into another language might be a bad idea and not very idiomatic in the new language. So, if you have a different idea for an approach, I'm all ears. I guess, somewhere at some point I need to manually do some type-dependent dispatching in an extensible way. Maybe write a module "tgmath" (type generic math) that comes with support for real and complex support as well as allows others to register their types and special case functions... Opinions? What do the Python masters say about this? TIA Edit: Apparently, I'm not the only one who is interested in generic functions and type-dependent overloading. There is [PEP 3124](http://www.python.org/dev/peps/pep-3124/) but it is in draft state since 4 years ago. --- *Old version of the question:* I have a strong background in Java and C++ and just recently started learning Python. What I'm wondering about is: How do we extend mathematical functions (at least their names) so they work on other user-defined types? Do these kinds of functions offer any kind of extension point/hook I can leverage (similar to the iterator protocol where `next(obj)` actually delegates to `obj.__next__`, etc) ? In C++ I would have simply overloaded the function with the new parameter type and have the compiler figure out which of the functions was meant using the argument expressions' static types. 
But since Python is a very dynamic language there is no such thing as overloading. What is the preferred Python way of doing this? Also, when I write custom functions, I would like to avoid long chains of ``` if isinstance(arg,someClass): suchandsuch elif ... ``` What are the patterns I could use to make the code look prettier and more Pythonish? I guess, I'm basically trying to deal with the lack of function overloading in Python. At least in C++ overloading and argument-dependent lookup is an important part of good C++ style. Is it possible to make ``` x = udt(something) # object of user-defined type that represents a number y = sin(x) # how do I make this invoke custom type-specific code for sin? t = abs(x) # works because abs delegates to __abs__() which I defined. ``` work? I know I could make sin a non-static method of the class. But then I lose genericity because for every other kind of number-like object it's `sin(x)` and not `x.sin()`. Adding a `__float__` method is not acceptable since I keep additional information in the object such as derivatives for "automatic differentiation". TIA Edit: If you're curious about what the code looks like, check [this](http://ideone.com/H6z60) out. In an ideal world I would be able to use sin/cos/sqrt in a type-generic way. I consider these functions part of the objects interface even if they are "free functions". In `__somefunction` I did not qualify the functions with `math.` nor `__main__.`. It just works because I manually fall back on `math.sin` (etc) in my custom functions via the decorator. But I consider this to be an ugly hack.
2011/08/22
[ "https://Stackoverflow.com/questions/7151776", "https://Stackoverflow.com", "https://Stackoverflow.com/users/172531/" ]
you can do this, but it works backwards. you implement `__float__()` in your new type and then `sin()` will work with your class. in other words, you don't adapt sine to work on other types; you adapt those types so that they work with sine. this is better because it forces consistency. if there is no obvious mapping from your object to a float then there probably isn't a reasonable interpretation of `sin()` for that type. [sorry if i missed the "\_\_float\_\_ won't work" part earlier; perhaps you added that in response to this? anyway, for convincing proof that what you want isn't possible, python has the cmath library to add `sin()` etc for complex numbers...]
Define your own versions in a module. This is what's done in cmath for complex numbers and in numpy for arrays.
7,151,776
*Edit: Let me try to reword and improve my question. The old version is attached at the bottom.* What I am looking for is a way to express and use free functions in a type-generic way. Examples: ``` abs(x) # maps to x.__abs__() next(x) # maps to x.__next__() at least in Python 3 -x # maps to x.__neg__() ``` In these cases the functions have been designed in a way that allows users with user-defined types to customize their behaviour by delegating the work to a non-static method call. This is nice. It allows us to write functions that don't really care about the exact parameter types as long as they "feel" like objects that model a certain concept. Counter examples: Functions that can't be easily used generically: ``` math.exp # only for reals cmath.exp # takes complex numbers ``` Suppose, I want to write a generic function that applies exp on a list of number-like objects. What exp function should I use? How do I select the correct one? ``` def listexp(lst): return [math.exp(x) for x in lst] ``` Obviously, this won't work for lists of complex numbers even though there is an exp for complex numbers (in cmath). And it also won't work for any user-defined number-like type which might offer its own special exp function. So, what I'm looking for is a way to deal with this on both sides -- ideally without special casing a lot of things. As a writer of some generic function that does not care about the exact types of parameters I want to use the correct mathematical functions that is specific to the types involved without having to deal with this explicitly. As a writer of a user-defined type, I would like to expose special mathematical functions that have been augmented to deal with additional data stored in those objects (similar to the imaginary part of complex numbers). What is the preferred pattern/protocol/idiom for doing that? I did not yet test `numpy`. But I downloaded its source code. As far as I know, it offers a sin function for arrays. 
Unfortunately, I haven't found its implementation yet in the source code. But it would be interesting to see how they managed to pick the right sin function for the right type of numbers the array currently stores. In C++ I would have relied on function overloading and ADL (argument-dependent lookup). With C++ being statically typed, it should come as no surprise that this (name lookup, overload resolution) is handled completely at compile-time. I suppose, I could emulate this at runtime with Python and the reflective tools Python has to offer. But I also know that trying to import a coding style into another language might be a bad idea and not very idiomatic in the new language. So, if you have a different idea for an approach, I'm all ears. I guess, somewhere at some point I need to manually do some type-dependent dispatching in an extensible way. Maybe write a module "tgmath" (type generic math) that comes with support for real and complex support as well as allows others to register their types and special case functions... Opinions? What do the Python masters say about this? TIA Edit: Apparently, I'm not the only one who is interested in generic functions and type-dependent overloading. There is [PEP 3124](http://www.python.org/dev/peps/pep-3124/) but it is in draft state since 4 years ago. --- *Old version of the question:* I have a strong background in Java and C++ and just recently started learning Python. What I'm wondering about is: How do we extend mathematical functions (at least their names) so they work on other user-defined types? Do these kinds of functions offer any kind of extension point/hook I can leverage (similar to the iterator protocol where `next(obj)` actually delegates to `obj.__next__`, etc) ? In C++ I would have simply overloaded the function with the new parameter type and have the compiler figure out which of the functions was meant using the argument expressions' static types. 
But since Python is a very dynamic language there is no such thing as overloading. What is the preferred Python way of doing this? Also, when I write custom functions, I would like to avoid long chains of ``` if isinstance(arg,someClass): suchandsuch elif ... ``` What are the patterns I could use to make the code look prettier and more Pythonish? I guess, I'm basically trying to deal with the lack of function overloading in Python. At least in C++ overloading and argument-dependent lookup is an important part of good C++ style. Is it possible to make ``` x = udt(something) # object of user-defined type that represents a number y = sin(x) # how do I make this invoke custom type-specific code for sin? t = abs(x) # works because abs delegates to __abs__() which I defined. ``` work? I know I could make sin a non-static method of the class. But then I lose genericity because for every other kind of number-like object it's `sin(x)` and not `x.sin()`. Adding a `__float__` method is not acceptable since I keep additional information in the object such as derivatives for "automatic differentiation". TIA Edit: If you're curious about what the code looks like, check [this](http://ideone.com/H6z60) out. In an ideal world I would be able to use sin/cos/sqrt in a type-generic way. I consider these functions part of the objects interface even if they are "free functions". In `__somefunction` I did not qualify the functions with `math.` nor `__main__.`. It just works because I manually fall back on `math.sin` (etc) in my custom functions via the decorator. But I consider this to be an ugly hack.
2011/08/22
[ "https://Stackoverflow.com/questions/7151776", "https://Stackoverflow.com", "https://Stackoverflow.com/users/172531/" ]
If you want the return type of `math.sin()` to be your user-defined type, you appear to be out of luck. Python's `math` library is basically a thin wrapper around a fast native IEEE 754 floating point math library. If you want to be internally consistent and duck-typed, you can at least put the extensibility shim that python is missing into your own code. ``` def sin(x): try: return x.__sin__() except AttributeError: return math.sin(x) ``` Now you can import this `sin` function and use it indiscriminately wherever you used `math.sin` previously. It's not quite as pretty as having `math.sin` pick up your duck-typing automatically but at least it can be consistent within your codebase.
Define your own versions in a module. This is what's done in cmath for complex number and in numpy for arrays.
66,583,626
In plotly I can create a histogram as e.g. [in this example code from the documentation](https://plotly.com/python/histograms/): ``` import plotly.express as px df = px.data.tips() fig = px.histogram(df, x="total_bill") fig.show() ``` which results to: [![enter image description here](https://i.stack.imgur.com/17o4f.png)](https://i.stack.imgur.com/17o4f.png) My question is how do I get the data values of the histogram? From what I can think of, this question should be equivalent to how do I access the values of a trace? (google did not help with either) I could use numpy to redo the histogram: ``` import numpy as np np.histogram(df.total_bill) ``` But this will not always result to the same buckets, plus it is re-doing all the sometimes expensive computation that goes to create a histogram. [![enter image description here](https://i.stack.imgur.com/fZqTh.png)](https://i.stack.imgur.com/fZqTh.png)
2021/03/11
[ "https://Stackoverflow.com/questions/66583626", "https://Stackoverflow.com", "https://Stackoverflow.com/users/508907/" ]
My understanding of your question is that you would like to get the exact intervals and counts displayed in the histogram. For smaller subset of `px.data.tips()`, this: [![enter image description here](https://i.stack.imgur.com/8ksQb.png)](https://i.stack.imgur.com/8ksQb.png) And reading off the chart those values would be: ``` counts = [2, 4, 3, 1] bins = [5, 15, 25, 35, 45] ``` There's no *direct* way to do this, but that doesn't mean it's impossible. At least if you're willing to use the awesome `fig.full_figure_for_development()` and a *little* numpy. ### Code highlights *(complete snippet at the very end)* ``` xbins = f.data[0].xbins plotbins = list(np.arange(start=xbins['start'], stop=xbins['end']+xbins['size'], step=xbins['size'])) counts, bins = np.histogram(list(f.data[0].x), bins=plotbins) ``` ### Output: ``` [2 4 3 1] [ 5 15 25 35 45] ``` ### All the details: What I'm guessing you would *like* to be able to do is this: Run: ``` fig.data[0].count ``` And get: ``` [2, 4, 3, 1] ``` But the closest you'll get is this: Run: ``` fig.data[0].x ``` And get: ``` [15.53, 10.07, 12.6 , 32.83, 35.83, 29.03, 27.18, 22.67, 17.82, 18.78] ``` And those are just the raw values from the input`df['total_bill'].tail(10)`. So DerekO is right in that the rest is handled by javascript. But [`fig.full_figure_for_development()`](https://community.plotly.com/t/announcing-plotly-py-4-10-date-axis-improvements-a-faster-px-imshow-and-full-figures-for-development/44685) will: > > [...] return a new go.Figure object, prepopulated with the same values > you provided, as well as all the default values computed by Plotly.js, > to allow you to learn more about what attributes control every detail > of your figure and how you can customize them. 
> > > So running `f = fig.full_figure_for_development(warn=False)`, and then: ``` f.data[0].xbins ``` Will give you: ``` histogram.XBins({ 'end': 45, 'size': 10, 'start': 5 }) ``` And now you know enough to get the same values in your figure with a little numpy: ### Complete code: ``` import plotly.express as px import numpy as np df = px.data.tips() df = df.tail(10) fig = px.histogram(df, x="total_bill") f = fig.full_figure_for_development(warn=False) xbins = f.data[0].xbins plotbins = list(np.arange(start=xbins['start'], stop=xbins['end']+xbins['size'], step=xbins['size'])) counts, bins = np.histogram(list(f.data[0].x), bins=plotbins) print(counts, bins) ```
In the same Plotly Histogram documentation, there's a section called [Accessing the counts yaxis values](https://plotly.com/python/histograms/#accessing-the-counts-yaxis-values), and it explains that the y values are calculated by the JavaScript in the browser when the figure renders so you can't access it in the figure object (for example, through `fig.layout` or `fig.data`, which you might try for other types of charts) They recommend calculating the counts and bins yourself using `np.histogram`, then passing these values to `px.bar` to ensure that your histogram matches the buckets as you intend.
66,583,626
In plotly I can create a histogram as e.g. [in this example code from the documentation](https://plotly.com/python/histograms/): ``` import plotly.express as px df = px.data.tips() fig = px.histogram(df, x="total_bill") fig.show() ``` which results to: [![enter image description here](https://i.stack.imgur.com/17o4f.png)](https://i.stack.imgur.com/17o4f.png) My question is how do I get the data values of the histogram? From what I can think of, this question should be equivalent to how do I access the values of a trace? (google did not help with either) I could use numpy to redo the histogram: ``` import numpy as np np.histogram(df.total_bill) ``` But this will not always result to the same buckets, plus it is re-doing all the sometimes expensive computation that goes to create a histogram. [![enter image description here](https://i.stack.imgur.com/fZqTh.png)](https://i.stack.imgur.com/fZqTh.png)
2021/03/11
[ "https://Stackoverflow.com/questions/66583626", "https://Stackoverflow.com", "https://Stackoverflow.com/users/508907/" ]
In the same Plotly Histogram documentation, there's a section called [Accessing the counts yaxis values](https://plotly.com/python/histograms/#accessing-the-counts-yaxis-values), and it explains that the y values are calculated by the JavaScript in the browser when the figure renders so you can't access it in the figure object (for example, through `fig.layout` or `fig.data`, which you might try for other types of charts) They recommend calculating the counts and bins yourself using `np.histogram`, then passing these values to `px.bar` to ensure that your histogram matches the buckets as you intend.
After running some tests, I came to the conclusion that you can get the histogram values on the y axis by using groupby. You can generate your own dataframe with total\_bill values and a count for each one of the values like this: ``` import plotly.express as px df = px.data.tips() fig = px.histogram(df, x="total_bill") fig.show() print(df.groupby('total_bill').total_bill.agg('count').to_frame('COUNT').reset_index().head()) total_bill COUNT 0 3.07 1 1 5.75 1 2 7.25 2 3 7.51 1 4 7.56 1 ``` I guess plotly does something similar, and then adds some other grouping to stack values in a certain range together in each one of the histogram bars.
66,583,626
In plotly I can create a histogram as e.g. [in this example code from the documentation](https://plotly.com/python/histograms/): ``` import plotly.express as px df = px.data.tips() fig = px.histogram(df, x="total_bill") fig.show() ``` which results to: [![enter image description here](https://i.stack.imgur.com/17o4f.png)](https://i.stack.imgur.com/17o4f.png) My question is how do I get the data values of the histogram? From what I can think of, this question should be equivalent to how do I access the values of a trace? (google did not help with either) I could use numpy to redo the histogram: ``` import numpy as np np.histogram(df.total_bill) ``` But this will not always result to the same buckets, plus it is re-doing all the sometimes expensive computation that goes to create a histogram. [![enter image description here](https://i.stack.imgur.com/fZqTh.png)](https://i.stack.imgur.com/fZqTh.png)
2021/03/11
[ "https://Stackoverflow.com/questions/66583626", "https://Stackoverflow.com", "https://Stackoverflow.com/users/508907/" ]
My understanding of your question is that you would like to get the exact intervals and counts displayed in the histogram. For smaller subset of `px.data.tips()`, this: [![enter image description here](https://i.stack.imgur.com/8ksQb.png)](https://i.stack.imgur.com/8ksQb.png) And reading off the chart those values would be: ``` counts = [2, 4, 3, 1] bins = [5, 15, 25, 35, 45] ``` There's no *direct* way to do this, but that doesn't mean it's impossible. At least if you're willing to use the awesome `fig.full_figure_for_development()` and a *little* numpy. ### Code highlights *(complete snippet at the very end)* ``` xbins = f.data[0].xbins plotbins = list(np.arange(start=xbins['start'], stop=xbins['end']+xbins['size'], step=xbins['size'])) counts, bins = np.histogram(list(f.data[0].x), bins=plotbins) ``` ### Output: ``` [2 4 3 1] [ 5 15 25 35 45] ``` ### All the details: What I'm guessing you would *like* to be able to do is this: Run: ``` fig.data[0].count ``` And get: ``` [2, 4, 3, 1] ``` But the closest you'll get is this: Run: ``` fig.data[0].x ``` And get: ``` [15.53, 10.07, 12.6 , 32.83, 35.83, 29.03, 27.18, 22.67, 17.82, 18.78] ``` And those are just the raw values from the input`df['total_bill'].tail(10)`. So DerekO is right in that the rest is handled by javascript. But [`fig.full_figure_for_development()`](https://community.plotly.com/t/announcing-plotly-py-4-10-date-axis-improvements-a-faster-px-imshow-and-full-figures-for-development/44685) will: > > [...] return a new go.Figure object, prepopulated with the same values > you provided, as well as all the default values computed by Plotly.js, > to allow you to learn more about what attributes control every detail > of your figure and how you can customize them. 
> > > So running `f = fig.full_figure_for_development(warn=False)`, and then: ``` f.data[0].xbins ``` Will give you: ``` histogram.XBins({ 'end': 45, 'size': 10, 'start': 5 }) ``` And now you know enough to get the same values in your figure with a little numpy: ### Complete code: ``` import plotly.express as px import numpy as np df = px.data.tips() df = df.tail(10) fig = px.histogram(df, x="total_bill") f = fig.full_figure_for_development(warn=False) xbins = f.data[0].xbins plotbins = list(np.arange(start=xbins['start'], stop=xbins['end']+xbins['size'], step=xbins['size'])) counts, bins = np.histogram(list(f.data[0].x), bins=plotbins) print(counts, bins) ```
After doing some tests, I came to the conclusion that you can get the histogram values on the y axis by using groupby. You can generate your own dataframe with total\_bill values and a count for each one of the values like this: ``` import plotly.express as px df = px.data.tips() fig = px.histogram(df, x="total_bill") fig.show() print(df.groupby('total_bill').total_bill.agg('count').to_frame('COUNT').reset_index().head()) total_bill COUNT 0 3.07 1 1 5.75 1 2 7.25 2 3 7.51 1 4 7.56 1 ``` I guess plotly does something similar, and then adds some other grouping to stack values in a certain range together in each one of the histogram bars.
14,251,877
I worked out a code that make sense to me but not python since I'm new to python. Check my code here: ``` checksum_algos = ['md5','sha1'] for filename in ["%smanifest-%s.txt" % (prefix for prefix in ['', 'tag'], a for a in checksum_algos)]: f = os.path.join(self.path, filename) if isfile(f): yield f ``` My intention is to search filename in a list like : `['manifest-md5.txt','tagmanifest-md5.txt','manifest-sha1.txt','tagmanifest-sha1.txt']` but I got `syntax` problem to implement it. Thanks for any help.
2013/01/10
[ "https://Stackoverflow.com/questions/14251877", "https://Stackoverflow.com", "https://Stackoverflow.com/users/921082/" ]
You're overthinking it. ``` for filename in ("%smanifest-%s.txt" % (prefix, a) for prefix in ['', 'tag'] for a in checksum_algos): ```
Or you need [`itertools.product()`](http://docs.python.org/2/library/itertools.html#itertools.product): ``` >>> import itertools >>> [i for i in itertools.product(('', 'tag'), ('sha', 'md5'))] [('', 'sha'), ('', 'md5'), ('tag', 'sha'), ('tag', 'md5')] ```
14,251,877
I worked out a code that make sense to me but not python since I'm new to python. Check my code here: ``` checksum_algos = ['md5','sha1'] for filename in ["%smanifest-%s.txt" % (prefix for prefix in ['', 'tag'], a for a in checksum_algos)]: f = os.path.join(self.path, filename) if isfile(f): yield f ``` My intention is to search filename in a list like : `['manifest-md5.txt','tagmanifest-md5.txt','manifest-sha1.txt','tagmanifest-sha1.txt']` but I got `syntax` problem to implement it. Thanks for any help.
2013/01/10
[ "https://Stackoverflow.com/questions/14251877", "https://Stackoverflow.com", "https://Stackoverflow.com/users/921082/" ]
You're overthinking it. ``` for filename in ("%smanifest-%s.txt" % (prefix, a) for prefix in ['', 'tag'] for a in checksum_algos): ```
Using new style string formatting and `itertools`: ``` from itertools import product ["{0}manifest-{1}.txt".format(i,e) for i,e in product(*(tags,checksum_algos))] ``` **out:** ``` ['manifest-md5.txt', 'manifest-sha1.txt', 'tagmanifest-md5.txt', 'tagmanifest-sha1.txt'] ```
62,601,766
I am trying to use SIFT for feature detection with Python, but it is no longer part of OpenCV **or** OpenCV contrib. With OpenCV opencv-contrib-python (both versions 4.2.0.34, the latest as of this question), I get: ``` >>> import cv2 >>> cv2.SIFT_create() Traceback (most recent call last): File "<stdin>", line 1, in <module> AttributeError: module 'cv2.cv2' has no attribute 'SIFT_create' >>> cv2.xfeatures2d.SIFT_create() Traceback (most recent call last): File "<stdin>", line 1, in <module> cv2.error: OpenCV(4.2.0) C:\projects\opencv-python\opencv_contrib\modules\xfeatures2d\src\sift.cpp:1210: error: (-213:The function/feature is not implemented) This algorithm is patented and is excluded in this configuration; Set OPENCV_ENABLE_NONFREE CMake option and rebuild the library in function 'cv::xfeatures2d::SIFT::create' ``` Every related answer I have found has suggested using contrib or an older version, but neither of these work anymore. Is it easier to build it from source to get SIFT back as the error indicates, or to use an alternative? How would I do either of these? All I need is some way to do feature detection, preferably scale-invariant. [This question](https://stackoverflow.com/questions/10157806/are-there-any-fast-alternatives-to-surf-and-sift-for-scale-invariant-feature-ext) mentions SIFT alternatives but is very outdated (best answers are around 8 years old). What can we do now in 2020? 
EDIT Showing OpenCV 3 not working: Trying to install OpenCV 3: ``` >>> pip install opencv-python==3 ERROR: Could not find a version that satisfies the requirement opencv-python==3 (from versions: 3.4.2.16, 3.4.2.17, 3.4.3.18, 3.4.4.19, 3.4.5.20, 3.4.6.27, 3.4.7.28, 3.4.8.29, 3.4.9.31, 3.4.9.33, 4.0.0.21, 4.0.1.23, 4.0.1.24, 4.1.0.25, 4.1.1.26, 4.1.2.30, 4.2.0.32, 4.2.0.34) ERROR: No matching distribution found for opencv-python==3 >>> pip install opencv-python==3.4.2.16 ``` Then in Python: ``` >>> import cv2 >>> print(cv2.__version__) 3.4.2 >>> cv2.SIFT_create() Traceback (most recent call last): File "<stdin>", line 1, in <module> AttributeError: module 'cv2.cv2' has no attribute 'SIFT_create' ```
2020/06/26
[ "https://Stackoverflow.com/questions/62601766", "https://Stackoverflow.com", "https://Stackoverflow.com/users/8605685/" ]
The patent for SIFT expired in March 2020. However, OpenCV may not yet have been updated to move SIFT into the free open-source collection. See this issue: <https://github.com/skvark/opencv-python/issues/126> To rebuild with the non-free components: ``` git clone --recursive https://github.com/skvark/opencv-python.git cd opencv-python export CMAKE_ARGS="-DOPENCV_ENABLE_NONFREE=ON" python setup.py bdist_wheel ```
From [the issue](https://github.com/skvark/opencv-python/issues/126): to rebuild with the non-free components: ``` git clone --recursive https://github.com/skvark/opencv-python.git cd opencv-python export CMAKE_ARGS="-DOPENCV_ENABLE_NONFREE=ON" python setup.py bdist_wheel ```
46,016,131
``` I have a list of tuples `data`: data =[(array([[2, 1, 3]]), array([1])), (array([[2, 1, 2]]), array([1])), (array([[4, 4, 4]]), array([0])), (array([[4, 1, 1]]), array([0])), (array([[4, 4, 3]]), array([0]))] ``` For simplicity's sake, this list here only has 5 tuples. When I run the following code, it seem I am able to unpack each tuple with each iteration: ``` for x,y in data2: print(x,y) output: [[2 1 3]] [1] [[2 1 2]] [1] [[4 4 4]] [0] [[4 1 1]] [0] [[4 4 3]] [0] This also works: for x,y in data2[:2]: print(x,y) output: [[2 1 3]] [1] [[2 1 2]] [1] However, when I take only a single tuple from the list: for x,y in data2[0]: print(x,y) output: ValueError Traceback (most recent call last) <ipython-input-185-1eed1fccdb3a> in <module>() ----> 1 for x,y in data2[0]: 2 print(x,y) ValueError: not enough values to unpack (expected 2, got 1) ``` I'm confused as to how tuples are being unpacked in the earlier cases, that are preventing the last case to also successfully unpack the tuples. Thank you.
2017/09/02
[ "https://Stackoverflow.com/questions/46016131", "https://Stackoverflow.com", "https://Stackoverflow.com/users/6802252/" ]
In the first two cases you're looping through `list`, in the last one you're accessing `tuple` Not sure what you want to achieve, but instead of `data[0]`, `data[:1]` would work.
If your data looks like this: ``` data =[([[2, 1, 3]], [1]), ([[2, 1, 2]], [1]), ([[4, 4, 4]], [0]), ([[4, 1, 1]], [0]), ([[4, 4, 3]], [0])] for [a], b in data: print a, b ``` Output: ``` [2, 1, 3] [1] [2, 1, 2] [1] [4, 4, 4] [0] [4, 1, 1] [0] [4, 4, 3] [0] ```
1,239,538
I've been trying to use [suds](https://fedorahosted.org/suds/wiki) for Python to call a SOAP WSDL. I just need to call the service programmatically and write the output XML document. However suds automatically parses this data into it's own pythonic data format. I've been looking through [the examples](https://fedorahosted.org/suds/wiki/Documentation) and [the documentation](http://jortel.fedorapeople.org/suds/doc/), but I can't seem to find a way to return the XML document that the SOAP service gives me. Is there an easy way to do this I'm overlooking? Is there an easier way to do this in Python than suds?
2009/08/06
[ "https://Stackoverflow.com/questions/1239538", "https://Stackoverflow.com", "https://Stackoverflow.com/users/54283/" ]
At this early stage in suds development, the easiest way to get to the raw XML content is not what one would expect. The examples on the site show us with something like this: ``` client = Client(url) result = client.service.Invoke(subm) ``` however, the result is a pre-parsed object that is great for access by Python, but not for XML document access. Fortunately the Client object still has the original SOAP message received stored. ``` result = client.last_received() print result ``` Will give you the actual SOAP message received back.
You could take a look at a library such as [soaplib](http://wiki.github.com/jkp/soaplib): its a really nice way to consume (and serve) SOAP webservices in Python. The latest version has some code to dynamically generate Python bindings either dynamically (at runtime) or statically (run a script against some WSDL). [disclaimer: I'm the maintainer of the project! - I didn't write the bulk of it though]
65,514,398
I have a radar chart. Need to change the grid from circle-form to pentagon-form. Currently, I have this output: [![enter image description here](https://i.stack.imgur.com/mDLeM.jpg)](https://i.stack.imgur.com/mDLeM.jpg) Whereas I expect smth like this: [![enter image description here](https://i.stack.imgur.com/45Hft.png)](https://i.stack.imgur.com/45Hft.png) Here's the info about the system i have: Windows 10 (64-bit); Python - 3.8.0 (32-bit); matplotlib - 3.3.3. This question i've seen here: [How to make a polygon radar (spider) chart in python](https://stackoverflow.com/questions/52910187/how-to-make-a-polygon-radar-spider-chart-in-python) but it doesn't resolve my question. But it doesn't work for me and i can't understand why. I can simply copy the code, but the result is - outer border changes to pentagon-form, but the inner gridlines remain circular. But it works for other people! The code of the program is below: ```py import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Circle, RegularPolygon from matplotlib.path import Path from matplotlib.projections.polar import PolarAxes from matplotlib.projections import register_projection from matplotlib.spines import Spine from matplotlib.transforms import Affine2D def radar_factory(num_vars, frame='circle'): """Create a radar chart with `num_vars` axes. This function creates a RadarAxes projection and registers it. Parameters ---------- num_vars : int Number of variables for radar chart. frame : {'circle' | 'polygon'} Shape of frame surrounding axes. 
""" # calculate evenly-spaced axis angles theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False) class RadarAxes(PolarAxes): name = 'radar' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # rotate plot such that the first axis is at the top self.set_theta_zero_location('N') def fill(self, *args, closed=True, **kwargs): """Override fill so that line is closed by default""" return super().fill(closed=closed, *args, **kwargs) def plot(self, *args, **kwargs): """Override plot so that line is closed by default""" lines = super().plot(*args, **kwargs) for line in lines: self._close_line(line) def _close_line(self, line): x, y = line.get_data() # FIXME: markers at x[0], y[0] get doubled-up if x[0] != x[-1]: x = np.concatenate((x, [x[0]])) y = np.concatenate((y, [y[0]])) line.set_data(x, y) def set_varlabels(self, labels): self.set_thetagrids(np.degrees(theta), labels) def _gen_axes_patch(self): # The Axes patch must be centered at (0.5, 0.5) and of radius 0.5 # in axes coordinates. if frame == 'circle': return Circle((0.5, 0.5), 0.5) elif frame == 'polygon': return RegularPolygon((0.5, 0.5), num_vars, radius=0.5, edgecolor="k") else: raise ValueError("unknown value for 'frame': %s" % frame) def draw(self, renderer): """ Draw. If frame is polygon, make gridlines polygon-shaped """ if frame == 'polygon': gridlines = self.yaxis.get_gridlines() for gl in gridlines: gl.get_path()._interpolation_steps = num_vars super().draw(renderer) def _gen_axes_spines(self): if frame == 'circle': return super()._gen_axes_spines() elif frame == 'polygon': # spine_type must be 'left'/'right'/'top'/'bottom'/'circle'. spine = Spine(axes=self, spine_type='circle', path=Path.unit_regular_polygon(num_vars)) # unit_regular_polygon gives a polygon of radius 1 centered at # (0, 0) but we want a polygon of radius 0.5 centered at (0.5, # 0.5) in axes coordinates. 
spine.set_transform(Affine2D().scale(.5).translate(.5, .5) + self.transAxes) return {'polar': spine} else: raise ValueError("unknown value for 'frame': %s" % frame) register_projection(RadarAxes) return theta data = [['O1', 'O2', 'O3', 'O4', 'O5'], ('Title', [ [4, 3.5, 4, 2, 3,], [1.07, 5.95, 2.04, 1.05, 0.00,], ] )] N = len(data[0]) theta = radar_factory(N, frame='polygon') # polygon !!! spoke_labels = data.pop(0) title, case_data = data[0] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw=dict(projection='radar')) fig.subplots_adjust(top=0.85, bottom=0.05) ax.set_rgrids([0, 1, 2.0, 3.0, 4.0, 5.0, 6]) ax.set_title(title, position=(0.5, 1.1), ha='center') for d in case_data: line = ax.plot(theta, d) ax.fill(theta, d, alpha=0.25) ax.set_varlabels(spoke_labels) plt.show() ```
2020/12/30
[ "https://Stackoverflow.com/questions/65514398", "https://Stackoverflow.com", "https://Stackoverflow.com/users/14903741/" ]
I don't have enough reputation to add a comment so I'll put this down as an answer. An update to the code that can serve as a workaround and works on Matplotlib > 3.5 has been recently added to issue 19981 by prohde. You can check it here: <https://github.com/matplotlib/matplotlib/issues/19981>
As mentioned by @joao-neves the docs will be updated in this [PR](https://github.com/matplotlib/matplotlib/pull/22458/files). The working code for your example would be ```py import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Circle, RegularPolygon from matplotlib.path import Path from matplotlib.projections.polar import PolarAxes from matplotlib.projections import register_projection from matplotlib.spines import Spine from matplotlib.transforms import Affine2D def radar_factory(num_vars, frame='circle'): """Create a radar chart with `num_vars` axes. This function creates a RadarAxes projection and registers it. Parameters ---------- num_vars : int Number of variables for radar chart. frame : {'circle' | 'polygon'} Shape of frame surrounding axes. """ # calculate evenly-spaced axis angles theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False) class RadarTransform(PolarAxes.PolarTransform): def transform_path_non_affine(self, path): # Paths with non-unit interpolation steps correspond to gridlines, # in which case we force interpolation (to defeat PolarTransform's # autoconversion to circular arcs). 
if path._interpolation_steps > 1: path = path.interpolated(num_vars) return Path(self.transform(path.vertices), path.codes) class RadarAxes(PolarAxes): name = 'radar' PolarTransform = RadarTransform def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # rotate plot such that the first axis is at the top self.set_theta_zero_location('N') def fill(self, *args, closed=True, **kwargs): """Override fill so that line is closed by default""" return super().fill(closed=closed, *args, **kwargs) def plot(self, *args, **kwargs): """Override plot so that line is closed by default""" lines = super().plot(*args, **kwargs) for line in lines: self._close_line(line) def _close_line(self, line): x, y = line.get_data() # FIXME: markers at x[0], y[0] get doubled-up if x[0] != x[-1]: x = np.concatenate((x, [x[0]])) y = np.concatenate((y, [y[0]])) line.set_data(x, y) def set_varlabels(self, labels): self.set_thetagrids(np.degrees(theta), labels) def _gen_axes_patch(self): # The Axes patch must be centered at (0.5, 0.5) and of radius 0.5 # in axes coordinates. if frame == 'circle': return Circle((0.5, 0.5), 0.5) elif frame == 'polygon': return RegularPolygon((0.5, 0.5), num_vars, radius=0.5, edgecolor="k") else: raise ValueError("unknown value for 'frame': %s" % frame) def draw(self, renderer): """ Draw. If frame is polygon, make gridlines polygon-shaped """ if frame == 'polygon': gridlines = self.yaxis.get_gridlines() for gl in gridlines: gl.get_path()._interpolation_steps = num_vars super().draw(renderer) def _gen_axes_spines(self): if frame == 'circle': return super()._gen_axes_spines() elif frame == 'polygon': # spine_type must be 'left'/'right'/'top'/'bottom'/'circle'. spine = Spine(axes=self, spine_type='circle', path=Path.unit_regular_polygon(num_vars)) # unit_regular_polygon gives a polygon of radius 1 centered at # (0, 0) but we want a polygon of radius 0.5 centered at (0.5, # 0.5) in axes coordinates. 
spine.set_transform(Affine2D().scale(.5).translate(.5, .5) + self.transAxes) return {'polar': spine} else: raise ValueError("unknown value for 'frame': %s" % frame) register_projection(RadarAxes) return theta data = [['O1', 'O2', 'O3', 'O4', 'O5'], ('Title', [ [4, 3.5, 4, 2, 3,], [1.07, 5.95, 2.04, 1.05, 0.00,], ] )] N = len(data[0]) theta = radar_factory(N, frame='polygon') spoke_labels = data.pop(0) title, case_data = data[0] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw=dict(projection='radar')) fig.subplots_adjust(top=0.85, bottom=0.05) ax.set_rgrids([0, 1, 2.0, 3.0, 4.0, 5.0, 6]) ax.set_title(title, position=(0.5, 1.1), ha='center') for d in case_data: line = ax.plot(theta, d) ax.fill(theta, d, alpha=0.25, label='_nolegend_') ax.set_varlabels(spoke_labels) plt.show() ``` [![enter image description here](https://i.stack.imgur.com/UdHJ3m.png)](https://i.stack.imgur.com/UdHJ3m.png)
6,259,623
> > **Possible Duplicate:** > > [How does Python compare string and int?](https://stackoverflow.com/questions/3270680/how-does-python-compare-string-and-int) > > > An intern was just asking me to help debug code that looked something like this: ``` widths = [image.width for image in images] widths.append(374) width = max(widths) ``` ...when the first line should have been: ``` widths = [int(image.width) for image in images] ``` Thus, the code was choosing the string '364' rather than the integer 374. How on earth does python compare a string and an integer? I could understand comparing a single character (if python had a `char` datatype) to an integer, but I don't see any straightforward way to compare a string of characters to an integer.
2011/06/07
[ "https://Stackoverflow.com/questions/6259623", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2147/" ]
Python 2.x compares *every* built-in type to every other. From the [docs](http://docs.python.org/library/stdtypes.html#comparisons): > > Objects of different types, except different numeric types and different string types, never compare equal; such objects are ordered consistently but arbitrarily (so that sorting a heterogeneous array yields a consistent result). > > > This "arbitrary order" in CPython is actually sorted by type name. In Python 3.x, you will get a `TypeError` if you try to compare a string to an integer.
When comparing values of incompatible types in python 2.x, the ordering will be arbitrary but consistent. This is to allow you to put values of different types in a sorted collection. In CPython 2.x any string will always be higher than any integer, but as I said that's arbitrary. The actual ordering does not matter, it is just important that the ordering is consistent (i.e. you won't get a case where e.g. `x > y` and `y > z`, but `z > x`).
6,259,623
> > **Possible Duplicate:** > > [How does Python compare string and int?](https://stackoverflow.com/questions/3270680/how-does-python-compare-string-and-int) > > > An intern was just asking me to help debug code that looked something like this: ``` widths = [image.width for image in images] widths.append(374) width = max(widths) ``` ...when the first line should have been: ``` widths = [int(image.width) for image in images] ``` Thus, the code was choosing the string '364' rather than the integer 374. How on earth does python compare a string and an integer? I could understand comparing a single character (if python had a `char` datatype) to an integer, but I don't see any straightforward way to compare a string of characters to an integer.
2011/06/07
[ "https://Stackoverflow.com/questions/6259623", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2147/" ]
Python 2.x compares *every* built-in type to every other. From the [docs](http://docs.python.org/library/stdtypes.html#comparisons): > > Objects of different types, except different numeric types and different string types, never compare equal; such objects are ordered consistently but arbitrarily (so that sorting a heterogeneous array yields a consistent result). > > > This "arbitrary order" in CPython is actually sorted by type name. In Python 3.x, you will get a `TypeError` if you try to compare a string to an integer.
From [the documentation](http://docs.python.org/reference/expressions.html#notin): > > Most other objects of built-in types > compare unequal unless they are the > same object; the choice whether one > object is considered smaller or larger > than another one is made arbitrarily > but consistently within one execution > of a program > > > Hope this is clear enough - like it has been said, it is arbitrary.
6,259,623
> > **Possible Duplicate:** > > [How does Python compare string and int?](https://stackoverflow.com/questions/3270680/how-does-python-compare-string-and-int) > > > An intern was just asking me to help debug code that looked something like this: ``` widths = [image.width for image in images] widths.append(374) width = max(widths) ``` ...when the first line should have been: ``` widths = [int(image.width) for image in images] ``` Thus, the code was choosing the string '364' rather than the integer 374. How on earth does python compare a string and an integer? I could understand comparing a single character (if python had a `char` datatype) to an integer, but I don't see any straightforward way to compare a string of characters to an integer.
2011/06/07
[ "https://Stackoverflow.com/questions/6259623", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2147/" ]
When comparing values of incompatible types in python 2.x, the ordering will be arbitrary but consistent. This is to allow you to put values of different types in a sorted collection. In CPython 2.x any string will always be higher than any integer, but as I said that's arbitrary. The actual ordering does not matter, it is just important that the ordering is consistent (i.e. you won't get a case where e.g. `x > y` and `y > z`, but `z > x`).
From [the documentation](http://docs.python.org/reference/expressions.html#notin): > > Most other objects of built-in types > compare unequal unless they are the > same object; the choice whether one > object is considered smaller or larger > than another one is made arbitrarily > but consistently within one execution > of a program > > > Hope this is clear enough - like it has been said, it is arbitrary.
27,914,648
I am using geopy to geocode some addresses and I want to catch the timeout errors and print them out so I can do some quality control on the input. I am putting the geocode request in a try/catch but it's not working. Any ideas on what I need to do? Here is my code: ``` try: location = geolocator.geocode(my_address) except ValueError as error_message: print("Error: geocode failed on input %s with message %s"%(a, error_message)) ``` I get the following exception: ``` File "/usr/local/lib/python2.7/site-packages/geopy/geocoders/base.py", line 158, in _call_geocoder raise GeocoderTimedOut('Service timed out') geopy.exc.GeocoderTimedOut: Service timed out ``` Thank you in advance!
2015/01/13
[ "https://Stackoverflow.com/questions/27914648", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1860317/" ]
Try this: ``` from geopy.geocoders import Nominatim from geopy.exc import GeocoderTimedOut my_address = '1600 Pennsylvania Avenue NW Washington, DC 20500' geolocator = Nominatim() try: location = geolocator.geocode(my_address) print(location.latitude, location.longitude) except GeocoderTimedOut as e: print("Error: geocode failed on input %s with message %s"%(my_address, e.message)) ``` You can also consider increasing the timeout on the geocode call you are making to your geolocator. In my example it would be something like: ``` location = geolocator.geocode(my_address, timeout=10) ``` or ``` location = geolocator.geocode(my_address, timeout=None) ```
You may be experiencing this problem because you tried to request this address multiple times and they temporarily blocked you or slowed you down because of their [usage policy](https://operations.osmfoundation.org/policies/nominatim/). It states no more requests than one per second and that you should cache your results. I ran into this problem and you have a couple solutions. If you don't want to change your code much you can get a Google API key that you can use for something like 2500 requests/day for free or you can cache your results. Because I was already using DynamoDB on AWS for my problem I went ahead and just created a table that I cache my results in. [Here is the gist of my code.](https://gist.github.com/tylerjw/81fb59b53da6c2de1f295f3f0e22d75c)
27,914,648
I am using geopy to geocode some addresses and I want to catch the timeout errors and print them out so I can do some quality control on the input. I am putting the geocode request in a try/catch but it's not working. Any ideas on what I need to do? Here is my code: ``` try: location = geolocator.geocode(my_address) except ValueError as error_message: print("Error: geocode failed on input %s with message %s"%(a, error_message)) ``` I get the following exception: ``` File "/usr/local/lib/python2.7/site-packages/geopy/geocoders/base.py", line 158, in _call_geocoder raise GeocoderTimedOut('Service timed out') geopy.exc.GeocoderTimedOut: Service timed out ``` Thank you in advance!
2015/01/13
[ "https://Stackoverflow.com/questions/27914648", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1860317/" ]
Try this: ``` from geopy.geocoders import Nominatim from geopy.exc import GeocoderTimedOut my_address = '1600 Pennsylvania Avenue NW Washington, DC 20500' geolocator = Nominatim() try: location = geolocator.geocode(my_address) print(location.latitude, location.longitude) except GeocoderTimedOut as e: print("Error: geocode failed on input %s with message %s"%(my_address, e.message)) ``` You can also consider increasing the timeout on the geocode call you are making to your geolocator. In my example it would be something like: ``` location = geolocator.geocode(my_address, timeout=10) ``` or ``` location = geolocator.geocode(my_address, timeout=None) ```
I dealt with the same problem for many days. This is my code: ``` geolocator = Nominatim(user_agent="ny_explorer") location = geolocator.geocode(address_venue) ``` **Error:** Service timed out. **Solution:** Add a new attribute that declares the timeout: ``` location = geolocator.geocode(address_venue,timeout=10000) ```
51,963,377
I am trying to write a discriminator that evaluates patches of an image. Therefore I generate 32x32 non-overlapping patches from the input and then concatenate them on a new axis. The reason I am using a time-distributed layer is that at the end, the discriminator should evaluate the whole image as true or fake. Thus, I am trying to perform a forward pass on each patch individually and then averaging the discriminator output across the patches by the lambda layer: ``` def my_average(x): x = K.mean(x, axis=1) return x def my_average_shape(input_shape): shape = list(input_shape) del shape[1] return tuple(shape) def defineD(input_shape): a = Input(shape=(256, 256, 1)) cropping_list = [] n_patches = 256/32 for x in range(256/32): for y in range(256/32): cropping_list += [ K.expand_dims( Cropping2D((( x * 32, 256 - (x+1) * 32), ( y * 32, 256 - (y+1) * 32)))(a) , axis=1) ] x = Concatenate(1)(cropping_list) x = TimeDistributed(Conv2D(4 * 8, 3, padding='same'))(x) # x = TimeDistributed(MaxPooling2D())(x) x = TimeDistributed(LeakyReLU())(x) # 16 x = TimeDistributed(Conv2D(4 * 16, 3, padding='same'))(x) x = TimeDistributed(MaxPooling2D())(x) x = TimeDistributed(LeakyReLU())(x) # 8 x = TimeDistributed(Conv2D(4 * 32, 3, padding='same'))(x) x = TimeDistributed(MaxPooling2D())(x) x = TimeDistributed(LeakyReLU())(x) # 4 x = TimeDistributed(Flatten())(x) x = TimeDistributed(Dense(2, activation='sigmoid'))(x) x = Lambda(my_average, my_average_shape)(x) return keras.models.Model(inputs=a, outputs=x) ``` For some reason I get the following error: ``` File "testing.py", line 41, in <module> defineD((256,256,1) ) File "testing.py", line 38, in defineD return keras.models.Model(inputs=a, outputs=x) File "/usr/local/lib/python2.7/dist-packages/keras/legacy/interfaces.py", line 91, in wrapper return func(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 93, in __init__ self._init_graph_network(*args, **kwargs) File 
"/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 237, in _init_graph_network self.inputs, self.outputs) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1353, in _map_graph_network tensor_index=tensor_index) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1340, in build_map node_index, tensor_index) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1340, in build_map node_index, tensor_index) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1340, in build_map node_index, tensor_index) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1340, in build_map node_index, tensor_index) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1340, in build_map node_index, tensor_index) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1340, in build_map node_index, tensor_index) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1340, in build_map node_index, tensor_index) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1340, in build_map node_index, tensor_index) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1340, in build_map node_index, tensor_index) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1340, in build_map node_index, tensor_index) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1340, in build_map node_index, tensor_index) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1340, in build_map node_index, tensor_index) File "/usr/local/lib/python2.7/dist-packages/keras/engine/network.py", line 1312, in build_map node = layer._inbound_nodes[node_index] AttributeError: 'NoneType' object has no attribute '_inbound_nodes' ```
2018/08/22
[ "https://Stackoverflow.com/questions/51963377", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4880918/" ]
You need to put your cropping operations in a function and then use that function in a `Lambda` layer: ``` def my_cropping(a): cropping_list = [] n_patches = 256/32 for x in range(256//32): for y in range(256//32): cropping_list += [ K.expand_dims( Cropping2D((( x * 32, 256 - (x+1) * 32), ( y * 32, 256 - (y+1) * 32)))(a) , axis=1) ] return cropping_list ``` To use it: ``` cropping_list = Lambda(my_cropping)(a) ```
I ran into the same issue, and it was indeed solved by wrapping a Lambda layer around the tensor as @today proposed. Thanks for that hint, it pointed me in the right direction. I wanted to concatenate a vector with a square image, so I turned the vector into a diagonal matrix. It worked with the following snippet: ``` def diagonalize(vector): diagonalized = tf.matrix_diag(vector) # make diagonal matrix from vector out_singlechan = tf.expand_dims(diagonalized, -1) # append 1 channel to get compatible to the multichannel image dim return out_singlechan lstm_out = Lambda(diagonalize, output_shape=(self.img_shape[0],self.img_shape[1],1))(lstm_out) ```
820,671
I want to create an object in python that has a few attributes and I want to protect myself from accidentally using the wrong attribute name. The code is as follows: ``` class MyClass( object ) : m = None # my attribute __slots__ = ( "m" ) # ensure that object has no _m etc a = MyClass() # create one a.m = "?" # here is a PROBLEM ``` But after running this simple code, I get a very strange error: ``` Traceback (most recent call last): File "test.py", line 8, in <module> a.m = "?" AttributeError: 'test' object attribute 'm' is read-only ``` Is there any wise programmer who can spare a bit of their time and enlighten me about "read-only" errors?
2009/05/04
[ "https://Stackoverflow.com/questions/820671", "https://Stackoverflow.com", "https://Stackoverflow.com/users/69882/" ]
When you declare instance variables using `__slots__`, Python creates a [descriptor object](https://docs.python.org/2/howto/descriptor.html) as a class variable with the same name. In your case, this descriptor is overwritten by the class variable `m` that you are defining at the following line: ``` m = None # my attribute ``` Here is what you need to do: Do not define a class variable called `m`, and initialize the instance variable `m` in the `__init__` method. ``` class MyClass(object): __slots__ = ("m",) def __init__(self): self.m = None a = MyClass() a.m = "?" ``` As a side note, tuples with single elements need a comma after the element. Both work in your code because `__slots__` accepts a single string or an iterable/sequence of strings. In general, to define a tuple containing the element `1`, use `(1,)` or `1,` and not `(1)`.
`__slots__` works with instance variables, whereas what you have there is a class variable. This is how you should be doing it: ``` class MyClass( object ) : __slots__ = ( "m", ) def __init__(self): self.m = None a = MyClass() a.m = "?" # No error ```