Dataset schema (each record below is a query / language / code / url group):

  query     string, 9-60 chars
  language  string, 1 class (python)
  code      string, 105-25.7k chars
  url       string, 91-217 chars
parse query string in url
python
def query(self):
    """query_string part of a url (eg, http://host.com/path?query=string)"""
    self._query = query = ""
    query_kwargs = self.query_kwargs
    if query_kwargs:
        query = urlencode(query_kwargs, doseq=True)
    return query
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L987-L993
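Editor's illustration (not part of the dataset): the `doseq=True` flag above is what lets list-valued query kwargs expand into repeated keys. A minimal stdlib-only sketch:

from urllib.parse import urlencode

# doseq=True expands sequence values into repeated parameters
print(urlencode({'tag': ['a', 'b'], 'page': 2}, doseq=True))  # tag=a&tag=b&page=2
# without doseq, the list is stringified as a single value
print(urlencode({'tag': ['a', 'b']}))                         # tag=%5B%27a%27%2C+%27b%27%5D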
parse query string in url
python
def urlQueryParser(url, querydict):
    """parse a url query"""
    address_parse = urlparse(url)
    return urlunparse(address_parse._replace(query=urlencode(querydict)))
https://github.com/johntruckenbrodt/spatialist/blob/007f49296a156de8d7168ad235b5a5b8e8d3633d/spatialist/ancillary.py#L585-L590
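The `_replace` trick above works because `urlparse` returns a namedtuple. An illustrative round-trip with only the standard library (the example URL is made up):

from urllib.parse import urlparse, urlunparse, urlencode

url = 'https://example.com/search?old=1'
# swap out the query component while keeping the rest of the URL intact
new_url = urlunparse(urlparse(url)._replace(query=urlencode({'q': 'cats'})))
print(new_url)  # https://example.com/search?q=cats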
parse query string in url
python
def parse_url(self, url_string):
    """Parse the URL string with the url map of this app instance.

    :param url_string: the origin URL string.
    :returns: the tuple as `(url, url_adapter, query_args)`, the url is
        parsed by the standard library `urlparse`, the url_adapter is
        from the werkzeug bound URL map, the query_args is a multidict
        from the werkzeug.
    """
    url = urllib.parse.urlparse(url_string)
    url = self.validate_url(url)
    url_adapter = self.url_map.bind(server_name=url.hostname,
                                    url_scheme=url.scheme,
                                    path_info=url.path)
    query_args = url_decode(url.query)
    return url, url_adapter, query_args
https://github.com/douban/brownant/blob/3c7e6d30f67b8f0f8ca1f823ea3daed74e8725cd/brownant/app.py#L36-L51
parse query string in url
python
def parse_querystring(self, req: Request, name: str, field: Field) -> typing.Any:
    """Pull a querystring value from the request."""
    return core.get_value(req.query, name, field)
https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/aiohttpparser.py#L81-L83
parse query string in url
python
def urlparse(url):
    """Parse the URL in a Python2/3 independent fashion.

    :param str url: The URL to parse
    :rtype: Parsed
    """
    value = 'http%s' % url[5:] if url[:5] == 'postgresql' else url
    parsed = _urlparse.urlparse(value)
    path, query = parsed.path, parsed.query
    hostname = parsed.hostname if parsed.hostname else ''
    return PARSED(parsed.scheme.replace('http', 'postgresql'),
                  parsed.netloc,
                  path,
                  parsed.params,
                  query,
                  parsed.fragment,
                  parsed.username,
                  parsed.password,
                  hostname.replace('%2f', '/'),
                  parsed.port)
https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/utils.py#L130-L150
parse query string in url
python
def parse_url(url):
    """
    Parse a URL into the parts I need for processing:
        * protocol
        * domain
        * port
        * path

    :param url: A string
    :return: A tuple containing the above
    """
    split_url = url.split('/', 3)
    if len(split_url) == 3:
        # http://foo.com
        path = '/'
    elif len(split_url) == 4:
        path = '/' + split_url[3]
    else:
        raise ValueError('Invalid URL: %s' % url)

    try:
        parse_result = urlparse(url)
    except Exception:
        raise ValueError('Invalid URL: %s' % url)

    protocol = parse_result.scheme
    protocol = protocol.lower()
    if protocol not in ('http', 'https'):
        raise ValueError('Invalid URL protocol "%s"' % protocol)

    split_netloc = parse_result.netloc.split(':')
    domain = split_netloc[0]
    domain = domain.lower()

    if len(split_netloc) == 2:
        try:
            port = int(split_netloc[1])
        except ValueError:  # narrowed from a bare except
            raise ValueError('Invalid port: "%s"' % split_netloc[1])
    elif protocol == 'https':
        port = 443
    elif protocol == 'http':
        port = 80
    else:
        raise ValueError('Invalid scheme: "%s"' % protocol)

    return protocol, domain, port, path
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube_cli/subcommands/batch.py#L64-L110
parse query string in url
python
def url(self) -> str:
    """Returns the full url requested."""
    return urlunparse(
        ParseResult(
            self.scheme, self.host, self.path, '', self.query_string.decode('ascii'), '',
        ),
    )
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/wrappers/_base.py#L286-L292
parse query string in url
python
def parse(url):
    """Parses a database URL."""
    config = {}

    url = urlparse.urlparse(url)

    # Remove query strings.
    path = url.path[1:]
    path = path.split('?', 2)[0]

    # Update with environment configuration.
    config.update({
        "DB": int(path or 0),
        "PASSWORD": url.password or None,
        "HOST": url.hostname or "localhost",
        "PORT": int(url.port or 6379),
    })

    return config
https://github.com/dstufft/dj-redis-url/blob/06da0cc45db48f3274baf4b83ae413a051c6d8b2/dj_redis_url.py#L34-L53
parse query string in url
python
def parseUrl(url):
    """Return a dict containing scheme, netloc, url, params, query, fragment keys.

    query is a dict where the values are always lists. If the query key appears
    only once in the URL, the list will have a single value.
    """
    scheme, netloc, url, params, query, fragment = urllib.parse.urlparse(url)
    query_dict = {
        k: sorted(v) if len(v) > 1 else v[0]
        for k, v in list(urllib.parse.parse_qs(query).items())
    }
    return {
        'scheme': scheme,
        'netloc': netloc,
        'url': url,
        'params': params,
        'query': query_dict,
        'fragment': fragment,
    }
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/url.py#L28-L47
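As the docstring above notes, `parse_qs` always maps keys to lists, so the snippet flattens singletons. A short illustrative sketch of that behavior:

from urllib.parse import parse_qs

qs = parse_qs('a=1&b=2&b=3')
print(qs)    # {'a': ['1'], 'b': ['2', '3']}
# flatten single-item lists, keep (sorted) multi-value lists
flat = {k: sorted(v) if len(v) > 1 else v[0] for k, v in qs.items()}
print(flat)  # {'a': '1', 'b': ['2', '3']}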
parse query string in url
python
def parse_querystring(self, req, name, field):
    """Pull a querystring value from the request."""
    return core.get_value(req.GET, name, field)
https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/pyramidparser.py#L48-L50
parse query string in url
python
def parse_querystring(self, req, name, field):
    """Pull a querystring value from the request."""
    return get_value(req.query_arguments, name, field)
https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/tornadoparser.py#L97-L99
parse query string in url
python
def get_querystring(uri):
    """Get Querystring information from uri.

    :param uri: uri
    :return: querystring info or {}
    """
    parts = urlparse.urlsplit(uri)
    return urlparse.parse_qs(parts.query)
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/utils.py#L130-L137
parse query string in url
python
def parse_querystring(self, req, name, field):
    """Pull a querystring value from the request."""
    return core.get_value(req.params, name, field)
https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/falconparser.py#L94-L96
parse query string in url
python
def parse_querystring(self, req, name, field):
    """Pull a querystring value from the request."""
    return core.get_value(req.args, name, field)
https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/flaskparser.py#L77-L79
parse query string in url
python
def domain_parse(url):
    """
    urlparse wrapper for user input

    @type url: str
    @rtype: urlparse.ParseResult
    """
    url = url.lower()
    if not url.startswith('http://') and not url.startswith('https://'):
        url = '{schema}{host}'.format(schema='http://', host=url)
    url = urlparse(url)
    if not url.hostname:
        raise ValueError('Invalid domain provided')

    # Strip the www. prefix and any additional URL data; startswith is used
    # because str.lstrip('www.') strips a character set, not the prefix
    host = url.hostname
    if host.startswith('www.'):
        host = host[4:]
    url = urlparse('{scheme}://{host}'.format(scheme=url.scheme, host=host))
    return url
https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/common/__init__.py#L55-L70
parse query string in url
python
def parse_querystring(self, req, name, field):
    """Pull a querystring value from the request."""
    return core.get_value(req.query, name, field)
https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/bottleparser.py#L29-L31
parse query string in url
python
def parse_url(self) -> RequestUrl:
    """Get the parsed url object."""
    if self._URL is None:
        current_url = b"%s://%s%s" % (
            encode_str(self.schema),
            encode_str(self.host),
            self._current_url,
        )
        self._URL = RequestUrl(current_url)
    return cast(RequestUrl, self._URL)
https://github.com/zeromake/aiko/blob/53b246fa88652466a9e38ac3d1a99a6198195b0f/aiko/request.py#L422-L433
parse query string in url
python
def url(self, url_str: str) -> None:
    """Rewrite the url."""
    if "?" in url_str:
        # split only on the first '?' so values containing '?' are preserved
        url_arr = url_str.split("?", 1)
        self.parse_url.path = url_arr[0]
        self.parse_url.querystring = url_arr[1]
https://github.com/zeromake/aiko/blob/53b246fa88652466a9e38ac3d1a99a6198195b0f/aiko/request.py#L339-L346
parse query string in url
python
def parse(url):
    """Parses a database URL."""
    config = {}

    if not isinstance(url, six.string_types):
        url = ''

    url = urlparse.urlparse(url)

    # Remove query strings.
    path = url.path[1:]
    path = path.split('?', 2)[0]

    # Update with environment configuration.
    config.update({
        'NAME': path,
        'USER': url.username,
        'PASSWORD': url.password,
        'HOST': url.hostname,
        'PORT': url.port,
    })

    if url.scheme in SCHEMES:
        config['ENGINE'] = SCHEMES[url.scheme]

    return config
https://github.com/ferrix/dj-mongohq-url/blob/0901ca46cf7071881726310e666a5fe9aa0303ef/dj_mongohq_url.py#L36-L62
parse query string in url
python
def parse(url):
    """Parses a search URL."""
    config = {}

    url = urlparse.urlparse(url)

    # Remove query strings.
    path = url.path[1:]
    path = path.split('?', 2)[0]

    if url.scheme in SCHEMES:
        config["ENGINE"] = SCHEMES[url.scheme]

    if url.scheme in USES_URL:
        config["URL"] = urlparse.urlunparse(("http",) + url[1:])

    if url.scheme in USES_INDEX:
        if path.endswith("/"):
            path = path[:-1]

        split = path.rsplit("/", 1)

        if len(split) > 1:
            path = split[:-1]
            index = split[-1]
        else:
            path = ""
            index = split[0]

        config.update({
            "URL": urlparse.urlunparse(("http",) + url[1:2] + (path,) + url[3:]),
            "INDEX_NAME": index,
        })

    if url.scheme in USES_PATH:
        config.update({
            "PATH": path,
        })

    return config
https://github.com/dstufft/dj-search-url/blob/3185095eed15ce8e0a83e693d2d2269794a146b3/dj_search_url.py#L45-L85
parse query string in url
python
def url_parse_query(query, encoding=None):
    """Parse and re-join the given CGI query."""
    if isinstance(query, unicode):
        if encoding is None:
            encoding = url_encoding
        query = query.encode(encoding, 'ignore')
    # if ? is in the query, split it off, seen at msdn.microsoft.com
    append = ""
    while '?' in query:
        query, rest = query.rsplit('?', 1)
        append = '?' + url_parse_query(rest) + append
    l = []
    for k, v, sep in parse_qsl(query, keep_blank_values=True):
        k = url_quote_part(k, '/-:,;')
        if v:
            v = url_quote_part(v, '/-:,;')
            l.append("%s=%s%s" % (k, v, sep))
        elif v is None:
            l.append("%s%s" % (k, sep))
        else:
            # some sites do not work when the equal sign is missing
            l.append("%s=%s" % (k, sep))
    return ''.join(l) + append
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/url.py#L250-L272
parse query string in url
python
def _parse_url(self, url):
    """Parse a url and return the (ip, port, target) triple."""
    (ip, port, target) = ('', DEFAULT_SERVER_PORT, '')
    m = re.match(r'[rtspRTSP:/]+(?P<ip>(\d{1,3}\.){3}\d{1,3})(:(?P<port>\d+))?(?P<target>.*)', url)
    if m is not None:
        ip = m.group('ip')
        if m.group('port') is not None:
            # the port group is optional; keep DEFAULT_SERVER_PORT when absent
            port = int(m.group('port'))
        target = m.group('target')
    # PRINT('ip: %s, port: %d, target: %s' % (ip, port, target), GREEN)
    return ip, port, target
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/others/rtsp.py#L86-L95
parse query string in url
python
def url(self) -> str:
    """The url built from path + query."""
    url_str = self.parse_url.path or ""
    if self.parse_url.querystring is not None:
        url_str += "?" + self.parse_url.querystring
    return url_str
https://github.com/zeromake/aiko/blob/53b246fa88652466a9e38ac3d1a99a6198195b0f/aiko/request.py#L329-L336
parse query string in url
python
def parse(url):
    """
    Parses out the information for this url, returning its components
    expanded out to Python objects.

    :param url | <str>

    :return (<str> path, <dict> query, <str> fragment)
    """
    result = urlparse.urlparse(nstr(url))

    path = result.scheme + '://' + result.netloc
    if result.path:
        path += result.path

    query = {}

    # extract the python information from the query
    if result.query:
        url_query = urlparse.parse_qs(result.query)
        for key, value in url_query.items():
            if type(value) == list and len(value) == 1:
                value = value[0]
            query[key] = value

    return path, query, result.fragment
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/urls.py#L69-L95
parse query string in url
python
def _urlparse_qs(url):
    """
    Parse a URL query string and return the components as a dictionary.

    Based on the cgi.parse_qs method. This is a utility function provided
    with urlparse so that users need not use cgi module for parsing the
    url query string.

    Arguments:

    :type url: str
    :param url: URL with query string to be parsed
    """
    # Extract the query part from the URL.
    querystring = urlparse(url)[4]

    # Split the query into name/value pairs.
    pairs = [s2 for s1 in querystring.split('&') for s2 in s1.split(';')]

    # Split the name/value pairs.
    result = OrderedDefaultDict(list)
    for name_value in pairs:
        pair = name_value.split('=', 1)
        if len(pair) != 2:
            continue

        if len(pair[1]) > 0:
            name = _unquote(pair[0].replace('+', ' '))
            value = _unquote(pair[1].replace('+', ' '))
            result[name].append(value)

    return result
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/url.py#L72-L103
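The hand-rolled splitter above accepts both '&' and ';' separators; the stdlib's `parse_qsl` historically did the same, until a 2021 security fix (Python 3.9.2/3.10) narrowed the default to '&' only. A small sketch of the stdlib behavior on current Pythons:

from urllib.parse import parse_qsl

print(parse_qsl('a=1&b=2'))          # [('a', '1'), ('b', '2')]
# pairs with no '=' are dropped unless keep_blank_values-style handling is added
print(parse_qsl('a=1&novalue&b=2'))  # [('a', '1'), ('b', '2')]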
parse query string in url
python
def parse_search_url(url):
    """Parses a search URL."""
    config = {}

    url = urlparse.urlparse(url)

    # Remove query strings.
    path = url.path[1:]
    path = path.split('?', 2)[0]

    if url.scheme in SEARCH_SCHEMES:
        config["ENGINE"] = SEARCH_SCHEMES[url.scheme]

    if url.scheme in USES_URL:
        config["URL"] = urlparse.urlunparse(("http",) + url[1:])

    if url.scheme in USES_INDEX:
        if path.endswith("/"):
            path = path[:-1]

        split = path.rsplit("/", 1)

        if len(split) > 1:
            path = split[:-1]
            index = split[-1]
        else:
            path = ""
            index = split[0]

        config.update({
            "URL": urlparse.urlunparse(("http",) + url[1:2] + (path,) + url[3:]),
            "INDEX_NAME": index,
        })

    if url.scheme in USES_PATH:
        config.update({
            "PATH": path,
        })

    return config
https://github.com/MechanisM/django-confy/blob/53818db22d1f05623d257aac2abdc625f5972d88/confy/search.py#L23-L63
parse query string in url
python
def parse_url(request, url):
    """Parse url URL parameter."""
    try:
        validate = URLValidator()
        validate(url)
    except ValidationError:
        if url.startswith('/'):
            host = request.get_host()
            scheme = 'https' if request.is_secure() else 'http'
            url = '{scheme}://{host}{uri}'.format(scheme=scheme, host=host, uri=url)
        else:
            url = request.build_absolute_uri(reverse(url))
    return url
https://github.com/makinacorpus/django-screamshot/blob/28488599292f41c553aee7d63259e1f856f43b16/screamshot/utils.py#L198-L212
parse query string in url
python
def parse_url(url):
    """Return a dictionary of parsed url

    Including scheme, netloc, path, params, query, fragment, uri,
    username, password, host, port and http_host
    """
    try:
        url = unicode(url)
    except UnicodeDecodeError:
        pass

    if py3k:
        make_utf8 = lambda x: x
    else:
        make_utf8 = lambda x: isinstance(x, unicode) and x.encode('utf-8') or x

    if '://' in url:
        scheme, url = url.split('://', 1)
    else:
        scheme = 'http'
    url = 'http://' + url

    parsed = urlparse.urlsplit(url)
    r = ObjectDict()
    r['scheme'] = make_utf8(scheme)
    r['netloc'] = make_utf8(parsed.netloc)
    r['path'] = make_utf8(parsed.path)
    r['query'] = make_utf8(parsed.query)
    r['fragment'] = make_utf8(parsed.fragment)
    r['uri'] = make_utf8(parsed.path)
    if parsed.query:
        r['uri'] += '?' + make_utf8(parsed.query)
    r['username'] = make_utf8(parsed.username)
    r['password'] = make_utf8(parsed.password)
    host = make_utf8(parsed.hostname.encode('idna').decode('utf-8'))
    r['host'] = r['hostname'] = host
    try:
        r['port'] = parsed.port
    except ValueError:
        r['port'] = None
    if r['port']:
        r['http_host'] = '%s:%d' % (r['host'], r['port'])
    else:
        r['http_host'] = r['host']
    return r
https://github.com/ifduyue/urlfetch/blob/e0ea4673367c157eb832ba4ba2635306c81a61be/urlfetch.py#L801-L845
parse query string in url
python
def parse_querystring(msg):
    'parse a querystring into keys and values'
    for part in msg.querystring.strip().lstrip('?').split('&'):
        key, value = part.split('=')
        yield key, value
https://github.com/BrianHicks/emit/blob/19a86c2392b136c9e857000798ccaa525aa0ed84/examples/regex/graph.py#L14-L18
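The generator above raises `ValueError` on a pair whose value itself contains '=' (e.g. `token=a=b`), because `split('=')` can yield more than two parts; `split('=', 1)` or the stdlib's `parse_qsl` avoids that. An illustrative sketch of the failure mode:

from urllib.parse import parse_qsl

part = 'token=a=b'
# naive: part.split('=') -> ['token', 'a', 'b'] -> two-name unpacking fails
print(part.split('=', 1))          # ['token', 'a=b']
print(parse_qsl('x=1&token=a=b'))  # [('x', '1'), ('token', 'a=b')]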
parse query string in url
python
def parse_query_param(url, param):
    """Parses the query string of a URL and returns the value of a parameter.

    Args:
        url: A URL.
        param: A string representing the name of the parameter.

    Returns:
        The value of the parameter.
    """
    try:
        return parse.parse_qs(parse.urlparse(url).query)[param][0]
    except:
        return None
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L285-L299
parse query string in url
python
def parse_url(url_data):
    """Parse a URL."""
    if url_data.is_directory():
        # both ftp and file links represent directories as HTML data
        key = "html"
    elif url_data.is_file() and firefox.has_sqlite and firefox.extension.search(url_data.url):
        key = "firefox"
    elif url_data.scheme == "itms-services":
        key = "itms_services"
    else:
        # determine parse routine according to content types
        mime = url_data.content_type
        key = url_data.ContentMimetypes[mime]
    funcname = "parse_" + key
    if funcname in globals():
        globals()[funcname](url_data)
    else:
        url_data.aggregate.plugin_manager.run_parser_plugins(url_data, pagetype=key)
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/parser/__init__.py#L26-L43
parse query string in url
python
def _parse_href(self, href):
    """
    Extract "real" URL from Google redirected url by getting `q`
    querystring parameter.
    """
    params = parse_qs(urlsplit(href).query)
    return params.get('q')
https://github.com/nprapps/copydoc/blob/e1ab09b287beb0439748c319cf165cbc06c66624/copydoc.py#L189-L195
parse query string in url
python
def parse(url):
    """Parse a URL.

    >>> parse('http://example.com/foo/')
    URL(scheme='http', ..., domain='example', tld='com', ..., path='/foo/', ...)
    """
    parts = split(url)
    if parts.scheme:
        username, password, host, port = split_netloc(parts.netloc)
        subdomain, domain, tld = split_host(host)
    else:
        username = password = subdomain = domain = tld = port = ''
    return URL(parts.scheme, username, password, subdomain, domain, tld,
               port, parts.path, parts.query, parts.fragment, url)
https://github.com/rbaier/python-urltools/blob/76bf599aeb4cb463df8e38367aa40a7d8ec7d9a1/urltools/urltools.py#L275-L288
parse query string in url
python
def get_url_params(url: str, fragment: bool = False) -> dict:
    """Parse URL params"""
    parsed_url = urlparse(url)
    if fragment:
        url_query = parse_qsl(parsed_url.fragment)
    else:
        url_query = parse_qsl(parsed_url.query)
    return dict(url_query)
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/utils.py#L23-L32
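Both this helper and the next one can read params from the fragment, which matters for OAuth-style redirect URLs that return tokens after '#'. An illustrative call, assuming the helper above is in scope (the URL is made up):

url = 'https://example.com/cb#access_token=abc&expires_in=3600'
print(get_url_params(url, fragment=True))
# {'access_token': 'abc', 'expires_in': '3600'}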
parse query string in url
python
def parse_url_query_params(url, fragment=True):
    """Parse url query params

    :param fragment: bool: flag is used for parsing oauth url
    :param url: str: url string
    :return: dict
    """
    parsed_url = urlparse(url)
    if fragment:
        url_query = parse_qsl(parsed_url.fragment)
    else:
        url_query = parse_qsl(parsed_url.query)
    # login_response_url_query can have multiple key
    url_query = dict(url_query)
    return url_query
https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/utils.py#L55-L69
parse query string in url
python
def urlparse(uri):
    """Parse and decode the parts of a URI."""
    scheme, netloc, path, params, query, fragment = parse.urlparse(uri)
    return (
        parse.unquote(scheme),
        parse.unquote(netloc),
        parse.unquote(path),
        parse.unquote(params),
        parse.unquote(query),
        parse.unquote(fragment)
    )
https://github.com/palantir/python-language-server/blob/96e08d85635382d17024c352306c4759f124195d/pyls/uris.py#L13-L23
parse query string in url
python
def parse_url(url):
    """parse url and the result is tuple of
    (hostname, port, resource path and the flag of secure mode)

    url: url string.
    """
    if ":" not in url:
        raise ValueError("url is invalid")

    scheme, url = url.split(":", 1)

    parsed = urlparse(url, scheme="ws")
    if parsed.hostname:
        hostname = parsed.hostname
    else:
        raise ValueError("hostname is invalid")
    port = 0
    if parsed.port:
        port = parsed.port

    is_secure = False
    if scheme == "ws":
        if not port:
            port = 80
    elif scheme == "wss":
        is_secure = True
        if not port:
            port = 443
    else:
        raise ValueError("scheme %s is invalid" % scheme)

    if parsed.path:
        resource = parsed.path
    else:
        resource = "/"

    if parsed.query:
        resource += "?" + parsed.query

    return hostname, port, resource, is_secure
https://github.com/websocket-client/websocket-client/blob/3c25814664fef5b78716ed8841123ed1c0d17824/websocket/_url.py#L33-L73
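The `urlparse(url, scheme="ws")` call above relies on the stdlib filling in a default scheme for scheme-less input. A small stdlib-only sketch of that behavior (host name is made up):

from urllib.parse import urlparse

# the second argument is only a default; it does not override an explicit scheme
print(urlparse('//echo.example.org:8080/chat', scheme='ws').port)  # 8080
print(urlparse('//echo.example.org/chat', scheme='ws').scheme)     # ws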
parse query string in url
python
def parse_url(self):
    """ Parses a URL of the form:

    - ws://host[:port][path]
    - wss://host[:port][path]
    - ws+unix:///path/to/my.socket
    """
    self.scheme = None
    self.resource = None
    self.host = None
    self.port = None

    if self.url is None:
        return

    scheme, url = self.url.split(":", 1)

    parsed = urlsplit(url, scheme="http")
    if parsed.hostname:
        self.host = parsed.hostname
    elif '+unix' in scheme:
        self.host = 'localhost'
    else:
        raise ValueError("Invalid hostname from: %s", self.url)

    if parsed.port:
        self.port = parsed.port

    if scheme == "ws":
        if not self.port:
            self.port = 8080
    elif scheme == "wss":
        if not self.port:
            self.port = 443
    elif scheme in ('ws+unix', 'wss+unix'):
        pass
    else:
        raise ValueError("Invalid scheme: %s" % scheme)

    if parsed.path:
        resource = parsed.path
    else:
        resource = "/"

    if '+unix' in scheme:
        self.unix_socket_path = resource
        resource = '/'

    if parsed.query:
        resource += "?" + parsed.query

    self.scheme = scheme
    self.resource = resource
https://github.com/noisyboiler/wampy/blob/7c7ef246fec1b2bf3ec3a0e24c85c42fdd99d4bf/wampy/mixins.py#L12-L64
parse query string in url
python
def parse_url(url):
    """Parses url correctly

    :param url: url to parse
    """
    parsed = url

    if not url.startswith("http://") and not url.startswith("https://"):
        # if url is like www.yahoo.com
        parsed = "http://" + parsed
    elif url.startswith("https://"):
        parsed = parsed[8:]
        parsed = "http://" + parsed

    index_hash = parsed.rfind("#")  # remove trailing #
    index_slash = parsed.rfind("/")
    if index_hash > index_slash:
        parsed = parsed[0:index_hash]

    return parsed
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/web.py#L128-L147
parse query string in url
python
def _get_url(url):
    """Returns a URL string.

    If the ``url`` parameter is a ParsedResult from `urlparse` the full url
    will be unparsed and made into a string. Otherwise the ``url`` parameter
    is returned as is.

    :param url: ``str`` || ``object``
    """
    if isinstance(url, urlparse.ParseResult):
        return urlparse.urlunparse(url)
    else:
        return url
https://github.com/cloudnull/cloudlib/blob/5038111ce02521caa2558117e3bae9e1e806d315/cloudlib/http.py#L104-L116
parse query string in url
python
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """

    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementation does silly things to be optimal
    # on CPython.

    if not url:
        # Empty
        return Url()

    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path
        path = delim + path_

    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)

    # IPv6
    if url and url[0] == '[':
        host, url = url.split(']', 1)
        host += ']'

    # Port
    if ':' in url:
        _host, port = url.split(':', 1)

        if not host:
            host = _host

        if port:
            # If given, ports must be integers. No whitespace, no plus or
            # minus prefixes, no non-integer digits such as ^2 (superscript).
            if not port.isdigit():
                raise LocationParseError(url)
            try:
                port = int(port)
            except ValueError:
                raise LocationParseError(url)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None

    elif not host and url:
        host = url

    if not path:
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/util/url.py#L132-L222
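Since this is a vendored copy of urllib3, the same function is importable directly; a hedged usage sketch (current urllib3 exposes it as below, though the exact import path can vary across versions):

from urllib3.util.url import parse_url

u = parse_url('google.com:80/mail?folder=inbox')
print(u.scheme, u.host, u.port, u.path, u.query)
# None google.com 80 /mail folder=inbox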
parse query string in url
python
def getQueryParams(url):
    """Get URL query parameters."""
    query = urlsplit(url)[3]
    out.debug(u'Extracting query parameters from %r (%r)...' % (url, query))
    return cgi.parse_qs(query)
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/util.py#L371-L375
parse query string in url
python
def parse_url(url, encoding=None):
    """Return urlparsed url from the given argument (which could be an already
    parsed url)
    """
    return url if isinstance(url, urlparse.ParseResult) else \
        urlparse.urlparse(unicode_to_str(url, encoding))
https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/url.py#L141-L146
parse query string in url
python
def parse(cls, url, default_scheme='http', encoding='utf-8'):
    '''Parse a URL and return a URLInfo.'''
    if url is None:
        return None

    url = url.strip()

    if frozenset(url) & C0_CONTROL_SET:
        raise ValueError('URL contains control codes: {}'.format(ascii(url)))

    scheme, sep, remaining = url.partition(':')

    if not scheme:
        raise ValueError('URL missing scheme: {}'.format(ascii(url)))

    scheme = scheme.lower()

    if not sep and default_scheme:
        # Likely something like example.com/mystuff
        remaining = url
        scheme = default_scheme
    elif not sep:
        raise ValueError('URI missing colon: {}'.format(ascii(url)))

    if default_scheme and '.' in scheme or scheme == 'localhost':
        # Maybe something like example.com:8080/mystuff or
        # maybe localhost:8080/mystuff
        remaining = '{}:{}'.format(scheme, remaining)
        scheme = default_scheme

    info = URLInfo()
    info.encoding = encoding

    if scheme not in RELATIVE_SCHEME_DEFAULT_PORTS:
        info.raw = url
        info.scheme = scheme
        info.path = remaining
        return info

    if remaining.startswith('//'):
        remaining = remaining[2:]

    path_index = remaining.find('/')
    query_index = remaining.find('?')
    fragment_index = remaining.find('#')

    try:
        index_tuple = (path_index, query_index, fragment_index)
        authority_index = min(num for num in index_tuple if num >= 0)
    except ValueError:
        authority_index = len(remaining)

    authority = remaining[:authority_index]
    resource = remaining[authority_index:]

    try:
        index_tuple = (query_index, fragment_index)
        path_index = min(num for num in index_tuple if num >= 0)
    except ValueError:
        path_index = len(remaining)

    path = remaining[authority_index + 1:path_index] or '/'

    if fragment_index >= 0:
        query_index = fragment_index
    else:
        query_index = len(remaining)

    query = remaining[path_index + 1:query_index]
    fragment = remaining[query_index + 1:]

    userinfo, host = cls.parse_authority(authority)
    hostname, port = cls.parse_host(host)
    username, password = cls.parse_userinfo(userinfo)

    if not hostname:
        raise ValueError('Hostname is empty: {}'.format(ascii(url)))

    info.raw = url
    info.scheme = scheme
    info.authority = authority
    info.path = normalize_path(path, encoding=encoding)
    info.query = normalize_query(query, encoding=encoding)
    info.fragment = normalize_fragment(fragment, encoding=encoding)
    info.userinfo = userinfo
    info.username = percent_decode(username, encoding=encoding)
    info.password = percent_decode(password, encoding=encoding)
    info.host = host
    info.hostname = hostname
    info.port = port or RELATIVE_SCHEME_DEFAULT_PORTS[scheme]
    info.resource = resource

    return info
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/url.py#L124-L219
parse query string in url
python
def parse_url(url):
    """ Takes a URL string and returns its protocol and server """
    # Verify that the protocol makes sense. We shouldn't guess!
    if not RE_PROTOCOL_SERVER.match(url):
        raise Exception("URL should begin with `protocol://domain`")
    protocol, server, path, _, _, _ = urlparse.urlparse(url)
    return protocol, server
https://github.com/amcfague/webunit2/blob/3157e5837aad0810800628c1383f1fe11ee3e513/webunit2/utils.py#L45-L53
parse query string in url
python
def parse(url):
    """
    Parses a database URL in this format:

        [database type]://[username]:[password]@[host]:[port]/[database name]

    or, for cloud SQL:

        [database type]://[username]:[password]@[project_id]:[instance_name]/[database name]
    """
    config = {}

    url = urlparse.urlparse(url)

    # Remove query strings.
    path = url.path[1:]
    path = path.split('?', 2)[0]

    try:
        port = url.port
        hostname = url.hostname
    except ValueError:
        port = None
        if url.scheme == 'rdbms':
            # local appengine stub requires INSTANCE parameter
            config['INSTANCE'] = url.netloc.split('@')[-1]
            hostname = None
        else:
            hostname = "/cloudsql/{}".format(url.netloc.split('@')[-1])

    config.update({
        'NAME': path,
        'USER': url.username,
        'PASSWORD': url.password,
        'HOST': hostname,
        'PORT': port,
    })

    if url.scheme in SCHEMES:
        config['ENGINE'] = SCHEMES[url.scheme]

    return config
https://github.com/masci/django-appengine-toolkit/blob/9ffe8b05a263889787fb34a3e28ebc66b1f0a1d2/appengine_toolkit/__init__.py#L43-L82
parse query string in url
python
def parse_string(s):
    '''
    Parses a foreign resource URL into the URL string itself and any
    relevant args and kwargs
    '''
    matched_obj = SPLIT_URL_RE.match(s)
    if not matched_obj:
        raise URLParseException('Invalid Resource URL: "%s"' % s)
    url_string, arguments_string = matched_obj.groups()
    args_as_strings = URL_ARGUMENTS_RE.findall(arguments_string)

    # Determine args and kwargs
    args = []
    kwargs = {}
    for arg_string in args_as_strings:
        kwarg_match = ARG_RE.match(arg_string)
        if kwarg_match:
            key, value = kwarg_match.groups()
            kwargs[key.strip()] = value.strip()
        else:
            args.append(arg_string.strip())

    # Default to HTTP if url_string has no URL
    if not SCHEME_RE.match(url_string):
        url_string = '%s://%s' % (DEFAULT_SCHEME, url_string)

    return url_string.strip(), args, kwargs
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/types/resourceurl.py#L55-L82
parse query string in url
python
def parse(url_str):
    """
    Extract all parts from a URL string and return them as a dictionary
    """
    url_str = to_unicode(url_str)
    result = urlparse(url_str)
    netloc_parts = result.netloc.rsplit('@', 1)
    if len(netloc_parts) == 1:
        username = password = None
        host = netloc_parts[0]
    else:
        user_and_pass = netloc_parts[0].split(':')
        if len(user_and_pass) == 2:
            username, password = user_and_pass
        elif len(user_and_pass) == 1:
            username = user_and_pass[0]
            password = None
        host = netloc_parts[1]

    if host and ':' in host:
        host = host.split(':')[0]

    return {'host': host,
            'username': username,
            'password': password,
            'scheme': result.scheme,
            'port': result.port,
            'path': result.path,
            'query': result.query,
            'fragment': result.fragment}
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L100-L129
parse query string in url
python
def url_query_params(url):
    """Return query parameters as a dict from the specified URL.

    :param url: URL.
    :type url: str
    :rtype: dict
    """
    return dict(urlparse.parse_qsl(urlparse.urlparse(url).query, True))
https://github.com/NateFerrero/oauth2lib/blob/d161b010f8a596826050a09e5e94d59443cc12d9/oauth2lib/utils.py#L15-L22
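The bare `True` above is the `keep_blank_values` argument; it decides whether `a=&b=1` keeps the empty `a`. An illustrative comparison:

from urllib.parse import parse_qsl

print(dict(parse_qsl('a=&b=1')))        # {'b': '1'}
print(dict(parse_qsl('a=&b=1', True)))  # {'a': '', 'b': '1'}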
parse query string in url
python
def __query(cls, url):
    """Reads a URL"""
    try:
        return urllib2.urlopen(url).read().decode('utf-8').replace('\n', '')
    except urllib2.HTTPError:
        _, exception, _ = sys.exc_info()
        if cls.__debug:
            print('HTTPError = ' + str(exception.code))
    except urllib2.URLError:
        _, exception, _ = sys.exc_info()
        if cls.__debug:
            print('URLError = ' + str(exception.reason))
    except Exception:
        _, exception, _ = sys.exc_info()
        if cls.__debug:
            print('generic exception: ' + str(exception))
        raise
    return "inval"
https://github.com/mperlet/PyDect200/blob/4758d80c663324a612c2772e6442db1472016913/PyDect200/PyDect200.py#L56-L74
parse query string in url
python
def parse_url(arg, extract, key=None):
    """
    Returns the portion of a URL corresponding to a part specified by 'extract'
    Can optionally specify a key to retrieve an associated value
    if extract parameter is 'QUERY'

    Parameters
    ----------
    extract : one of {'PROTOCOL', 'HOST', 'PATH', 'REF', 'AUTHORITY', 'FILE',
        'USERINFO', 'QUERY'}
    key : string (optional)

    Examples
    --------
    >>> url = "https://www.youtube.com/watch?v=kEuEcWfewf8&t=10"
    >>> parse_url(url, 'QUERY', 'v')  # doctest: +SKIP
    'kEuEcWfewf8'

    Returns
    -------
    extracted : string
    """
    return ops.ParseURL(arg, extract, key).to_expr()
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L2304-L2327
parse query string in url
python
def query(url, method='GET', params=None, data=None, data_file=None, header_dict=None, header_list=None, header_file=None, username=None, password=None, auth=None, decode=False, decode_type='auto', status=False, headers=False, text=False, cookies=None, cookie_jar=None, cookie_format='lwp', persist_session=False, session_cookie_jar=None, data_render=False, data_renderer=None, header_render=False, header_renderer=None, template_dict=None, test=False, test_url=None, node='minion', port=80, opts=None, backend=None, ca_bundle=None, verify_ssl=None, cert=None, text_out=None, headers_out=None, decode_out=None, stream=False, streaming_callback=None, header_callback=None, handle=False, agent=USERAGENT, hide_fields=None, raise_error=True, **kwargs): ''' Query a resource, and decode the return data ''' ret = {} if opts is None: if node == 'master': opts = salt.config.master_config( os.path.join(salt.syspaths.CONFIG_DIR, 'master') ) elif node == 'minion': opts = salt.config.minion_config( os.path.join(salt.syspaths.CONFIG_DIR, 'minion') ) else: opts = {} if not backend: backend = opts.get('backend', 'tornado') match = re.match(r'https?://((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)($|/)', url) if not match: salt.utils.network.refresh_dns() if backend == 'requests': if HAS_REQUESTS is False: ret['error'] = ('http.query has been set to use requests, but the ' 'requests library does not seem to be installed') log.error(ret['error']) return ret else: requests_log = logging.getLogger('requests') requests_log.setLevel(logging.WARNING) # Some libraries don't support separation of url and GET parameters # Don't need a try/except block, since Salt depends on tornado url_full = tornado.httputil.url_concat(url, params) if params else url if ca_bundle is None: ca_bundle = get_ca_bundle(opts) if verify_ssl is None: verify_ssl = opts.get('verify_ssl', True) if cert is None: cert = opts.get('cert', None) if data_file is not None: data = _render( data_file, data_render, data_renderer, template_dict, opts ) # Make sure no secret fields show up in logs log_url = sanitize_url(url_full, hide_fields) log.debug('Requesting URL %s using %s method', log_url, method) log.debug("Using backend: %s", backend) if method == 'POST' and log.isEnabledFor(logging.TRACE): # Make sure no secret fields show up in logs if isinstance(data, dict): log_data = data.copy() if isinstance(hide_fields, list): for item in data: for field in hide_fields: if item == field: log_data[item] = 'XXXXXXXXXX' log.trace('Request POST Data: %s', pprint.pformat(log_data)) else: log.trace('Request POST Data: %s', pprint.pformat(data)) if header_file is not None: header_tpl = _render( header_file, header_render, header_renderer, template_dict, opts ) if isinstance(header_tpl, dict): header_dict = header_tpl else: header_list = header_tpl.splitlines() if header_dict is None: header_dict = {} if header_list is None: header_list = [] if cookie_jar is None: cookie_jar = os.path.join(opts.get('cachedir', salt.syspaths.CACHE_DIR), 'cookies.txt') if session_cookie_jar is None: session_cookie_jar = os.path.join(opts.get('cachedir', salt.syspaths.CACHE_DIR), 'cookies.session.p') if persist_session is True and HAS_MSGPACK: # TODO: This is hackish; it will overwrite the session cookie jar with # all cookies from this one connection, rather than behaving like a # proper cookie jar. Unfortunately, since session cookies do not # contain expirations, they can't be stored in a proper cookie jar. 
if os.path.isfile(session_cookie_jar): with salt.utils.files.fopen(session_cookie_jar, 'rb') as fh_: session_cookies = salt.utils.msgpack.load(fh_) if isinstance(session_cookies, dict): header_dict.update(session_cookies) else: with salt.utils.files.fopen(session_cookie_jar, 'wb') as fh_: salt.utils.msgpack.dump('', fh_) for header in header_list: comps = header.split(':') if len(comps) < 2: continue header_dict[comps[0].strip()] = comps[1].strip() if not auth: if username and password: auth = (username, password) if agent == USERAGENT: user_agent = opts.get('user_agent', None) if user_agent: agent = user_agent agent = '{0} http.query()'.format(agent) header_dict['User-agent'] = agent if backend == 'requests': sess = requests.Session() sess.auth = auth sess.headers.update(header_dict) log.trace('Request Headers: %s', sess.headers) sess_cookies = sess.cookies sess.verify = verify_ssl elif backend == 'urllib2': sess_cookies = None else: # Tornado sess_cookies = None if cookies is not None: if cookie_format == 'mozilla': sess_cookies = salt.ext.six.moves.http_cookiejar.MozillaCookieJar(cookie_jar) else: sess_cookies = salt.ext.six.moves.http_cookiejar.LWPCookieJar(cookie_jar) if not os.path.isfile(cookie_jar): sess_cookies.save() sess_cookies.load() if test is True: if test_url is None: return {} else: url = test_url ret['test'] = True if backend == 'requests': req_kwargs = {} if stream is True: if requests.__version__[0] == '0': # 'stream' was called 'prefetch' before 1.0, with flipped meaning req_kwargs['prefetch'] = False else: req_kwargs['stream'] = True # Client-side cert handling if cert is not None: if isinstance(cert, six.string_types): if os.path.exists(cert): req_kwargs['cert'] = cert elif isinstance(cert, list): if os.path.exists(cert[0]) and os.path.exists(cert[1]): req_kwargs['cert'] = cert else: log.error('The client-side certificate path that' ' was passed is not valid: %s', cert) result = sess.request( method, url, params=params, data=data, **req_kwargs ) result.raise_for_status() if stream is True: # fake a HTTP response header header_callback('HTTP/1.0 {0} MESSAGE'.format(result.status_code)) # fake streaming the content streaming_callback(result.content) return { 'handle': result, } if handle is True: return { 'handle': result, 'body': result.content, } log.debug('Final URL location of Response: %s', sanitize_url(result.url, hide_fields)) result_status_code = result.status_code result_headers = result.headers result_text = result.content result_cookies = result.cookies body = result.content if not isinstance(body, six.text_type): body = body.decode(result.encoding or 'utf-8') ret['body'] = body elif backend == 'urllib2': request = urllib_request.Request(url_full, data) handlers = [ urllib_request.HTTPHandler, urllib_request.HTTPCookieProcessor(sess_cookies) ] if url.startswith('https'): hostname = request.get_host() handlers[0] = urllib_request.HTTPSHandler(1) if not HAS_MATCHHOSTNAME: log.warning('match_hostname() not available, SSL hostname checking ' 'not available. THIS CONNECTION MAY NOT BE SECURE!') elif verify_ssl is False: log.warning('SSL certificate verification has been explicitly ' 'disabled. 
THIS CONNECTION MAY NOT BE SECURE!') else: if ':' in hostname: hostname, port = hostname.split(':') else: port = 443 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((hostname, int(port))) sockwrap = ssl.wrap_socket( sock, ca_certs=ca_bundle, cert_reqs=ssl.CERT_REQUIRED ) try: match_hostname(sockwrap.getpeercert(), hostname) except CertificateError as exc: ret['error'] = ( 'The certificate was invalid. ' 'Error returned was: %s', pprint.pformat(exc) ) return ret # Client-side cert handling if cert is not None: cert_chain = None if isinstance(cert, six.string_types): if os.path.exists(cert): cert_chain = (cert) elif isinstance(cert, list): if os.path.exists(cert[0]) and os.path.exists(cert[1]): cert_chain = cert else: log.error('The client-side certificate path that was ' 'passed is not valid: %s', cert) return if hasattr(ssl, 'SSLContext'): # Python >= 2.7.9 context = ssl.SSLContext.load_cert_chain(*cert_chain) handlers.append(urllib_request.HTTPSHandler(context=context)) # pylint: disable=E1123 else: # Python < 2.7.9 cert_kwargs = { 'host': request.get_host(), 'port': port, 'cert_file': cert_chain[0] } if len(cert_chain) > 1: cert_kwargs['key_file'] = cert_chain[1] handlers[0] = salt.ext.six.moves.http_client.HTTPSConnection(**cert_kwargs) opener = urllib_request.build_opener(*handlers) for header in header_dict: request.add_header(header, header_dict[header]) request.get_method = lambda: method try: result = opener.open(request) except URLError as exc: return {'Error': six.text_type(exc)} if stream is True or handle is True: return { 'handle': result, 'body': result.content, } result_status_code = result.code result_headers = dict(result.info()) result_text = result.read() if 'Content-Type' in result_headers: res_content_type, res_params = cgi.parse_header(result_headers['Content-Type']) if res_content_type.startswith('text/') and \ 'charset' in res_params and \ not isinstance(result_text, six.text_type): result_text = result_text.decode(res_params['charset']) if six.PY3 and isinstance(result_text, bytes): result_text = result.body.decode('utf-8') ret['body'] = result_text else: # Tornado req_kwargs = {} # Client-side cert handling if cert is not None: if isinstance(cert, six.string_types): if os.path.exists(cert): req_kwargs['client_cert'] = cert elif isinstance(cert, list): if os.path.exists(cert[0]) and os.path.exists(cert[1]): req_kwargs['client_cert'] = cert[0] req_kwargs['client_key'] = cert[1] else: log.error('The client-side certificate path that ' 'was passed is not valid: %s', cert) if isinstance(data, dict): data = _urlencode(data) if verify_ssl: req_kwargs['ca_certs'] = ca_bundle max_body = opts.get('http_max_body', salt.config.DEFAULT_MINION_OPTS['http_max_body']) connect_timeout = opts.get('http_connect_timeout', salt.config.DEFAULT_MINION_OPTS['http_connect_timeout']) timeout = opts.get('http_request_timeout', salt.config.DEFAULT_MINION_OPTS['http_request_timeout']) client_argspec = None proxy_host = opts.get('proxy_host', None) if proxy_host: # tornado requires a str for proxy_host, cannot be a unicode str in py2 proxy_host = salt.utils.stringutils.to_str(proxy_host) proxy_port = opts.get('proxy_port', None) proxy_username = opts.get('proxy_username', None) if proxy_username: # tornado requires a str, cannot be unicode str in py2 proxy_username = salt.utils.stringutils.to_str(proxy_username) proxy_password = opts.get('proxy_password', None) if proxy_password: # tornado requires a str, cannot be unicode str in py2 proxy_password = 
salt.utils.stringutils.to_str(proxy_password) no_proxy = opts.get('no_proxy', []) # Since tornado doesnt support no_proxy, we'll always hand it empty proxies or valid ones # except we remove the valid ones if a url has a no_proxy hostname in it if urlparse(url_full).hostname in no_proxy: proxy_host = None proxy_port = None # We want to use curl_http if we have a proxy defined if proxy_host and proxy_port: if HAS_CURL_HTTPCLIENT is False: ret['error'] = ('proxy_host and proxy_port has been set. This requires pycurl and tornado, ' 'but the libraries does not seem to be installed') log.error(ret['error']) return ret tornado.httpclient.AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient') client_argspec = salt.utils.args.get_function_argspec( tornado.curl_httpclient.CurlAsyncHTTPClient.initialize) else: client_argspec = salt.utils.args.get_function_argspec( tornado.simple_httpclient.SimpleAsyncHTTPClient.initialize) supports_max_body_size = 'max_body_size' in client_argspec.args req_kwargs.update({ 'method': method, 'headers': header_dict, 'auth_username': username, 'auth_password': password, 'body': data, 'validate_cert': verify_ssl, 'allow_nonstandard_methods': True, 'streaming_callback': streaming_callback, 'header_callback': header_callback, 'connect_timeout': connect_timeout, 'request_timeout': timeout, 'proxy_host': proxy_host, 'proxy_port': proxy_port, 'proxy_username': proxy_username, 'proxy_password': proxy_password, 'raise_error': raise_error, 'decompress_response': False, }) # Unicode types will cause a TypeError when Tornado's curl HTTPClient # invokes setopt. Therefore, make sure all arguments we pass which # contain strings are str types. req_kwargs = salt.utils.data.decode(req_kwargs, to_str=True) try: download_client = HTTPClient(max_body_size=max_body) \ if supports_max_body_size \ else HTTPClient() result = download_client.fetch(url_full, **req_kwargs) except tornado.httpclient.HTTPError as exc: ret['status'] = exc.code ret['error'] = six.text_type(exc) return ret except socket.gaierror as exc: if status is True: ret['status'] = 0 ret['error'] = six.text_type(exc) return ret if stream is True or handle is True: return { 'handle': result, 'body': result.body, } result_status_code = result.code result_headers = result.headers result_text = result.body if 'Content-Type' in result_headers: res_content_type, res_params = cgi.parse_header(result_headers['Content-Type']) if res_content_type.startswith('text/') and \ 'charset' in res_params and \ not isinstance(result_text, six.text_type): result_text = result_text.decode(res_params['charset']) if six.PY3 and isinstance(result_text, bytes): result_text = result_text.decode('utf-8') ret['body'] = result_text if 'Set-Cookie' in result_headers and cookies is not None: result_cookies = parse_cookie_header(result_headers['Set-Cookie']) for item in result_cookies: sess_cookies.set_cookie(item) else: result_cookies = None if isinstance(result_headers, list): result_headers_dict = {} for header in result_headers: comps = header.split(':') result_headers_dict[comps[0].strip()] = ':'.join(comps[1:]).strip() result_headers = result_headers_dict log.debug('Response Status Code: %s', result_status_code) log.trace('Response Headers: %s', result_headers) log.trace('Response Cookies: %s', sess_cookies) # log.trace("Content: %s", result_text) coding = result_headers.get('Content-Encoding', "identity") # Requests will always decompress the content, and working around that is annoying. 
if backend != 'requests': result_text = __decompressContent(coding, result_text) try: log.trace('Response Text: %s', result_text) except UnicodeEncodeError as exc: log.trace('Cannot Trace Log Response Text: %s. This may be due to ' 'incompatibilities between requests and logging.', exc) if text_out is not None: with salt.utils.files.fopen(text_out, 'w') as tof: tof.write(result_text) if headers_out is not None and os.path.exists(headers_out): with salt.utils.files.fopen(headers_out, 'w') as hof: hof.write(result_headers) if cookies is not None: sess_cookies.save() if persist_session is True and HAS_MSGPACK: # TODO: See persist_session above if 'set-cookie' in result_headers: with salt.utils.files.fopen(session_cookie_jar, 'wb') as fh_: session_cookies = result_headers.get('set-cookie', None) if session_cookies is not None: salt.utils.msgpack.dump({'Cookie': session_cookies}, fh_) else: salt.utils.msgpack.dump('', fh_) if status is True: ret['status'] = result_status_code if headers is True: ret['headers'] = result_headers if decode is True: if decode_type == 'auto': content_type = result_headers.get( 'content-type', 'application/json' ) if 'xml' in content_type: decode_type = 'xml' elif 'json' in content_type: decode_type = 'json' elif 'yaml' in content_type: decode_type = 'yaml' else: decode_type = 'plain' valid_decodes = ('json', 'xml', 'yaml', 'plain') if decode_type not in valid_decodes: ret['error'] = ( 'Invalid decode_type specified. ' 'Valid decode types are: {0}'.format( pprint.pformat(valid_decodes) ) ) log.error(ret['error']) return ret if decode_type == 'json': ret['dict'] = salt.utils.json.loads(result_text) elif decode_type == 'xml': ret['dict'] = [] items = ET.fromstring(result_text) for item in items: ret['dict'].append(xml.to_dict(item)) elif decode_type == 'yaml': ret['dict'] = salt.utils.data.decode(salt.utils.yaml.safe_load(result_text)) else: text = True if decode_out: with salt.utils.files.fopen(decode_out, 'w') as dof: dof.write(result_text) if text is True: ret['text'] = result_text return ret
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/http.py#L133-L700
parse query string in url
python
def parse_query_string(self, params):
    """
    Override this method if you need to support query string filter keys
    other than those in the format of ``key[field_name]=val``.
    Maps query string values == 'null' to ``None``.

    :param params: The query string parameters from ``request.params``.
    :return: Dictionary.
    """
    results = {}

    for key, val in params.items():
        lookup_len = len(self.query_string_lookup) + 1
        if key[0:lookup_len] == '{}['.format(self.query_string_lookup) and key[-1] == ']':
            results[key[lookup_len:-1]] = val if val.lower() != 'null' else None

    return results
https://github.com/danpoland/pyramid-restful-framework/blob/4d8c9db44b1869c3d1fdd59ca304c3166473fcbb/pyramid_restful/filters.py#L40-L56
parse query string in url
python
def parse_query(self, query):
    """Parse query string using given grammar"""
    tree = pypeg2.parse(query, Main, whitespace="")
    return tree.accept(self.converter)
https://github.com/inveniosoftware/invenio-query-parser/blob/21a2c36318003ff52d2e18e7196bb420db8ecb4b/invenio_query_parser/contrib/spires/converter.py#L39-L42
parse query string in url
python
def url(self):
    """
    Convert self into a url

    This function should more or less round-trip with :func:`.parse_url`. The
    returned url may not be exactly the same as the url inputted to
    :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
    with a blank port will have : removed).

    Example: ::

        >>> U = parse_url('http://google.com/mail/')
        >>> U.url
        'http://google.com/mail/'
        >>> Url('http', 'username:password', 'host.com', 80,
        ...     '/path', 'query', 'fragment').url
        'http://username:password@host.com:80/path?query#fragment'
    """
    scheme, auth, host, port, path, query, fragment = self
    url = ''

    # We use "is not None" because we want things to happen with empty
    # strings (or 0 port)
    if scheme is not None:
        url += scheme + '://'
    if auth is not None:
        url += auth + '@'
    if host is not None:
        url += host
    if port is not None:
        url += ':' + str(port)
    if path is not None:
        url += path
    if query is not None:
        url += '?' + query
    if fragment is not None:
        url += '#' + fragment

    return url
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/util/url.py#L56-L93
parse query string in url
python
def make_url_args(params):
    """Utility function for constructing a URL query string from a
    dictionary of parameters. The dictionary's values can be of various
    types: lists, tuples, dictionaries, strings, or None.

    :param dict params: the key-value pairs to construct a query string from
    :rtype: string"""
    p = []
    for key, value in params.iteritems():
        if isinstance(value, (list, tuple)):
            for v in value:
                p.append((key, v))
        elif isinstance(value, dict):
            for k, v in value.items():
                p.append(('%s[%s]' % (key, k), v))
        elif isinstance(value, bool):
            p.append((key, str(value).lower()))
        elif value is None:
            continue
        else:
            p.append((key, str(value)))
    return urllib.urlencode(p).encode("UTF-8")
https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/endpoint.py#L11-L33
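The `key[sub]=val` convention above (PHP/Rails-style nested params) comes from flattening the dict before encoding. A stdlib-only Python 3 sketch of the same flattening (the `flatten` helper name is made up for illustration):

from urllib.parse import urlencode

def flatten(params):
    pairs = []
    for key, value in params.items():
        if isinstance(value, dict):
            pairs.extend(('%s[%s]' % (key, k), v) for k, v in value.items())
        elif isinstance(value, (list, tuple)):
            pairs.extend((key, v) for v in value)
        else:
            pairs.append((key, value))
    return pairs

print(urlencode(flatten({'filter': {'min': 1, 'max': 9}, 'ids': [1, 2]})))
# filter%5Bmin%5D=1&filter%5Bmax%5D=9&ids=1&ids=2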
parse query string in url
python
def get_url_args(url):
    """ Returns a dictionary from a URL params """
    url_data = urllib.parse.urlparse(url)
    arg_dict = urllib.parse.parse_qs(url_data.query)
    return arg_dict
https://github.com/un33k/django-toolware/blob/973f3e003dc38b812897dab88455bee37dcaf931/toolware/utils/generic.py#L141-L145
parse query string in url
python
def query_string(self, **params):
    """Specify query string to use with the collection.

    Returns:
        :py:class:`SearchResult`
    """
    return SearchResult(self, self._api.get(self._href, **params))
https://github.com/ManageIQ/manageiq-api-client-python/blob/e0c8884929e45766c2835bc7dcf4e78b0794248f/src/manageiq_client/api.py#L332-L337
parse query string in url
python
def parse(url):
    """Parses a cache URL."""
    config = {}

    url = urlparse.urlparse(url)

    # Handle python 2.6 broken url parsing
    path, query = url.path, url.query
    if '?' in path and query == '':
        path, query = path.split('?', 1)

    cache_args = dict([(key.upper(), ';'.join(val))
                       for key, val in urlparse.parse_qs(query).items()])

    # Update with environment configuration.
    backend = BACKENDS.get(url.scheme)
    if not backend:
        raise Exception('Unknown backend: "{0}"'.format(url.scheme))

    config['BACKEND'] = BACKENDS[url.scheme]

    redis_options = {}
    if url.scheme == 'hiredis':
        redis_options['PARSER_CLASS'] = 'redis.connection.HiredisParser'

    # File based
    if not url.netloc:
        if url.scheme in ('memcached', 'pymemcached', 'djangopylibmc'):
            config['LOCATION'] = 'unix:' + path
        elif url.scheme in ('redis', 'hiredis'):
            match = re.match(r'.+?(?P<db>\d+)', path)
            if match:
                db = match.group('db')
                path = path[:path.rfind('/')]
            else:
                db = '0'
            config['LOCATION'] = 'unix:%s:%s' % (path, db)
        else:
            config['LOCATION'] = path
    # URL based
    else:
        # Handle multiple hosts
        config['LOCATION'] = ';'.join(url.netloc.split(','))

        if url.scheme in ('redis', 'hiredis'):
            if url.password:
                redis_options['PASSWORD'] = url.password
            # Specifying the database is optional, use db 0 if not specified.
            db = path[1:] or '0'
            port = url.port if url.port else 6379
            config['LOCATION'] = "redis://%s:%s/%s" % (url.hostname, port, db)

    if redis_options:
        config.setdefault('OPTIONS', {}).update(redis_options)

    if url.scheme == 'uwsgicache':
        config['LOCATION'] = config.get('LOCATION', 'default') or 'default'

    # Pop special options from cache_args
    # https://docs.djangoproject.com/en/1.10/topics/cache/#cache-arguments
    options = {}
    for key in ['MAX_ENTRIES', 'CULL_FREQUENCY']:
        val = cache_args.pop(key, None)
        if val is not None:
            options[key] = int(val)

    if options:
        config.setdefault('OPTIONS', {}).update(options)

    config.update(cache_args)

    return config
https://github.com/ghickman/django-cache-url/blob/aba81916a3e0b6e49007eb514b690bcd2ccca118/django_cache_url.py#L53-L123
parse query string in url
python
def extract_url_query_parameter(url, parameter):
    """Given a URL (ex: "http://www.test.com/path?query=3") and a parameter
    (ex: "query"), return the value as a list

    :param url: a `str` URL
    :param parameter: the URL query we want to extract
    :return: a `list` of values for the given query name in the given URL,
        or an empty list if the query is not in the URL
    """
    query_string = urlparse(url).query
    return parse_qs(query_string).get(parameter, [])
https://github.com/kennydo/nyaalib/blob/ab787b7ba141ed53d2ad978bf13eb7b8bcdd4b0d/nyaalib/__init__.py#L207-L216
parse query string in url
python
def query(name, match=None, match_type='string', status=None, status_type='string', wait_for=None, **kwargs):
    '''
    Perform an HTTP query and statefully return the result

    Passes through all the parameters described in the
    :py:func:`utils.http.query function <salt.utils.http.query>`:

    name
        The name of the query.

    match
        Specifies a pattern to look for in the return text. By default, this
        will perform a string comparison of looking for the value of match in
        the return text.

    match_type
        Specifies the type of pattern matching to use on match. Default is
        ``string``, but can also be set to ``pcre`` to use regular expression
        matching if a more complex pattern matching is required.

        .. note::

            Despite the name of ``match_type`` for this argument, this setting
            actually uses Python's ``re.search()`` function rather than
            Python's ``re.match()`` function.

    status
        The status code for a URL for which to be checked. Can be used instead
        of or in addition to the ``match`` setting.

    status_type
        Specifies the type of pattern matching to use for status. Default is
        ``string``, but can also be set to ``pcre`` to use regular expression
        matching if a more complex pattern matching is required.

        .. versionadded:: Neon

        .. note::

            Despite the name of ``match_type`` for this argument, this setting
            actually uses Python's ``re.search()`` function rather than
            Python's ``re.match()`` function.

    If both ``match`` and ``status`` options are set, both settings will be
    checked. However, note that if only one option is ``True`` and the other
    is ``False``, then ``False`` will be returned. If this case is reached,
    the comments in the return data will contain troubleshooting information.

    For more information about the ``http.query`` state, refer to the
    :ref:`HTTP Tutorial <tutorial-http>`.

    .. code-block:: yaml

        query_example:
          http.query:
            - name: 'http://example.com/'
            - status: 200
    '''
    # Monitoring state, but changes may be made over HTTP
    ret = {'name': name,
           'result': None,
           'comment': '',
           'changes': {},
           'data': {}}  # Data field for monitoring state

    if match is None and status is None:
        ret['result'] = False
        ret['comment'] += (
            ' Either match text (match) or a status code (status) is required.'
        )
        return ret

    if 'decode' not in kwargs:
        kwargs['decode'] = False
    kwargs['text'] = True
    kwargs['status'] = True
    if __opts__['test']:
        kwargs['test'] = True

    if wait_for:
        data = __salt__['http.wait_for_successful_query'](name, wait_for=wait_for, **kwargs)
    else:
        data = __salt__['http.query'](name, **kwargs)

    if match is not None:
        if match_type == 'string':
            if str(match) in data.get('text', ''):
                ret['result'] = True
                ret['comment'] += ' Match text "{0}" was found.'.format(match)
            else:
                ret['result'] = False
                ret['comment'] += ' Match text "{0}" was not found.'.format(match)
        elif match_type == 'pcre':
            if re.search(str(match), str(data.get('text', ''))):
                ret['result'] = True
                ret['comment'] += ' Match pattern "{0}" was found.'.format(match)
            else:
                ret['result'] = False
                ret['comment'] += ' Match pattern "{0}" was not found.'.format(match)

    if status is not None:
        if status_type == 'string':
            if str(data.get('status', '')) == str(status):
                ret['comment'] += ' Status {0} was found.'.format(status)
                if ret['result'] is None:
                    ret['result'] = True
            else:
                ret['comment'] += ' Status {0} was not found.'.format(status)
                ret['result'] = False
        elif status_type == 'pcre':
            if re.search(str(status), str(data.get('status', ''))):
                ret['comment'] += ' Status pattern "{0}" was found.'.format(status)
                if ret['result'] is None:
                    ret['result'] = True
            else:
                ret['comment'] += ' Status pattern "{0}" was not found.'.format(status)
                ret['result'] = False

    # cleanup spaces in comment
    ret['comment'] = ret['comment'].strip()

    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] += ' (TEST MODE'
        if 'test_url' in kwargs:
            ret['comment'] += ', TEST URL WAS: {0}'.format(kwargs['test_url'])
        ret['comment'] += ')'

    ret['data'] = data
    return ret
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/http.py#L23-L153
parse query string in url
python
def parse_url_to_dict(url):
    """Parse a url and return a dict with keys for all of the parts.

    The urlparse function() returns a wacky combination of a namedtuple
    with properties.
    """
    p = urlparse(url)

    return {
        'scheme': p.scheme,
        'netloc': p.netloc,
        'path': p.path,
        'params': p.params,
        'query': p.query,
        'fragment': p.fragment,
        'username': p.username,
        'password': p.password,
        'hostname': p.hostname,
        'port': p.port
    }
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/util/__init__.py#L936-L956
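A quick check of what such a dict would contain for a fully loaded URL (this is standard-library `urlparse` behaviour, not specific to the snippet above):

from urllib.parse import urlparse

p = urlparse('https://user:secret@example.com:8080/a/b;params?q=1&r=2#frag')
# p.scheme == 'https', p.netloc == 'user:secret@example.com:8080'
# p.path == '/a/b', p.params == 'params', p.query == 'q=1&r=2'
# p.fragment == 'frag', p.hostname == 'example.com', p.port == 8080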
parse query string in url
python
def parse(url): """Parses an email URL.""" conf = {} url = urlparse.urlparse(url) qs = urlparse.parse_qs(url.query) # Remove query strings path = url.path[1:] path = path.split('?', 2)[0] # Update with environment configuration conf.update({ 'EMAIL_FILE_PATH': path, 'EMAIL_HOST_USER': unquote(url.username), 'EMAIL_HOST_PASSWORD': unquote(url.password), 'EMAIL_HOST': url.hostname, 'EMAIL_PORT': url.port, 'EMAIL_USE_SSL': False, 'EMAIL_USE_TLS': False, }) if url.scheme in SCHEMES: conf['EMAIL_BACKEND'] = SCHEMES[url.scheme] # Set defaults for `smtp` if url.scheme == 'smtp': if not conf['EMAIL_HOST']: conf['EMAIL_HOST'] = 'localhost' if not conf['EMAIL_PORT']: conf['EMAIL_PORT'] = 25 # Set defaults for `smtps` if url.scheme == 'smtps': warnings.warn( "`smpts` scheme will be deprecated in a future version," " use `submission` instead", UserWarning, ) conf['EMAIL_USE_TLS'] = True # Set defaults for `submission`/`submit` if url.scheme in ('submission', 'submit'): conf['EMAIL_USE_TLS'] = True if not conf['EMAIL_PORT']: conf['EMAIL_PORT'] = 587 # Query args overwrite defaults if 'ssl' in qs and qs['ssl']: if qs['ssl'][0] in TRUTHY: conf['EMAIL_USE_SSL'] = True conf['EMAIL_USE_TLS'] = False elif 'tls' in qs and qs['tls']: if qs['tls'][0] in TRUTHY: conf['EMAIL_USE_SSL'] = False conf['EMAIL_USE_TLS'] = True # From addresses if '_server_email' in qs: conf['SERVER_EMAIL'] = qs['_server_email'][0] if '_default_from_email' in qs: conf['DEFAULT_FROM_EMAIL'] = qs['_default_from_email'][0] return conf
https://github.com/migonzalvar/dj-email-url/blob/5727ca02f4f1ad8d3158ca702e084ba639c86fbe/dj_email_url.py#L60-L124
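A usage sketch, assuming the package is importable as `dj_email_url` and that the string 'True' is among its TRUTHY values (the URL is hypothetical):

import dj_email_url

conf = dj_email_url.parse('smtp://user:pass@mail.example.com:2525/?tls=True')
# conf['EMAIL_HOST'] == 'mail.example.com', conf['EMAIL_PORT'] == 2525
# conf['EMAIL_USE_TLS'] is True -- query args overwrite the scheme defaults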
parse query string in url
python
def _parse_alt_url(html_chunk):
    """
    Parse URL from alternative location if not found where it should be.

    Args:
        html_chunk (obj): HTMLElement containing slice of the page with details.

    Returns:
        str: Book's URL.
    """
    url_list = html_chunk.find("a", fn=has_param("href"))
    url_list = map(lambda x: x.params["href"], url_list)
    url_list = filter(lambda x: not x.startswith("autori/"), url_list)

    if not url_list:
        return None

    return normalize_url(BASE_URL, url_list[0])
https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/scrappers/cpress_cz.py#L45-L62
parse query string in url
python
def url_parse(url, scheme=None, allow_fragments=True):
    """Parses a URL from a string into a :class:`URL` tuple.  If the URL
    is lacking a scheme it can be provided as second argument. Otherwise,
    it is ignored.  Optionally fragments can be stripped from the URL
    by setting `allow_fragments` to `False`.

    The inverse of this function is :func:`url_unparse`.

    :param url: the URL to parse.
    :param scheme: the default schema to use if the URL is schemaless.
    :param allow_fragments: if set to `False` a fragment will be removed
                            from the URL.
    """
    s = make_literal_wrapper(url)
    is_text_based = isinstance(url, text_type)

    if scheme is None:
        scheme = s('')
    netloc = query = fragment = s('')
    i = url.find(s(':'))
    if i > 0 and _scheme_re.match(to_native(url[:i], errors='replace')):
        # make sure "iri" is not actually a port number (in which case
        # "scheme" is really part of the path)
        rest = url[i + 1:]
        if not rest or any(c not in s('0123456789') for c in rest):
            # not a port number
            scheme, url = url[:i].lower(), rest

    if url[:2] == s('//'):
        delim = len(url)
        for c in s('/?#'):
            wdelim = url.find(c, 2)
            if wdelim >= 0:
                delim = min(delim, wdelim)
        netloc, url = url[2:delim], url[delim:]
        if ((s('[') in netloc and s(']') not in netloc) or
                (s(']') in netloc and s('[') not in netloc)):
            raise ValueError('Invalid IPv6 URL')

    if allow_fragments and s('#') in url:
        url, fragment = url.split(s('#'), 1)
    if s('?') in url:
        url, query = url.split(s('?'), 1)

    result_type = is_text_based and URL or BytesURL
    return result_type(scheme, netloc, url, query, fragment)
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/urls.py#L326-L371
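A usage sketch of the vendored helper above; the same function lived in `werkzeug.urls` in older Werkzeug releases (it has since been removed upstream):

from werkzeug.urls import url_parse  # available in older Werkzeug versions

u = url_parse('//example.com/search?q=parse#top', scheme='https')
# u.scheme == 'https' (default applied), u.netloc == 'example.com'
# u.path == '/search', u.query == 'q=parse', u.fragment == 'top'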
parse query string in url
python
def parse_url(url): """Return a clean URL. Remove the prefix for the Auth URL if Found. :param url: :return aurl: """ if url.startswith(('http', 'https', '//')): if url.startswith('//'): return urlparse.urlparse(url, scheme='http') else: return urlparse.urlparse(url) else: return urlparse.urlparse(urlparse.urljoin('http://', url))
https://github.com/cloudnull/cloudlib/blob/5038111ce02521caa2558117e3bae9e1e806d315/cloudlib/http.py#L43-L55
parse query string in url
python
def url(self):
    """The URL as a string of the resource."""
    urlparts = self._url
    if self.__post__:
        urlparts = list(urlparts)
        urlparts[3] = ''  # Clear out query string on POST
        if self.__token__ is not None:  # But not the token
            urlparts[3] = compat.urlencode({'token': self.__token__})
    return compat.urlunsplit(urlparts)
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L177-L185
parse query string in url
python
def parse(self, url_data):
    """Parse XML URL data."""
    self.url_data = url_data
    self.loc = False
    self.url = u""
    data = url_data.get_content()
    isfinal = True
    try:
        self.parser.Parse(data, isfinal)
    except ExpatError as expaterr:
        self.url_data.add_warning(expaterr.message, tag=WARN_XML_PARSE_ERROR)
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/parser/sitemap.py#L37-L47
parse query string in url
python
def query(url, **kwargs):
    '''
    Query a resource, and decode the return data

    Passes through all the parameters described in the
    :py:func:`utils.http.query function <salt.utils.http.query>`:

    .. autofunction:: salt.utils.http.query

    CLI Example:

    .. code-block:: bash

        salt '*' http.query http://somelink.com/
        salt '*' http.query http://somelink.com/ method=POST \
            params='key1=val1&key2=val2'
        salt '*' http.query http://somelink.com/ method=POST \
            data='<xml>somecontent</xml>'

    For more information about the ``http.query`` module, refer to the
    :ref:`HTTP Tutorial <tutorial-http>`.
    '''
    opts = __opts__.copy()
    if 'opts' in kwargs:
        opts.update(kwargs['opts'])
        del kwargs['opts']

    return salt.utils.http.query(url=url, opts=opts, **kwargs)
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/http.py#L17-L44
parse query string in url
python
def parse_html_urls(file_name, html_data):
    '''
    Returns a list of tuples in the form (url, file_name, line_number)
    '''
    try:
        html = lxml.html.fromstring(html_data)
        anchor_tags = html.cssselect('a')

        for a in anchor_tags:
            # A link was started but not finished, href with nothing set!
            if 'href' not in a.attrib or a.attrib['href'] == '':
                BROKEN_URLS.append(('None', file_name, a.sourceline))

            url = clean_url(a.attrib['href'])

            if is_valid_url(url):
                if url not in URL_CACHE:
                    URL_CACHE.add(url)
                    yield (url, file_name, a.sourceline)
    except SyntaxError:
        pass
https://github.com/ckcollab/existence/blob/09a9ceae28db6fa3a9c3d1e2af3faf76b4f3d11e/existence.py#L38-L60
parse query string in url
python
def url_to_string(url):
    """
    Return the contents of a web site url as a string.
    """
    try:
        page = urllib2.urlopen(url)
    except (urllib2.HTTPError, urllib2.URLError) as err:
        ui.error(c.MESSAGES["url_unreachable"], err)
        sys.exit(1)

    return page
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L92-L102
parse query string in url
python
def get_url(self):
    """API url

    :return: url
    :rtype: str
    """
    url = self.data[self.execute_name]
    parsed = urlparse(url)
    if not parsed.scheme:
        url = '{}://{}'.format(self.default_protocol, url)
    if not url.split(':')[-1].isalnum():
        url += ':{}'.format(self.default_port)
    return url
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/execute.py#L300-L312
parse query string in url
python
def query(self, value=None):
    """
    Return or set the query string

    :param string value: the new query string to use
    :returns: string or new :class:`URL` instance
    """
    if value is not None:
        return URL._mutate(self, query=value)
    return self._tuple.query
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L320-L329
parse query string in url
python
def _parse_http(self, url, dest):
    '''will get the filename of an http address, and return a statement
       to download it to some location

       Parameters
       ==========
       url: the source url to retrieve with curl
       dest: the destination folder to put it in the image
    '''
    file_name = os.path.basename(url)
    download_path = "%s/%s" % (dest, file_name)
    command = "curl %s -o %s" % (url, download_path)
    self.install.append(command)
https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/main/parse/docker.py#L219-L232
parse query string in url
python
def get_url(url: str, params: dict = {}, timeout: float = 5.0, cache: bool = True):
    """Wrapper for requests.get(url)

    Args:
        url: url to retrieve
        params: query string parameters
        timeout: allow this much time for the request and time it out if over
        cache: Cache for up to a day unless this is false

    Returns:
        Requests Result obj or None if timed out
    """
    try:
        if not cache:
            with requests_cache.disabled():
                r = requests.get(url, params=params, timeout=timeout)
        else:
            r = requests.get(url, params=params, timeout=timeout)

        log.debug(f"Response headers {r.headers} From cache {r.from_cache}")
        return r

    except requests.exceptions.Timeout:
        log.warn(f"Timed out getting url in get_url: {url}")
        return None
    except Exception as e:
        log.warn(f"Error getting url: {url} error: {e}")
        return None
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/utils.py#L27-L56
parse query string in url
python
def parse_tag(self, tag, attr, value, name, base):
    """Add given url data to url list."""
    assert isinstance(tag, unicode), repr(tag)
    assert isinstance(attr, unicode), repr(attr)
    assert isinstance(name, unicode), repr(name)
    assert isinstance(base, unicode), repr(base)
    assert isinstance(value, unicode) or value is None, repr(value)
    # look for meta refresh
    if tag == u'meta' and value:
        mo = refresh_re.match(value)
        if mo:
            self.found_url(mo.group("url"), name, base)
        elif attr != 'content':
            self.found_url(value, name, base)
    elif attr == u'style' and value:
        for mo in css_url_re.finditer(value):
            url = unquote(mo.group("url"), matching=True)
            self.found_url(url, name, base)
    elif attr == u'archive':
        for url in value.split(u','):
            self.found_url(url, name, base)
    elif attr == u'srcset':
        for img_candidate in value.split(u','):
            url = img_candidate.split()[0]
            self.found_url(url, name, base)
    else:
        self.found_url(value, name, base)
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/htmlutil/linkparse.py#L251-L277
parse query string in url
python
def user_parse(data):
    """Parse information from the provider."""
    _user = data.get('response', {}).get('user', {})
    yield 'id', _user.get('name')
    yield 'username', _user.get('name')
    yield 'link', _user.get('blogs', [{}])[0].get('url')
https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L619-L624
parse query string in url
python
def _parse_url(path):
    """Given a urlencoded path, returns the path and the dictionary of
    query arguments, all in Unicode."""

    # path changes from bytes to Unicode in going from Python 2 to
    # Python 3.
    if sys.version_info[0] < 3:
        o = urlparse(urllib.parse.unquote_plus(path).decode('utf8'))
    else:
        o = urlparse(urllib.parse.unquote_plus(path))
    path = o.path
    args = {}

    # Convert parse_qs' str --> [str] dictionary to a str --> str
    # dictionary since we never use multi-value GET arguments
    # anyway.
    multiargs = parse_qs(o.query, keep_blank_values=True)
    for arg, value in list(multiargs.items()):
        args[arg] = value[0]

    return path, args
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/servers/httpd.py#L50-L71
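The multi-value-to-single-value collapse done above can be seen in isolation with just the standard library:

from urllib.parse import parse_qs

multiargs = parse_qs('a=1&a=2&b=', keep_blank_values=True)
# multiargs == {'a': ['1', '2'], 'b': ['']}
args = {arg: value[0] for arg, value in multiargs.items()}
# args == {'a': '1', 'b': ''} -- only the first value of each key survives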
parse query string in url
python
def query(self):
    ''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
        values are sometimes called "URL arguments" or "GET parameters", but
        not to be confused with "URL wildcards" as they are provided by the
        :class:`Router`. '''
    get = self.environ['bottle.get'] = FormsDict()
    pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
    if len(pairs) > self.MAX_PARAMS:
        raise HTTPError(413, 'Too many parameters')
    for key, value in pairs:
        get[key] = value
    return get
https://github.com/klen/pyserve/blob/5942ff2eb41566fd39d73abbd3e5c7caa7366aa8/pyserve/bottle.py#L988-L999
parse query string in url
python
def query(self):
    ''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
        values are sometimes called "URL arguments" or "GET parameters", but
        not to be confused with "URL wildcards" as they are provided by the
        :class:`Router`. '''
    pairs = parse_qsl(self.query_string, keep_blank_values=True)
    get = self.environ['bottle.get'] = FormsDict()
    for key, value in pairs[:self.MAX_PARAMS]:
        get[key] = value
    return get
https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/reports/web/bottle.py#L988-L997
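Both bottle variants above lean on `parse_qsl`, which keeps duplicate keys as separate pairs rather than merging them:

from urllib.parse import parse_qsl

pairs = parse_qsl('q=url&q=parse&empty=', keep_blank_values=True)
# pairs == [('q', 'url'), ('q', 'parse'), ('empty', '')]
# a plain dict(pairs) would keep only the last 'q'; the FormsDict used
# above is populated pair by pair for the same effect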
parse query string in url
python
def get_url(self, resource, params=None):
    """
    Generate url for request
    """

    # replace placeholders
    pattern = r'\{(.+?)\}'
    resource = re.sub(pattern, lambda t: str(params.get(t.group(1), '')), resource)

    # build url
    parts = (self.endpoint, '/api/', resource)
    return '/'.join(map(lambda x: str(x).strip('/'), parts))
https://github.com/akolpakov/paynova-api-python-client/blob/930277623fc7b142ae9365a44f15a3a7b79bd974/paynova_api_python_client/paynova.py#L37-L51
parse query string in url
python
def parse(url, engine=None, conn_max_age=0, ssl_require=False):
    """Parses a database URL."""

    if url == 'sqlite://:memory:':
        # this is a special case, because if we pass this URL into
        # urlparse, urlparse will choke trying to interpret "memory"
        # as a port number
        return {
            'ENGINE': SCHEMES['sqlite'],
            'NAME': ':memory:'
        }
        # note: no other settings are required for sqlite

    # otherwise parse the url as normal
    config = {}

    url = urlparse.urlparse(url)

    # Split query strings from path.
    path = url.path[1:]
    if '?' in path and not url.query:
        path, query = path.split('?', 2)
    else:
        path, query = path, url.query
    query = urlparse.parse_qs(query)

    # If we are using sqlite and we have no path, then assume we
    # want an in-memory database (this is the behaviour of sqlalchemy)
    if url.scheme == 'sqlite' and path == '':
        path = ':memory:'

    # Handle postgres percent-encoded paths.
    hostname = url.hostname or ''
    if '%2f' in hostname.lower():
        # Switch to url.netloc to avoid lower cased paths
        hostname = url.netloc
        if "@" in hostname:
            hostname = hostname.rsplit("@", 1)[1]
        if ":" in hostname:
            hostname = hostname.split(":", 1)[0]
        hostname = hostname.replace('%2f', '/').replace('%2F', '/')

    # Lookup specified engine.
    engine = SCHEMES[url.scheme] if engine is None else engine

    port = (str(url.port)
            if url.port and engine in [SCHEMES['oracle'], SCHEMES['mssql']]
            else url.port)

    # Update with environment configuration.
    config.update({
        'NAME': urlparse.unquote(path or ''),
        'USER': urlparse.unquote(url.username or ''),
        'PASSWORD': urlparse.unquote(url.password or ''),
        'HOST': hostname,
        'PORT': port or '',
        'CONN_MAX_AGE': conn_max_age,
    })

    # Pass the query string into OPTIONS.
    options = {}
    for key, values in query.items():
        if url.scheme == 'mysql' and key == 'ssl-ca':
            options['ssl'] = {'ca': values[-1]}
            continue

        options[key] = values[-1]

    if ssl_require:
        options['sslmode'] = 'require'

    # Support for Postgres Schema URLs
    if 'currentSchema' in options and engine in (
        'django.contrib.gis.db.backends.postgis',
        'django.db.backends.postgresql_psycopg2',
        'django.db.backends.postgresql',
        'django_redshift_backend',
    ):
        options['options'] = '-c search_path={0}'.format(options.pop('currentSchema'))

    if options:
        config['OPTIONS'] = options

    if engine:
        config['ENGINE'] = engine

    return config
https://github.com/kennethreitz/dj-database-url/blob/27a56ebfaea70cc68c3c4080fdc9a92578952606/dj_database_url.py#L72-L157
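A usage sketch, assuming `dj-database-url` is installed (the exact ENGINE string depends on the version's SCHEMES map; the URL here is hypothetical):

import dj_database_url

cfg = dj_database_url.parse('postgres://user:pw@localhost:5432/mydb?conn_timeout=10')
# cfg['NAME'] == 'mydb', cfg['HOST'] == 'localhost', cfg['PORT'] == 5432
# query-string keys land in cfg['OPTIONS']: {'conn_timeout': '10'}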
parse query string in url
python
def get_url_directory_string(url):
    """
    Determines the url's directory string.

    :param str url: the url to extract the directory string from
    :return str: the directory string on the server
    """
    domain = UrlExtractor.get_allowed_domain(url)
    splitted_url = url.split('/')

    # the following commented list comprehension could replace
    # the following for, if not and break statement
    # index = [index for index in range(len(splitted_url))
    #          if not re.search(domain, splitted_url[index]) is None][0]
    for index in range(len(splitted_url)):
        if not re.search(domain, splitted_url[index]) is None:
            if splitted_url[-1] == "":
                splitted_url = splitted_url[index + 1:-2]
            else:
                splitted_url = splitted_url[index + 1:-1]
            break

    return '_'.join(splitted_url)
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L149-L172
parse query string in url
python
def query(cls, url=urljoin(config.API_URL, 'stac/search'), **kwargs):
    """ Get request """
    logger.debug('Query URL: %s, Body: %s' % (url, json.dumps(kwargs)))
    response = requests.post(url, data=json.dumps(kwargs))
    # API error
    if response.status_code != 200:
        raise SatSearchError(response.text)
    return response.json()
https://github.com/sat-utils/sat-search/blob/d81e4774a41990b73b55db4b1e05b21062dd957c/satsearch/search.py#L78-L85
parse query string in url
python
def is_parseable(self):
    """See if URL target is parseable for recursion."""
    if self.is_directory():
        return True
    if self.content_type in self.ContentMimetypes:
        return True
    log.debug(LOG_CHECK, "URL with content type %r is not parseable.", self.content_type)
    return False
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/ftpurl.py#L168-L175
parse query string in url
python
def fetch_and_parse(url, bodyLines):
    """Takes a url, and returns a dictionary of data with 'bodyLines' lines"""
    pageHtml = fetch_page(url)
    return parse(url, pageHtml, bodyLines)
https://github.com/mnkhouri/news_scraper/blob/7fd3487c587281a4816f0761f0c4d2196ae05702/news_scraper/scrape.py#L68-L72
parse query string in url
python
def url(self, var, default=NOTSET):
    """
    :rtype: urlparse.ParseResult
    """
    return self.get_value(var, cast=urlparse, default=default, parse_default=True)
https://github.com/joke2k/django-environ/blob/c2620021614557abe197578f99deeef42af3e082/environ/environ.py#L193-L197
parse query string in url
python
def url_params(request, except_params=None, as_is=False):
    """
    create string with GET-params of request

    usage example:
        c['sort_url'] = url_params(request, except_params=('sort',))
        ...
        <a href="{{ sort_url }}&sort=lab_number">Lab number</a>
    """
    if not request.GET:
        return ''

    params = []
    for key, value in request.GET.items():
        if except_params and key not in except_params:
            for v in request.GET.getlist(key):
                params.append('%s=%s' % (key, urlquote(v)))

    if as_is:
        str_params = '?' + '&'.join(params)
    else:
        str_params = '?' + '&'.join(params)
        str_params = urlquote(str_params)

    return mark_safe(str_params)
https://github.com/telminov/sw-django-utils/blob/43b8491c87a5dd8fce145834c00198f4de14ceb9/djutils/views/helpers.py#L28-L50
parse query string in url
python
def _parse_urls(self, match):
    '''Parse URLs.'''

    mat = match.group(0)

    # Fix a bug in the regex concerning www...com and www.-foo.com domains
    # TODO fix this in the regex instead of working around it here
    domain = match.group(5)
    if domain[0] in '.-':
        return mat

    # Only allow IANA one letter domains that are actually registered
    if len(domain) == 5 \
            and domain[-4:].lower() in ('.com', '.org', '.net') \
            and not domain.lower() in IANA_ONE_LETTER_DOMAINS:
        return mat

    # Check for urls without http(s)
    pos = mat.find('http')
    if pos != -1:
        pre, url = mat[:pos], mat[pos:]
        full_url = url

    # Find the www and force https://
    else:
        pos = mat.lower().find('www')
        pre, url = mat[:pos], mat[pos:]
        full_url = 'https://%s' % url

    if self._include_spans:
        span = match.span(0)
        # add an offset if pre is e.g. ' '
        span = (span[0] + len(pre), span[1])
        self._urls.append((url, span))
    else:
        self._urls.append(url)

    if self._html:
        return '%s%s' % (pre, self.format_url(full_url,
                                              self._shorten_url(escape(url))))
https://github.com/edmondburnett/twitter-text-python/blob/2a23ced35bfd34c4bc4b7148afd85771e9eb8669/ttp/ttp.py#L155-L195
parse query string in url
python
def with_query(self, *args, **kwargs):
    """Return a new URL with query part replaced.

    Accepts any Mapping (e.g. dict, multidict.MultiDict instances)
    or str, autoencode the argument if needed.

    A sequence of (key, value) pairs is supported as well.

    It also can take an arbitrary number of keyword arguments.

    Clear query if None is passed.
    """
    # N.B. doesn't cleanup query/fragment
    new_query = self._get_str_query(*args, **kwargs)
    return URL(
        self._val._replace(path=self._val.path, query=new_query), encoded=True
    )
https://github.com/aio-libs/yarl/blob/e47da02c00ad764e030ca7647a9565548c97d362/yarl/__init__.py#L873-L891
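A usage sketch with yarl, assuming the library is installed:

from yarl import URL

url = URL('http://example.com/path?a=1&b=2')
url.query['a']                    # '1' -- the query string parsed into a multidict
str(url.with_query({'c': '3'}))   # 'http://example.com/path?c=3' (query replaced)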
parse query string in url
python
def get_url(self, **kwargs):
    """
    Return an url, relative to the request associated with this table.
    Any keyword arguments provided are added to the query string,
    replacing existing values.
    """
    return build(
        self._request.path,
        self._request.GET,
        self._meta.prefix,
        **kwargs
    )
https://github.com/adammck/djtables/blob/8fa279e7088123f00cca9c838fe028ebf327325e/lib/djtables/table.py#L27-L38
parse query string in url
python
def parse_url(url, extra_schemes={}):
    """
    parse a munge url
    type:URL
    URL.type

    examples:
        file.yaml
        yaml:file.txt
        http://example.com/file.yaml
        yaml:http://example.com/file.txt
        mysql://user:password@localhost/database/table
        django:///home/user/project/settings_dir.settings/app_name/model
    """
    if not url:
        raise ValueError('url cannot be empty')

    cls = None
    res = urlsplit(url)

    # check config first
    if res.scheme in extra_schemes:
        # TODO - merge these with any existing and recurse
        addr = extra_schemes[res.scheme]
        if 'type' in addr:
            cls = find_cls(res.scheme, extra_schemes)
        if 'url' in addr:
            url = addr['url']
            if cls:
                res = urlsplit(url)
                return MungeURL(cls, res)
            # TODO - merge these with any existing and recurse
            return parse_url(url)

    if res.scheme:
        cls = find_cls(res.scheme, extra_schemes)

    # check file extension
    if not cls:
        (rest, sep, ext) = url.rpartition('.')
        cls = find_cls(ext, extra_schemes)

    if not cls:
        raise ValueError('unable to find codec for %s' % url)

    return MungeURL(cls, res)
https://github.com/20c/munge/blob/e20fef8c24e48d4b0a5c387820fbb2b7bebb0af0/munge/config.py#L211-L259
parse query string in url
python
def get(url):
    """Retrieve an url."""
    writeln("Getting data from url", url)
    response = requests.get(url)
    if response.status_code == 200:
        writeln(response.text)
    else:
        writeln(str(response.status_code), response.reason)
https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/examples/httprequest.py#L11-L18
parse query string in url
python
def query(self):
    """A QueryDict object holding the query parameters (QUERY_STRING)."""
    if self._query is None:
        query_string = self.environ.get('QUERY_STRING')
        self._query = QueryDict([
            (k.decode('utf-8'), v.decode('utf-8'))
            for k, v in urlparse.parse_qsl(
                query_string, keep_blank_values=True)
        ])
    return self._query
https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/request.py#L286-L295
parse query string in url
python
def _parse_request(self):
    '''
    Parse the request
    '''
    self.req_method = 'unknown'
    self.req_params = {}
    self.req_rpc_version = '2.0'
    self.req_id = 0
    self.data = self.rfile.read(int(self.headers.get('content-length')))
    data_dict = json.loads(self.data)
    self.req_method = data_dict['method']
    self.req_params = decode_data(data_dict['params'])
    self.req_rpc_version = data_dict['jsonrpc']
    self.req_id = data_dict['id']
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/remote/rpc.py#L207-L220
parse query string in url
python
def _parse_string_host(host_str):
    """
    Parse host string into a dictionary host
    :param host_str:
    :return:
    """
    host_str = EsParser._fix_host_prefix(host_str)
    parsed_url = urlparse(host_str)
    host = {HostParsing.HOST: parsed_url.hostname}
    if parsed_url.port:
        host[HostParsing.PORT] = parsed_url.port
    if parsed_url.scheme == HostParsing.HTTPS:
        host[HostParsing.PORT] = parsed_url.port or EsParser.SSL_DEFAULT_PORT
        host[HostParsing.USE_SSL] = True
        host[HostParsing.SCHEME] = HostParsing.HTTPS
    elif parsed_url.scheme:
        host[HostParsing.SCHEME] = parsed_url.scheme
    if parsed_url.username or parsed_url.password:
        host[HostParsing.HTTP_AUTH] = '%s:%s' % (parsed_url.username, parsed_url.password)
    if parsed_url.path and parsed_url.path != '/':
        host[HostParsing.URL_PREFIX] = parsed_url.path
    return host
https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/parser.py#L70-L91
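The stdlib does most of the work in the snippet above; for a host string like the one it expects:

from urllib.parse import urlparse

p = urlparse('https://elastic:changeme@es.example.com:9200/prefix')
# p.hostname == 'es.example.com', p.port == 9200
# p.username == 'elastic', p.password == 'changeme', p.path == '/prefix'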
parse query string in url
python
def try_parse_url(url):
    """ Use urlparse to try to parse URL, returning None on exception """
    if len(url.strip()) < 4:
        logger.info('URL too short: {}'.format(url))
        return None
    try:
        parsed_url = urlparse(url)
    except ValueError:
        logger.info('Parse URL ValueError: {}'.format(url))
        return None
    if parsed_url.scheme:
        return parsed_url
    try:
        parsed_url = urlparse('http://' + parsed_url.geturl())
    except ValueError:
        logger.info('Invalid URL for assumed http scheme: urlparse("{}") from "{}" '.format(
            'http://' + parsed_url.geturl(), url))
        return None
    if not parsed_url.scheme:
        logger.info('Unable to guess a scheme for URL: {}'.format(url))
        return None
    return parsed_url
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/web.py#L88-L108
parse query string in url
python
def query(self,
          url,
          method='GET',
          extra_params=None,
          extra_headers=None,
          retry=3,
          raw=False,
          stream=False):
    """
    Return a JSON object or raw session.

    :param url: Arlo API URL
    :param method: Specify the method GET, POST or PUT. Default is GET.
    :param extra_params: Dictionary to be appended on request.body
    :param extra_headers: Dictionary to be appended on request.headers
    :param retry: Attempts to retry a query. Default is 3.
    :param raw: Boolean if query() will return request object instead of JSON.
    :param stream: Boolean if query() will return a stream object.
    """
    response = None
    loop = 0

    # always make sure the headers and params are clean
    self.cleanup_headers()

    while loop <= retry:

        # override request.body or request.headers dictionary
        if extra_params:
            params = self.__params
            params.update(extra_params)
        else:
            params = self.__params
        _LOGGER.debug("Params: %s", params)

        if extra_headers:
            headers = self.__headers
            headers.update(extra_headers)
        else:
            headers = self.__headers
        _LOGGER.debug("Headers: %s", headers)

        _LOGGER.debug("Querying %s on attempt: %s/%s", url, loop, retry)
        loop += 1

        # define connection method
        req = None

        if method == 'GET':
            req = self.session.get(url, headers=headers, stream=stream)
        elif method == 'PUT':
            req = self.session.put(url, json=params, headers=headers)
        elif method == 'POST':
            req = self.session.post(url, json=params, headers=headers)

        if req and (req.status_code == 200):
            if raw:
                _LOGGER.debug("Required raw object.")
                response = req
            else:
                response = req.json()

            # leave if everything worked fine
            break

    return response
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/__init__.py#L93-L158
parse query string in url
python
def get_url_for_get(url, parameters=None):
    # type: (str, Optional[Dict]) -> str
    """Get full url for GET request including parameters

    Args:
        url (str): URL to download
        parameters (Optional[Dict]): Parameters to pass. Defaults to None.

    Returns:
        str: Full url
    """
    spliturl = urlsplit(url)
    getparams = OrderedDict(parse_qsl(spliturl.query))
    if parameters is not None:
        getparams.update(parameters)
    spliturl = spliturl._replace(query=urlencode(getparams))
    return urlunsplit(spliturl)
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/downloader.py#L135-L152
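The same merge can be written inline with the standard library; `_replace` works because `urlsplit` returns a namedtuple:

from urllib.parse import urlsplit, urlunsplit, urlencode, parse_qsl

split = urlsplit('http://example.com/api?a=1')
params = dict(parse_qsl(split.query))
params.update({'b': '2'})
urlunsplit(split._replace(query=urlencode(params)))
# -> 'http://example.com/api?a=1&b=2'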
parse query string in url
python
def _query(self, url=None, params=""): """ method to query a URL with the given parameters Parameters: url -> URL to query params -> dictionary with parameter values Returns: HTTP response code, headers If an exception occurred, headers fields are None """ if url is None: raise NoUrlError("No URL was provided.") # return values headers = {'location': None, 'title': None} headerdata = urllib.urlencode(params) try: request = urllib2.Request(url, headerdata) response = urllib2.urlopen(request) # return numeric HTTP status code unless JSONP was requested if 'jsonp' in params: status = response.read() else: status = response.getcode() info = response.info() try: headers['location'] = info['Content-Location'] except KeyError: pass try: headers['title'] = info['X-Instapaper-Title'] except KeyError: pass return (status, headers) except urllib2.HTTPError as exception: # handle API not returning JSONP response on 403 if 'jsonp' in params: return ('%s({"status":%d})' % (params['jsonp'], exception.code), headers) else: return (exception.code, headers) except IOError as exception: return (exception.code, headers)
https://github.com/mrtazz/InstapaperLibrary/blob/bf273c02b468e523994d46def07f70902f596676/instapaperlib/instapaperlib.py#L135-L176